From d82fa3d6edbdc9f9fdca9af8183fe4936978404f Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Tue, 21 Nov 2023 08:38:05 +0100 Subject: [PATCH 01/76] Restrict best LC update collection to canonical blocks Currently, the best LC update for a sync committee period may refer to blocks that have later been orphaned, if they rank better than canonical blocks according to `is_better_update`. This was done because the most important task of the light client sync protocol is to track the correct `next_sync_committee`. However, practical implementation is quite tricky because existing infrastructure such as fork choice modules can only be reused in limited form when collecting light client data. Furthermore, it becomes impossible to deterministically obtain the absolute best LC update available for any given sync committee period, because orphaned blocks may become unavailable. For these reasons, `LightClientUpdate` should only be served if they refer to data from the canonical chain as selected by fork choice. This also assists efforts for a reliable backward sync in the future. --- specs/altair/light-client/full-node.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/light-client/full-node.md b/specs/altair/light-client/full-node.md index 27651af01f..9a69b253f5 100644 --- a/specs/altair/light-client/full-node.md +++ b/specs/altair/light-client/full-node.md @@ -143,7 +143,7 @@ Full nodes SHOULD provide the best derivable `LightClientUpdate` (according to ` - `LightClientUpdate` are assigned to sync committee periods based on their `attested_header.beacon.slot` - `LightClientUpdate` are only considered if `compute_sync_committee_period_at_slot(update.attested_header.beacon.slot) == compute_sync_committee_period_at_slot(update.signature_slot)` -- Only `LightClientUpdate` with `next_sync_committee` as selected by fork choice are provided, regardless of ranking by `is_better_update`. To uniquely identify a non-finalized sync committee fork, all of `period`, `current_sync_committee` and `next_sync_committee` need to be incorporated, as sync committees may reappear over time. +- Only `LightClientUpdate` with `sync_aggregate` from blocks on the canonical chain as selected by fork choice are considered, regardless of ranking by `is_better_update`. `LightClientUpdate` referring to orphaned blocks SHOULD NOT be provided. 
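(Aside, not part of the diff: a minimal sketch of the canonicity check this restriction implies, assuming the phase0 fork choice helpers `get_head` and `get_ancestor`; `is_canonical_block` is a hypothetical helper name. A block's `sync_aggregate` would only qualify if the current head descends from that block.)

```python
def is_canonical_block(store: Store, block_root: Root, block_slot: Slot) -> bool:
    # Sketch: a block is on the canonical chain if fork choice's current head
    # descends from it (or it is the head itself).
    head_root = get_head(store)
    return get_ancestor(store, head_root, block_slot) == block_root
```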
### `create_light_client_finality_update` From be2984156bb086d9e73445245ad046a0e8054228 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Tue, 6 Feb 2024 13:00:17 +0100 Subject: [PATCH 02/76] Add canonical data collection test infrastructure --- .../light_client/test_data_collection.py | 934 ++++++++++++++++++ tests/formats/light_client/README.md | 1 + tests/formats/light_client/data_collection.md | 76 ++ tests/generators/light_client/main.py | 1 + 4 files changed, 1012 insertions(+) create mode 100644 tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py create mode 100644 tests/formats/light_client/data_collection.md diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py new file mode 100644 index 0000000000..264c654810 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -0,0 +1,934 @@ +from typing import (Any, Dict, List, Set) +from dataclasses import dataclass + +from eth_utils import encode_hex +from eth2spec.test.context import ( + spec_state_test_with_matching_config, + with_presets, + with_light_client, +) +from eth2spec.test.helpers.constants import ( + ALTAIR, + MINIMAL, +) +from eth2spec.test.helpers.fork_transition import ( + transition_across_forks, +) +from eth2spec.test.helpers.forks import ( + is_post_altair, +) +from eth2spec.test.helpers.light_client import ( + compute_start_slot_at_sync_committee_period, + get_sync_aggregate, + upgrade_lc_header_to_new_spec, + upgrade_lc_update_to_new_spec, +) + + +def next_epoch_boundary_slot(spec, slot): + ## Compute the first possible epoch boundary state slot of a `Checkpoint` + ## referring to a block at given slot. 
+ epoch = spec.compute_epoch_at_slot(slot + spec.SLOTS_PER_EPOCH - 1) + return spec.compute_start_slot_at_epoch(epoch) + + +@dataclass(frozen=True) +class BlockId(object): + slot: Any + root: Any + + +def block_to_block_id(block): + return BlockId( + slot=block.message.slot, + root=block.message.hash_tree_root(), + ) + + +def state_to_block_id(state): + parent_header = state.latest_block_header.copy() + parent_header.state_root = state.hash_tree_root() + return BlockId(slot=parent_header.slot, root=parent_header.hash_tree_root()) + + +def bootstrap_bid(bootstrap): + return BlockId( + slot=bootstrap.header.beacon.slot, + root=bootstrap.header.beacon.hash_tree_root(), + ) + + +def update_attested_bid(update): + return BlockId( + slot=update.attested_header.beacon.slot, + root=update.attested_header.beacon.hash_tree_root(), + ) + + +@dataclass +class ForkedBeaconState(object): + spec: Any + data: Any + + +@dataclass +class ForkedSignedBeaconBlock(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientHeader(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientBootstrap(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientUpdate(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientFinalityUpdate(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientOptimisticUpdate(object): + spec: Any + data: Any + + +@dataclass +class CachedLightClientData(object): + # Sync committee branches at block's post-state + current_sync_committee_branch: Any # CurrentSyncCommitteeBranch + next_sync_committee_branch: Any # NextSyncCommitteeBranch + + # Finality information at block's post-state + finalized_slot: Any # Slot + finality_branch: Any # FinalityBranch + + # Best / latest light client data + current_period_best_update: ForkedLightClientUpdate + latest_signature_slot: Any # Slot + + +@dataclass +class LightClientDataCache(object): + # Cached data for creating future `LightClientUpdate` instances. + # Key is the block ID of which the post state was used to get the data. + # Data stored for the finalized head block and all non-finalized blocks. + data: Dict[BlockId, CachedLightClientData] + + # Light client data for the latest slot that was signed by at least + # `MIN_SYNC_COMMITTEE_PARTICIPANTS`. 
May be older than head + latest: ForkedLightClientFinalityUpdate + + # The earliest slot for which light client data is imported + tail_slot: Any # Slot + + +@dataclass +class LightClientDataDb(object): + headers: Dict[Any, ForkedLightClientHeader] # Root -> ForkedLightClientHeader + current_branches: Dict[Any, Any] # Slot -> CurrentSyncCommitteeBranch + sync_committees: Dict[Any, Any] # SyncCommitteePeriod -> SyncCommittee + best_updates: Dict[Any, ForkedLightClientUpdate] # SyncCommitteePeriod -> ForkedLightClientUpdate + + +@dataclass +class LightClientDataStore(object): + # Cached data to accelerate creating light client data + cache: LightClientDataCache + + # Persistent light client data + db: LightClientDataDb + + +@dataclass +class LightClientDataCollectionTest(object): + steps: List[Dict[str, Any]] + files: Set[str] + + # Fork schedule + spec: Any + phases: Any + + # History access + blocks: Dict[Any, ForkedSignedBeaconBlock] # Block root -> ForkedSignedBeaconBlock + finalized_block_roots: Dict[Any, Any] # Slot -> Root + states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState + finalized_checkpoint_states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState + latest_finalized_epoch: Any # Epoch + latest_finalized_bid: BlockId + historical_tail_slot: Any # Slot + + # Light client data + lc_data_store: LightClientDataStore + + +def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockId] + try: + block = test.blocks[bid.root] + while True: + if block.data.message.slot <= slot: + return block_to_block_id(block.data) + + block = test.blocks[block.data.message.parent_root] + except KeyError: + return None + + +def block_id_at_finalized_slot(test, slot): # -> Optional[BlockId] + while slot >= test.historical_tail_slot: + try: + return BlockId(slot=slot, root=test.finalized_block_roots[slot]) + except KeyError: + slot = slot - 1 + return None + + +def get_current_sync_committee_for_finalized_period(test, period): # -> Optional[SyncCommittee] + low_slot = max( + test.historical_tail_slot, + test.spec.compute_start_slot_at_epoch(test.spec.config.ALTAIR_FORK_EPOCH) + ) + if period < test.spec.compute_sync_committee_period_at_slot(low_slot): + return None + period_start_slot = compute_start_slot_at_sync_committee_period(test.spec, period) + sync_committee_slot = max(period_start_slot, low_slot) + bid = block_id_at_finalized_slot(test, sync_committee_slot) + if bid is None: + return None + block = test.blocks[bid.root] + state = test.finalized_checkpoint_states[block.data.message.state_root] + if sync_committee_slot > state.data.slot: + state.spec, state.data, _ = transition_across_forks(state.spec, state.data, sync_committee_slot, phases=test.phases) + assert is_post_altair(state.spec) + return state.data.current_sync_committee + + +def light_client_header_for_block(test, block): # -> ForkedLightClientHeader + if not is_post_altair(block.spec): + spec = test.phases[ALTAIR] + else: + spec = block.spec + return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) + + +def light_client_header_for_block_id(test, bid): # -> ForkedLightClientHeader + block = test.blocks[bid.root] + if not is_post_altair(block.spec): + spec = test.phases[ALTAIR] + else: + spec = block.spec + return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) + + +def sync_aggregate_for_block_id(test, bid): # -> Optional[SyncAggregate] + block = test.blocks[bid.root] + if not is_post_altair(block.spec): + return None + 
return block.data.message.body.sync_aggregate + + +def get_light_client_data(lc_data_store, bid): # -> CachedLightClientData + ## Fetch cached light client data about a given block. + ## Data must be cached (`cache_light_client_data`) before calling this function. + try: + return lc_data_store.cache.data[bid] + except KeyError: + raise ValueError("Trying to get light client data that was not cached") + + +def cache_light_client_data(lc_data_store, spec, state, bid, current_period_best_update, latest_signature_slot): + ## Cache data for a given block and its post-state to speed up creating future + ## `LightClientUpdate` and `LightClientBootstrap` instances that refer to this + ## block and state. + cached_data = CachedLightClientData( + current_sync_committee_branch=spec.compute_merkle_proof(state, spec.CURRENT_SYNC_COMMITTEE_GINDEX), + next_sync_committee_branch=spec.compute_merkle_proof(state, spec.NEXT_SYNC_COMMITTEE_GINDEX), + finalized_slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), + finality_branch=spec.compute_merkle_proof(state, spec.FINALIZED_ROOT_GINDEX), + current_period_best_update=current_period_best_update, + latest_signature_slot=latest_signature_slot, + ) + if bid in lc_data_store.cache.data: + raise ValueError("Redundant `cache_light_client_data` call") + lc_data_store.cache.data[bid] = cached_data + + +def delete_light_client_data(lc_data_store, bid): + ## Delete cached light client data for a given block. This needs to be called + ## when a block becomes unreachable due to finalization of a different fork. + del lc_data_store.cache.data[bid] + + +def create_light_client_finality_update_from_light_client_data(test, + attested_bid, + signature_slot, + sync_aggregate): # -> ForkedLightClientFinalityUpdate + attested_header = light_client_header_for_block_id(test, attested_bid) + attested_data = get_light_client_data(test.lc_data_store, attested_bid) + finalized_bid = block_id_at_finalized_slot(test, attested_data.finalized_slot) + if finalized_bid is not None: + if finalized_bid.slot != attested_data.finalized_slot: + # Empty slots at end of epoch, update cache for latest block slot + attested_data.finalized_slot = finalized_bid.slot + if finalized_bid.slot == attested_header.spec.GENESIS_SLOT: + finalized_header = ForkedLightClientHeader( + spec=attested_header.spec, + data=attested_header.spec.LightClientHeader(), + ) + else: + finalized_header = light_client_header_for_block_id(test, finalized_bid) + finalized_header = ForkedLightClientHeader( + spec=attested_header.spec, + data=upgrade_lc_header_to_new_spec( + finalized_header.spec, + attested_header.spec, + finalized_header.data, + ) + ) + finality_branch = attested_data.finality_branch + return ForkedLightClientFinalityUpdate( + spec=attested_header.spec, + data=attested_header.spec.LightClientFinalityUpdate( + attested_header=attested_header.data, + finalized_header=finalized_header.data, + finality_branch=finality_branch, + sync_aggregate=sync_aggregate, + signature_slot=signature_slot, + ), + ) + + +def create_light_client_update_from_light_client_data(test, + attested_bid, + signature_slot, + sync_aggregate, + next_sync_committee): # -> ForkedLightClientUpdate + finality_update = create_light_client_finality_update_from_light_client_data( + test, attested_bid, signature_slot, sync_aggregate) + attested_data = get_light_client_data(test.lc_data_store, attested_bid) + return ForkedLightClientUpdate( + spec=finality_update.spec, + data=finality_update.spec.LightClientUpdate( + 
attested_header=finality_update.data.attested_header, + next_sync_committee=next_sync_committee, + next_sync_committee_branch=attested_data.next_sync_committee_branch, + finalized_header=finality_update.data.finalized_header, + finality_branch=finality_update.data.finality_branch, + sync_aggregate=finality_update.data.sync_aggregate, + signature_slot=finality_update.data.signature_slot, + ) + ) + + +def create_light_client_update(test, spec, state, block, parent_bid): + ## Create `LightClientUpdate` instances for a given block and its post-state, + ## and keep track of best / latest ones. Data about the parent block's + ## post-state must be cached (`cache_light_client_data`) before calling this. + + # Verify attested block (parent) is recent enough and that state is available + attested_bid = parent_bid + attested_slot = attested_bid.slot + if attested_slot < test.lc_data_store.cache.tail_slot: + cache_light_client_data( + test.lc_data_store, + spec, + state, + block_to_block_id(block), + current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), + latest_signature_slot=spec.GENESIS_SLOT, + ) + return + + # If sync committee period changed, reset `best` + attested_period = spec.compute_sync_committee_period_at_slot(attested_slot) + signature_slot = block.message.slot + signature_period = spec.compute_sync_committee_period_at_slot(signature_slot) + attested_data = get_light_client_data(test.lc_data_store, attested_bid) + if attested_period != signature_period: + best = ForkedLightClientUpdate(spec=None, data=None) + else: + best = attested_data.current_period_best_update + + # If sync committee does not have sufficient participants, do not bump latest + sync_aggregate = block.message.body.sync_aggregate + num_active_participants = sum(sync_aggregate.sync_committee_bits) + if num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS: + latest_signature_slot = attested_data.latest_signature_slot + else: + latest_signature_slot = signature_slot + + # To update `best`, sync committee must have sufficient participants, and + # `signature_slot` must be in `attested_slot`'s sync committee period + if ( + num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS + or attested_period != signature_period + ): + cache_light_client_data( + test.lc_data_store, + spec, + state, + block_to_block_id(block), + current_period_best_update=best, + latest_signature_slot=latest_signature_slot, + ) + return + + # Check if light client data improved + update = create_light_client_update_from_light_client_data( + test, attested_bid, signature_slot, sync_aggregate, state.next_sync_committee) + is_better = ( + best.spec is None + or spec.is_better_update(update.data, upgrade_lc_update_to_new_spec(best.spec, update.spec, best.data)) + ) + + # Update best light client data for current sync committee period + if is_better: + best = update + cache_light_client_data( + test.lc_data_store, + spec, + state, + block_to_block_id(block), + current_period_best_update=best, + latest_signature_slot=latest_signature_slot, + ) + + +def create_light_client_bootstrap(test, spec, bid): + block = test.blocks[bid.root] + period = spec.compute_sync_committee_period_at_slot(bid.slot) + if period not in test.lc_data_store.db.sync_committees: + test.lc_data_store.db.sync_committees[period] = \ + get_current_sync_committee_for_finalized_period(test, period) + test.lc_data_store.db.headers[bid.root] = ForkedLightClientHeader( + spec=block.spec, data=block.spec.block_to_light_client_header(block.data)) + 
test.lc_data_store.db.current_branches[bid.slot] = \ + get_light_client_data(test.lc_data_store, bid).current_sync_committee_branch + + +def process_new_block_for_light_client(test, spec, state, block, parent_bid): + ## Update light client data with information from a new block. + if block.message.slot < test.lc_data_store.cache.tail_slot: + return + + if is_post_altair(spec): + create_light_client_update(test, spec, state, block, parent_bid) + else: + raise ValueError("`tail_slot` cannot be before Altair") + + +def process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid): + ## Update light client data to account for a new head block. + ## Note that `old_finalized_bid` is not yet updated when this is called. + if head_bid.slot < test.lc_data_store.cache.tail_slot: + return + + # Commit best light client data for non-finalized periods + head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) + low_slot = max(test.lc_data_store.cache.tail_slot, old_finalized_bid.slot) + low_period = spec.compute_sync_committee_period_at_slot(low_slot) + bid = head_bid + for period in reversed(range(low_period, head_period + 1)): + period_end_slot = compute_start_slot_at_sync_committee_period(spec, period + 1) - 1 + bid = get_ancestor_of_block_id(test, bid, period_end_slot) + if bid is None or bid.slot < low_slot: + break + best = get_light_client_data(test.lc_data_store, bid).current_period_best_update + if ( + best.spec is None + or sum(best.data.sync_aggregate.sync_committee_bits) < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS + ): + test.lc_data_store.db.best_updates.pop(period, None) + else: + test.lc_data_store.db.best_updates[period] = best + + # Update latest light client data + head_data = get_light_client_data(test.lc_data_store, head_bid) + signature_slot = head_data.latest_signature_slot + if signature_slot <= low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + signature_bid = get_ancestor_of_block_id(test, head_bid, signature_slot) + if signature_bid is None or signature_bid.slot <= low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + attested_bid = get_ancestor_of_block_id(test, signature_bid, signature_bid.slot - 1) + if attested_bid is None or attested_bid.slot < low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + sync_aggregate = sync_aggregate_for_block_id(test, signature_bid) + assert sync_aggregate is not None + test.lc_data_store.cache.latest = create_light_client_finality_update_from_light_client_data( + test, attested_bid, signature_slot, sync_aggregate) + + +def process_finalization_for_light_client(test, spec, finalized_bid, old_finalized_bid): + ## Prune cached data that is no longer useful for creating future + ## `LightClientUpdate` and `LightClientBootstrap` instances. + ## This needs to be called whenever `finalized_checkpoint` changes. 
+ finalized_slot = finalized_bid.slot + if finalized_slot < test.lc_data_store.cache.tail_slot: + return + + # Cache `LightClientBootstrap` for newly finalized epoch boundary blocks + first_new_slot = old_finalized_bid.slot + 1 + low_slot = max(first_new_slot, test.lc_data_store.cache.tail_slot) + boundary_slot = finalized_slot + while boundary_slot >= low_slot: + bid = block_id_at_finalized_slot(test, boundary_slot) + if bid is None: + break + if bid.slot >= low_slot: + create_light_client_bootstrap(test, spec, bid) + boundary_slot = next_epoch_boundary_slot(spec, bid.slot) + if boundary_slot < spec.SLOTS_PER_EPOCH: + break + boundary_slot = boundary_slot - spec.SLOTS_PER_EPOCH + + # Prune light client data that is no longer referrable by future updates + bids_to_delete = [] + for bid in test.lc_data_store.cache.data: + if bid.slot >= finalized_bid.slot: + continue + bids_to_delete.append(bid) + for bid in bids_to_delete: + delete_light_client_data(test.lc_data_store, bid) + + +def get_light_client_bootstrap(test, block_root): # -> ForkedLightClientBootstrap + try: + header = test.lc_data_store.db.headers[block_root] + except KeyError: + return ForkedLightClientBootstrap(spec=None, data=None) + + slot = header.data.beacon.slot + period = header.spec.compute_sync_committee_period_at_slot(slot) + return ForkedLightClientBootstrap( + spec=header.spec, + data=header.spec.LightClientBootstrap( + header=header.data, + current_sync_committee=test.lc_data_store.db.sync_committees[period], + current_sync_committee_branch=test.lc_data_store.db.current_branches[slot], + ) + ) + + +def get_light_client_update_for_period(test, period): # -> ForkedLightClientUpdate + try: + return test.lc_data_store.db.best_updates[period] + except KeyError: + return ForkedLightClientUpdate(spec=None, data=None) + + +def get_light_client_finality_update(test): # -> ForkedLightClientFinalityUpdate + return test.lc_data_store.cache.latest + + +def get_light_client_optimistic_update(test): # -> ForkedLightClientOptimisticUpdate + finality_update = get_light_client_finality_update(test) + if finality_update.spec is None: + return ForkedLightClientOptimisticUpdate(spec=None, data=None) + return ForkedLightClientOptimisticUpdate( + spec=finality_update.spec, + data=finality_update.spec.LightClientOptimisticUpdate( + attested_header=finality_update.data.attested_header, + sync_aggregate=finality_update.data.sync_aggregate, + signature_slot=finality_update.data.signature_slot, + ), + ) + + +def setup_test(spec, state, phases=None): + assert spec.compute_slots_since_epoch_start(state.slot) == 0 + + test = LightClientDataCollectionTest( + steps=[], + files=set(), + spec=spec, + phases=phases, + blocks={}, + finalized_block_roots={}, + states={}, + finalized_checkpoint_states={}, + latest_finalized_epoch=state.finalized_checkpoint.epoch, + latest_finalized_bid=BlockId( + slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), + root=state.finalized_checkpoint.root, + ), + historical_tail_slot=state.slot, + lc_data_store=LightClientDataStore( + cache=LightClientDataCache( + data={}, + latest=ForkedLightClientFinalityUpdate(spec=None, data=None), + tail_slot=max(state.slot, spec.compute_start_slot_at_epoch(spec.config.ALTAIR_FORK_EPOCH)), + ), + db=LightClientDataDb( + headers={}, + current_branches={}, + sync_committees={}, + best_updates={}, + ), + ), + ) + bid = state_to_block_id(state) + yield "initial_state", state + test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, data=spec.SignedBeaconBlock( + 
message=spec.BeaconBlock(state_root=state.hash_tree_root()), + )) + test.finalized_block_roots[bid.slot] = bid.root + test.states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) + test.finalized_checkpoint_states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) + cache_light_client_data( + test.lc_data_store, spec, state, bid, + current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), + latest_signature_slot=spec.GENESIS_SLOT, + ) + create_light_client_bootstrap(test, spec, bid) + + return test + + +def finish_test(test): + yield "steps", test.steps + + +def encode_object(test, prefix, obj, slot, genesis_validators_root): + yield from [] # Consistently enable `yield from` syntax in calling tests + + file_name = f"{prefix}_{slot}_{encode_hex(obj.data.hash_tree_root())}" + if file_name not in test.files: + test.files.add(file_name) + yield file_name, obj.data + return { + "fork_digest": encode_hex(obj.spec.compute_fork_digest( + obj.spec.compute_fork_version(obj.spec.compute_epoch_at_slot(slot)), + genesis_validators_root, + )), + "data": file_name, + } + + +def add_new_block(test, spec, state, slot=None, num_sync_participants=0): + if slot is None: + slot = state.slot + 1 + assert slot > state.slot + parent_bid = state_to_block_id(state) + + # Advance to target slot - 1 to ensure sync aggregate can be efficiently computed + if state.slot < slot - 1: + spec, state, _ = transition_across_forks(spec, state, slot - 1, phases=test.phases) + + # Compute sync aggregate, using: + # - sync committee based on target slot + # - fork digest based on target slot - 1 + # - signed data based on parent_bid.slot + # All three slots may be from different forks + sync_aggregate, signature_slot = get_sync_aggregate( + spec, state, num_participants=num_sync_participants, phases=test.phases) + assert signature_slot == slot + + # Apply final block with computed sync aggregate + spec, state, block = transition_across_forks( + spec, state, slot, phases=test.phases, with_block=True, sync_aggregate=sync_aggregate) + bid = block_to_block_id(block) + test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, data=block) + test.states[block.message.state_root] = ForkedBeaconState(spec=spec, data=state) + process_new_block_for_light_client(test, spec, state, block, parent_bid) + block_obj = yield from encode_object( + test, "block", ForkedSignedBeaconBlock(spec=spec, data=block), block.message.slot, + state.genesis_validators_root, + ) + test.steps.append({ + "new_block": block_obj + }) + return spec, state, bid + + +def select_new_head(test, spec, head_bid): + old_finalized_bid = test.latest_finalized_bid + process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid) + + # Process finalization + block = test.blocks[head_bid.root] + state = test.states[block.data.message.state_root] + if state.data.finalized_checkpoint.epoch != spec.GENESIS_EPOCH: + block = test.blocks[state.data.finalized_checkpoint.root] + bid = block_to_block_id(block.data) + new_finalized_bid = bid + if new_finalized_bid.slot > old_finalized_bid.slot: + old_finalized_epoch = None + new_finalized_epoch = state.data.finalized_checkpoint.epoch + while bid.slot > test.latest_finalized_bid.slot: + test.finalized_block_roots[bid.slot] = bid.root + finalized_epoch = spec.compute_epoch_at_slot(bid.slot + spec.SLOTS_PER_EPOCH - 1) + if finalized_epoch != old_finalized_epoch: + state = test.states[block.data.message.state_root] + 
test.finalized_checkpoint_states[block.data.message.state_root] = state + old_finalized_epoch = finalized_epoch + block = test.blocks[block.data.message.parent_root] + bid = block_to_block_id(block.data) + test.latest_finalized_epoch = new_finalized_epoch + test.latest_finalized_bid = new_finalized_bid + process_finalization_for_light_client(test, spec, new_finalized_bid, old_finalized_bid) + + blocks_to_delete = [] + for block_root, block in test.blocks.items(): + if block.data.message.slot < new_finalized_bid.slot: + blocks_to_delete.append(block_root) + for block_root in blocks_to_delete: + del test.blocks[block_root] + states_to_delete = [] + for state_root, state in test.states.items(): + if state.data.slot < new_finalized_bid.slot: + states_to_delete.append(state_root) + for state_root in states_to_delete: + del test.states[state_root] + + yield from [] # Consistently enable `yield from` syntax in calling tests + + bootstraps = [] + for state in test.finalized_checkpoint_states.values(): + bid = state_to_block_id(state.data) + entry = { + "block_root": encode_hex(bid.root), + } + bootstrap = get_light_client_bootstrap(test, bid.root) + if bootstrap.spec is not None: + bootstrap_obj = yield from encode_object( + test, "bootstrap", bootstrap, bootstrap.data.header.beacon.slot, + state.data.genesis_validators_root, + ) + entry["bootstrap"] = bootstrap_obj + bootstraps.append(entry) + + best_updates = [] + low_period = spec.compute_sync_committee_period_at_slot(test.lc_data_store.cache.tail_slot) + head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) + for period in range(low_period, head_period + 1): + entry = { + "period": int(period), + } + update = get_light_client_update_for_period(test, period) + if update.spec is not None: + update_obj = yield from encode_object( + test, "update", update, update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + entry["update"] = update_obj + best_updates.append(entry) + + checks = { + "latest_finalized_checkpoint": { + "epoch": int(test.latest_finalized_epoch), + "root": encode_hex(test.latest_finalized_bid.root), + }, + "bootstraps": bootstraps, + "best_updates": best_updates, + } + finality_update = get_light_client_finality_update(test) + if finality_update.spec is not None: + finality_update_obj = yield from encode_object( + test, "finality_update", finality_update, finality_update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + checks["latest_finality_update"] = finality_update_obj + optimistic_update = get_light_client_finality_update(test) + if optimistic_update.spec is not None: + optimistic_update_obj = yield from encode_object( + test, "optimistic_update", optimistic_update, optimistic_update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + checks["latest_finality_update"] = optimistic_update_obj + + test.steps.append({ + "new_head": { + "head_block_root": encode_hex(head_bid.root), + "checks": checks, + } + }) + + +@with_light_client +@spec_state_test_with_matching_config +@with_presets([MINIMAL], reason="too slow") +def test_light_client_data_collection(spec, state): + # Start test + test = yield from setup_test(spec, state) + + # Genesis block is post Altair and is finalized, so can be used as bootstrap + genesis_bid = BlockId(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) + assert bootstrap_bid(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid + + # No blocks have 
been imported, so no other light client data is available + period = spec.compute_sync_committee_period_at_slot(state.slot) + assert get_light_client_update_for_period(test, period).spec is None + assert get_light_client_finality_update(test).spec is None + assert get_light_client_optimistic_update(test).spec is None + + # Start branch A with a block that has an empty sync aggregate + spec_a, state_a, bid_1 = yield from add_new_block(test, spec, state, slot=1) + yield from select_new_head(test, spec_a, bid_1) + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_light_client_update_for_period(test, period).spec is None + assert get_light_client_finality_update(test).spec is None + assert get_light_client_optimistic_update(test).spec is None + + # Start branch B with a block that has 1 participant + spec_b, state_b, bid_2 = yield from add_new_block(test, spec, state, slot=2, num_sync_participants=1) + yield from select_new_head(test, spec_b, bid_2) + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == genesis_bid + assert update_attested_bid(get_light_client_finality_update(test).data) == genesis_bid + assert update_attested_bid(get_light_client_optimistic_update(test).data) == genesis_bid + + # Build on branch A, once more with an empty sync aggregate + spec_a, state_a, bid_3 = yield from add_new_block(test, spec_a, state_a, slot=3) + yield from select_new_head(test, spec_a, bid_3) + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_light_client_update_for_period(test, period).spec is None + assert get_light_client_finality_update(test).spec is None + assert get_light_client_optimistic_update(test).spec is None + + # Build on branch B, this time with an empty sync aggregate + spec_b, state_b, bid_4 = yield from add_new_block(test, spec_b, state_b, slot=4) + yield from select_new_head(test, spec_b, bid_4) + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == genesis_bid + assert update_attested_bid(get_light_client_finality_update(test).data) == genesis_bid + assert update_attested_bid(get_light_client_optimistic_update(test).data) == genesis_bid + + # Build on branch B, once more with 1 participant + spec_b, state_b, bid_5 = yield from add_new_block(test, spec_b, state_b, slot=5, num_sync_participants=1) + yield from select_new_head(test, spec_b, bid_5) + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == genesis_bid + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_4 + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_4 + + # Build on branch B, this time with 3 participants + spec_b, state_b, bid_6 = yield from add_new_block(test, spec_b, state_b, slot=6, num_sync_participants=3) + yield from select_new_head(test, spec_b, bid_6) + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_5 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_5 + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_5 + + # Build on branch A, with 2 participants + spec_a, state_a, bid_7 = yield from add_new_block(test, spec_a, 
state_a, slot=7, num_sync_participants=2) + yield from select_new_head(test, spec_a, bid_7) + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_3 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_3 + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_3 + + # Branch A: epoch 1, slot 5 + slot = spec_a.compute_start_slot_at_epoch(1) + 5 + spec_a, state_a, bid_1_5 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=4) + yield from select_new_head(test, spec_a, bid_1_5) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_7 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_7 + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_7 + + # Branch B: epoch 2, slot 4 + slot = spec_b.compute_start_slot_at_epoch(2) + 4 + spec_b, state_b, bid_2_4 = yield from add_new_block(test, spec_b, state_b, slot=slot, num_sync_participants=5) + yield from select_new_head(test, spec_b, bid_2_4) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_6 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_6 + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_6 + + # Branch A: epoch 3, slot 0 + slot = spec_a.compute_start_slot_at_epoch(3) + 0 + spec_a, state_a, bid_3_0 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=6) + yield from select_new_head(test, spec_a, bid_3_0) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_light_client_bootstrap(test, bid_3_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_1_5 + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_1_5 + + # Branch A: fill epoch + for i in range(1, spec_a.SLOTS_PER_EPOCH): + spec_a, state_a, bid_a = yield from add_new_block(test, spec_a, state_a) + yield from select_new_head(test, spec_a, bid_a) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_light_client_bootstrap(test, bid_3_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_1_5 + assert 
update_attested_bid(get_light_client_optimistic_update(test).data) == bid_1_5 + assert state_a.slot == spec_a.compute_start_slot_at_epoch(4) - 1 + bid_3_n = bid_a + + # Branch A: epoch 4, slot 0 + slot = spec_a.compute_start_slot_at_epoch(4) + 0 + spec_a, state_a, bid_4_0 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=6) + yield from select_new_head(test, spec_a, bid_4_0) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_light_client_bootstrap(test, bid_3_0.root).spec is None + assert get_light_client_bootstrap(test, bid_4_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_3_n + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_3_n + + # Branch A: fill epoch + for i in range(1, spec_a.SLOTS_PER_EPOCH): + spec_a, state_a, bid_a = yield from add_new_block(test, spec_a, state_a) + yield from select_new_head(test, spec_a, bid_a) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_light_client_bootstrap(test, bid_3_0.root).spec is None + assert get_light_client_bootstrap(test, bid_4_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_3_n + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_3_n + assert state_a.slot == spec_a.compute_start_slot_at_epoch(5) - 1 + bid_4_n = bid_a + + # Branch A: epoch 6, slot 2 + slot = spec_a.compute_start_slot_at_epoch(6) + 2 + spec_a, state_a, bid_6_2 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=6) + yield from select_new_head(test, spec_a, bid_6_2) + assert bootstrap_bid(get_light_client_bootstrap(test, bid_7.root).data) == bid_7 + assert bootstrap_bid(get_light_client_bootstrap(test, bid_1_5.root).data) == bid_1_5 + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert bootstrap_bid(get_light_client_bootstrap(test, bid_3_0.root).data) == bid_3_0 + assert get_light_client_bootstrap(test, bid_4_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert update_attested_bid(get_light_client_finality_update(test).data) == bid_4_n + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_4_n + + # Finish test + yield from finish_test(test) diff --git a/tests/formats/light_client/README.md b/tests/formats/light_client/README.md index 505b416019..050b406f0b 100644 --- a/tests/formats/light_client/README.md +++ b/tests/formats/light_client/README.md @@ -3,6 +3,7 @@ This series of tests provides reference test vectors for the light client sync protocol spec. 
Handlers: +- `data_collection`: see [Light client data collection test format](./data_collection.md) - `single_merkle_proof`: see [Single leaf merkle proof test format](./single_merkle_proof.md) - `sync`: see [Light client sync test format](./sync.md) - `update_ranking`: see [`LightClientUpdate` ranking test format](./update_ranking.md) diff --git a/tests/formats/light_client/data_collection.md b/tests/formats/light_client/data_collection.md new file mode 100644 index 0000000000..d8f13e5ed0 --- /dev/null +++ b/tests/formats/light_client/data_collection.md @@ -0,0 +1,76 @@ +# Light client data collection tests + +This series of tests provies reference test vectors for validating that a full node collects canonical data for serving to light clients implementing the light client sync protocol to sync to the latest block header. + +## Test case format + +### `initial_state.ssz_snappy` + +An SSZ-snappy encoded object of type `BeaconState` to initialize the blockchain from. The state's `slot` is epoch aligned. + +### `steps.yaml` + +The steps to execute in sequence. + +#### `new_block` execution step + +The new block described by the test step should be imported, but does not become head yet. + +```yaml +{ + fork_digest: string -- Encoded `ForkDigest`-context of `block` + data: string -- name of the `*.ssz_snappy` file to load + as a `SignedBeaconBlock` object +} +``` + +#### `new_head` execution step + +The given block (previously imported) should become head, leading to potential updates to: + +- The best `LightClientUpdate` for non-finalized sync committee periods. +- The latest `LightClientFinalityUpdate` and `LightClientOptimisticUpdate`. +- The latest finalized `Checkpoint` (across all branches). +- The available `LightClientBootstrap` instances for newly finalized `Checkpoint`s. + +```yaml +{ + head_block_root: Bytes32 -- string, hex encoded, with 0x prefix + checks: { + latest_finalized_checkpoint: { -- tracked across all branches + epoch: int -- integer, decimal + root: Bytes32 -- string, hex encoded, with 0x prefix + } + bootstraps: [ -- one entry per `LightClientBootstrap` + block_root: Bytes32 -- string, hex encoded, with 0x prefix + bootstrap: { -- only exists if a `LightClientBootstrap` is available + fork_digest: string -- Encoded `ForkDigest`-context of `data` + data: string -- name of the `*.ssz_snappy` file to load + as a `LightClientBootstrap` object + } + ] + best_updates: [ -- one entry per sync committee period + period: int, -- integer, decimal + update: { -- only exists if a best `LightClientUpdate` is available + fork_digest: string -- Encoded `ForkDigest`-context of `data` + data: string -- name of the `*.ssz_snappy` file to load + as a `LightClientUpdate` object + } + ] + latest_finality_update: { -- only exists if a `LightClientFinalityUpdate` is available + fork_digest: string -- Encoded `ForkDigest`-context of `data` + data: string -- name of the `*.ssz_snappy` file to load + as a `LightClientFinalityUpdate` object + } + latest_optimistic_update: { -- only exists if a `LightClientOptimisticUpdate` is available + fork_digest: string -- Encoded `ForkDigest`-context of `data` + data: string -- name of the `*.ssz_snappy` file to load + as a `LightClientOptimisticUpdate` object + } + } +} +``` + +## Condition + +A test-runner should initialize a simplified blockchain from `initial_state`. An external signal is used to control fork choice. The test-runner should then proceed to execute all the test steps in sequence, collecting light client data during execution. 
After each `new_head` step, it should verify that the collected light client data matches the provided `checks`. diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index cfe34aee4b..341321a2ae 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -4,6 +4,7 @@ if __name__ == "__main__": altair_mods = {key: 'eth2spec.test.altair.light_client.test_' + key for key in [ + 'data_collection', 'single_merkle_proof', 'sync', 'update_ranking', From 2154298e080ff30d8adecc34be7ee204f64174f9 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Tue, 6 Feb 2024 13:01:58 +0100 Subject: [PATCH 03/76] Typo --- tests/formats/light_client/data_collection.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/formats/light_client/data_collection.md b/tests/formats/light_client/data_collection.md index d8f13e5ed0..f9c1fa7a0e 100644 --- a/tests/formats/light_client/data_collection.md +++ b/tests/formats/light_client/data_collection.md @@ -1,6 +1,6 @@ # Light client data collection tests -This series of tests provies reference test vectors for validating that a full node collects canonical data for serving to light clients implementing the light client sync protocol to sync to the latest block header. +This series of tests provides reference test vectors for validating that a full node collects canonical data for serving to light clients implementing the light client sync protocol to sync to the latest block header. ## Test case format From 248f32b59a81d44e33612cfd5800f00a5973b119 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Tue, 6 Feb 2024 13:49:21 +0100 Subject: [PATCH 04/76] Lint --- .../light_client/test_data_collection.py | 39 ++++++++++--------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py index 264c654810..2cc39131c1 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -26,8 +26,8 @@ def next_epoch_boundary_slot(spec, slot): - ## Compute the first possible epoch boundary state slot of a `Checkpoint` - ## referring to a block at given slot. + # Compute the first possible epoch boundary state slot of a `Checkpoint` + # referring to a block at given slot. epoch = spec.compute_epoch_at_slot(slot + spec.SLOTS_PER_EPOCH - 1) return spec.compute_start_slot_at_epoch(epoch) @@ -212,7 +212,8 @@ def get_current_sync_committee_for_finalized_period(test, period): # -> Optiona block = test.blocks[bid.root] state = test.finalized_checkpoint_states[block.data.message.state_root] if sync_committee_slot > state.data.slot: - state.spec, state.data, _ = transition_across_forks(state.spec, state.data, sync_committee_slot, phases=test.phases) + state.spec, state.data, _ = transition_across_forks( + state.spec, state.data, sync_committee_slot, phases=test.phases) assert is_post_altair(state.spec) return state.data.current_sync_committee @@ -242,8 +243,8 @@ def sync_aggregate_for_block_id(test, bid): # -> Optional[SyncAggregate] def get_light_client_data(lc_data_store, bid): # -> CachedLightClientData - ## Fetch cached light client data about a given block. - ## Data must be cached (`cache_light_client_data`) before calling this function. + # Fetch cached light client data about a given block. 
+ # Data must be cached (`cache_light_client_data`) before calling this function. try: return lc_data_store.cache.data[bid] except KeyError: @@ -251,9 +252,9 @@ def get_light_client_data(lc_data_store, bid): # -> CachedLightClientData def cache_light_client_data(lc_data_store, spec, state, bid, current_period_best_update, latest_signature_slot): - ## Cache data for a given block and its post-state to speed up creating future - ## `LightClientUpdate` and `LightClientBootstrap` instances that refer to this - ## block and state. + # Cache data for a given block and its post-state to speed up creating future + # `LightClientUpdate` and `LightClientBootstrap` instances that refer to this + # block and state. cached_data = CachedLightClientData( current_sync_committee_branch=spec.compute_merkle_proof(state, spec.CURRENT_SYNC_COMMITTEE_GINDEX), next_sync_committee_branch=spec.compute_merkle_proof(state, spec.NEXT_SYNC_COMMITTEE_GINDEX), @@ -268,8 +269,8 @@ def cache_light_client_data(lc_data_store, spec, state, bid, current_period_best def delete_light_client_data(lc_data_store, bid): - ## Delete cached light client data for a given block. This needs to be called - ## when a block becomes unreachable due to finalization of a different fork. + # Delete cached light client data for a given block. This needs to be called + # when a block becomes unreachable due to finalization of a different fork. del lc_data_store.cache.data[bid] @@ -335,9 +336,9 @@ def create_light_client_update_from_light_client_data(test, def create_light_client_update(test, spec, state, block, parent_bid): - ## Create `LightClientUpdate` instances for a given block and its post-state, - ## and keep track of best / latest ones. Data about the parent block's - ## post-state must be cached (`cache_light_client_data`) before calling this. + # Create `LightClientUpdate` instances for a given block and its post-state, + # and keep track of best / latest ones. Data about the parent block's + # post-state must be cached (`cache_light_client_data`) before calling this. # Verify attested block (parent) is recent enough and that state is available attested_bid = parent_bid @@ -421,7 +422,7 @@ def create_light_client_bootstrap(test, spec, bid): def process_new_block_for_light_client(test, spec, state, block, parent_bid): - ## Update light client data with information from a new block. + # Update light client data with information from a new block. if block.message.slot < test.lc_data_store.cache.tail_slot: return @@ -432,8 +433,8 @@ def process_new_block_for_light_client(test, spec, state, block, parent_bid): def process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid): - ## Update light client data to account for a new head block. - ## Note that `old_finalized_bid` is not yet updated when this is called. + # Update light client data to account for a new head block. + # Note that `old_finalized_bid` is not yet updated when this is called. if head_bid.slot < test.lc_data_store.cache.tail_slot: return @@ -477,9 +478,9 @@ def process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid def process_finalization_for_light_client(test, spec, finalized_bid, old_finalized_bid): - ## Prune cached data that is no longer useful for creating future - ## `LightClientUpdate` and `LightClientBootstrap` instances. - ## This needs to be called whenever `finalized_checkpoint` changes. + # Prune cached data that is no longer useful for creating future + # `LightClientUpdate` and `LightClientBootstrap` instances. 
+ # This needs to be called whenever `finalized_checkpoint` changes. finalized_slot = finalized_bid.slot if finalized_slot < test.lc_data_store.cache.tail_slot: return From c0d037f1b4648738683538aa30ca8ef77bb1a600 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Fri, 23 Feb 2024 13:56:56 +0100 Subject: [PATCH 05/76] Fix missing `optimistc_update` in new tests --- .../eth2spec/test/altair/light_client/test_data_collection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py index 2cc39131c1..8cd32e40a1 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -747,13 +747,13 @@ def select_new_head(test, spec, head_bid): state.data.genesis_validators_root, ) checks["latest_finality_update"] = finality_update_obj - optimistic_update = get_light_client_finality_update(test) + optimistic_update = get_light_client_optimistic_update(test) if optimistic_update.spec is not None: optimistic_update_obj = yield from encode_object( test, "optimistic_update", optimistic_update, optimistic_update.data.attested_header.beacon.slot, state.data.genesis_validators_root, ) - checks["latest_finality_update"] = optimistic_update_obj + checks["latest_optimistic_update"] = optimistic_update_obj test.steps.append({ "new_head": { From b8f0ddcf78da9da31e99162648b17f82b709a29c Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Sun, 3 Mar 2024 20:49:37 +0100 Subject: [PATCH 06/76] Add more tests for multi-period reorgs --- .../light_client/test_data_collection.py | 157 +++++++++++++++++- 1 file changed, 156 insertions(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py index 8cd32e40a1..55ee5a74be 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -4,11 +4,16 @@ from eth_utils import encode_hex from eth2spec.test.context import ( spec_state_test_with_matching_config, + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, with_presets, + with_state, with_light_client, ) from eth2spec.test.helpers.constants import ( - ALTAIR, + ALTAIR, BELLATRIX, CAPELLA, DENEB, MINIMAL, ) from eth2spec.test.helpers.fork_transition import ( @@ -933,3 +938,153 @@ def test_light_client_data_collection(spec, state): # Finish test yield from finish_test(test) + + +def run_test_multi_fork(spec, phases, state, fork_1, fork_2): + # Start test + test = yield from setup_test(spec, state, phases=phases) + + # Genesis block is post Altair and is finalized, so can be used as bootstrap + genesis_bid = BlockId(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) + assert bootstrap_bid(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid + + # Shared history up to final epoch of period before `fork_1` + fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') + fork_1_period = spec.compute_sync_committee_period(fork_1_epoch) + slot = compute_start_slot_at_sync_committee_period(spec, fork_1_period) - spec.SLOTS_PER_EPOCH + spec, state, bid = yield from add_new_block(test, spec, state, slot=slot, 
num_sync_participants=1) + yield from select_new_head(test, spec, bid) + assert get_light_client_bootstrap(test, bid.root).spec is None + slot_period = spec.compute_sync_committee_period_at_slot(slot) + if slot_period == 0: + assert update_attested_bid(get_light_client_update_for_period(test, 0).data) == genesis_bid + else: + for period in range(0, slot_period): + assert get_light_client_update_for_period(test, period).spec is None # attested period != signature period + state_period = spec.compute_sync_committee_period_at_slot(state.slot) + + # Branch A: Advance past `fork_2`, having blocks at slots 0 and 4 of each epoch + spec_a = spec + state_a = state + slot_a = state_a.slot + bids_a = [bid] + num_sync_participants_a = 1 + fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') + while spec_a.get_current_epoch(state_a) <= fork_2_epoch: + attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + slot_a += 4 + signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + if signature_period != attested_period: + num_sync_participants_a = 0 + num_sync_participants_a += 1 + spec_a, state_a, bid_a = yield from add_new_block( + test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) + yield from select_new_head(test, spec_a, bid_a) + for bid in bids_a: + assert get_light_client_bootstrap(test, bid.root).spec is None + if attested_period == signature_period: + assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] + else: + assert signature_period == attested_period + 1 + assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert update_attested_bid(get_light_client_finality_update(test).data) == bids_a[-1] + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bids_a[-1] + bids_a.append(bid_a) + + # Branch B: Advance past `fork_2`, having blocks at slots 1 and 5 of each epoch but no sync participation + spec_b = spec + state_b = state + slot_b = state_b.slot + bids_b = [bid] + while spec_b.get_current_epoch(state_b) <= fork_2_epoch: + slot_b += 4 + signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + spec_b, state_b, bid_b = yield from add_new_block( + test, spec_b, state_b, slot=slot_b) + # Simulate that this does not become head yet, e.g., this branch was withheld + for bid in bids_b: + assert get_light_client_bootstrap(test, bid.root).spec is None + bids_b.append(bid_b) + + # Branch B: Another block that becomes head + attested_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + slot_b += 1 + signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + num_sync_participants_b = 1 + spec_b, state_b, bid_b = yield from add_new_block( + test, spec_b, state_b, slot=slot_b, num_sync_participants=num_sync_participants_b) + yield from select_new_head(test, spec_b, bid_b) + for bid in bids_b: + assert get_light_client_bootstrap(test, bid.root).spec is None + if attested_period == signature_period: + assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_b[-1] + else: + assert signature_period == attested_period + 1 + assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_b[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert 
update_attested_bid(get_light_client_finality_update(test).data) == bids_b[-1] + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bids_b[-1] + bids_b.append(bid_b) + + # All data for periods between the common ancestor of the two branches should have reorged. + # As there was no sync participation on branch B, that means it is deleted. + state_b_period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + for period in range(state_period + 1, state_b_period): + assert get_light_client_update_for_period(test, period).spec is None + + # Branch A: Another block, reorging branch B once more + attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + slot_a = slot_b + 1 + signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + if signature_period != attested_period: + num_sync_participants_a = 0 + num_sync_participants_a += 1 + spec_a, state_a, bid_a = yield from add_new_block( + test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) + yield from select_new_head(test, spec_a, bid_a) + for bid in bids_a: + assert get_light_client_bootstrap(test, bid.root).spec is None + if attested_period == signature_period: + assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] + else: + assert signature_period == attested_period + 1 + assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert update_attested_bid(get_light_client_finality_update(test).data) == bids_a[-1] + assert update_attested_bid(get_light_client_optimistic_update(test).data) == bids_a[-1] + bids_a.append(bid_a) + + # Data has been restored + state_a_period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + for period in range(state_period + 1, state_a_period): + assert get_light_client_update_for_period(test, period).spec is not None + + # Finish test + yield from finish_test(test) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 + 'DENEB_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_aligned(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) + 'DENEB_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_unaligned(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) From 337cd1edbdd9b6889648fce989838b38925f645a Mon Sep 17 00:00:00 2001 From: Manu NALEPA Date: Thu, 8 Aug 2024 17:13:06 +0200 Subject: [PATCH 07/76] ENR structure: Add `tcp6`, `quic6` and `udp6`. As discussed in ACDC #139. 
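
Editor's note, for illustration only (not part of the patch): the change below adds IPv6 counterparts for the existing port entries, so a dual-stack node can advertise both address families in its ENR. The sketch lists the key/value entries such a node might expose after this change; the key names follow EIP-778 and this spec, while the addresses and port numbers are made-up example values.

```python
# Hypothetical ENR entries for a dual-stack node once `tcp6`, `quic6` and
# `udp6` are specified. Values are illustrative only; in a real ENR these are
# RLP-encoded key/value pairs per EIP-778.
example_enr_entries = {
    "ip": "192.0.2.1",       # IPv4 address
    "tcp": 9000,             # IPv4 libp2p TCP listening port
    "quic": 9001,            # IPv4 libp2p QUIC (UDP) listening port
    "udp": 9000,             # IPv4 discv5 listening port
    "ip6": "2001:db8::1",    # IPv6 address
    "tcp6": 9000,            # IPv6 libp2p TCP listening port (added here)
    "quic6": 9001,           # IPv6 libp2p QUIC (UDP) listening port (added here)
    "udp6": 9000,            # IPv6 discv5 listening port (added here)
}
```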
--- specs/phase0/p2p-interface.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index e8c2ce9d63..fa569573f3 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -959,9 +959,9 @@ The Ethereum Node Record (ENR) for an Ethereum consensus client MUST contain the The ENR MAY contain the following entries: - An IPv4 address (`ip` field) and/or IPv6 address (`ip6` field). -- A TCP port (`tcp` field) representing the local libp2p TCP listening port. -- A QUIC port (`quic` field) representing the local libp2p QUIC (UDP) listening port. -- A UDP port (`udp` field) representing the local discv5 listening port. +- An IPv4 TCP port (`tcp` field) representing the local libp2p TCP listening port and/or the corresponding IPv6 port (`tcp6` field). +- An IPv4 QUIC port (`quic` field) representing the local libp2p QUIC (UDP) listening port and/or the corresponding IPv6 port (`quic6` field). +- An IPv4 UDP port (`udp` field) representing the local discv5 listening port and/or the corresponding IPv6 port (`udp6` field). Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778). From 946849637f89c8c182c71f5f8a16ac0fe6d216dc Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Fri, 22 Nov 2024 07:20:53 -0600 Subject: [PATCH 08/76] Fix nits in data_collection format --- tests/formats/light_client/data_collection.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/formats/light_client/data_collection.md b/tests/formats/light_client/data_collection.md index f9c1fa7a0e..b0d17a68e9 100644 --- a/tests/formats/light_client/data_collection.md +++ b/tests/formats/light_client/data_collection.md @@ -18,7 +18,7 @@ The new block described by the test step should be imported, but does not become ```yaml { - fork_digest: string -- Encoded `ForkDigest`-context of `block` + fork_digest: string -- encoded `ForkDigest`-context of `block` data: string -- name of the `*.ssz_snappy` file to load as a `SignedBeaconBlock` object } @@ -44,26 +44,26 @@ The given block (previously imported) should become head, leading to potential u bootstraps: [ -- one entry per `LightClientBootstrap` block_root: Bytes32 -- string, hex encoded, with 0x prefix bootstrap: { -- only exists if a `LightClientBootstrap` is available - fork_digest: string -- Encoded `ForkDigest`-context of `data` + fork_digest: string -- encoded `ForkDigest`-context of `data` data: string -- name of the `*.ssz_snappy` file to load as a `LightClientBootstrap` object } ] best_updates: [ -- one entry per sync committee period - period: int, -- integer, decimal + period: int -- integer, decimal update: { -- only exists if a best `LightClientUpdate` is available - fork_digest: string -- Encoded `ForkDigest`-context of `data` + fork_digest: string -- encoded `ForkDigest`-context of `data` data: string -- name of the `*.ssz_snappy` file to load as a `LightClientUpdate` object } ] latest_finality_update: { -- only exists if a `LightClientFinalityUpdate` is available - fork_digest: string -- Encoded `ForkDigest`-context of `data` + fork_digest: string -- encoded `ForkDigest`-context of `data` data: string -- name of the `*.ssz_snappy` file to load as a `LightClientFinalityUpdate` object } latest_optimistic_update: { -- only exists if a `LightClientOptimisticUpdate` is available - fork_digest: string -- Encoded `ForkDigest`-context of 
`data` + fork_digest: string -- encoded `ForkDigest`-context of `data` data: string -- name of the `*.ssz_snappy` file to load as a `LightClientOptimisticUpdate` object } From 5639ca69d6ae13ffbaeafd29561e5fce448394fe Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Fri, 22 Nov 2024 09:45:56 -0600 Subject: [PATCH 09/76] Rename two classes for consistency --- .../light_client/test_data_collection.py | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py index 27e8e5437c..57a7183077 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -42,13 +42,13 @@ def next_epoch_boundary_slot(spec, slot): @dataclass(frozen=True) -class BlockId(object): +class BlockID(object): slot: Any root: Any def block_to_block_id(block): - return BlockId( + return BlockID( slot=block.message.slot, root=block.message.hash_tree_root(), ) @@ -57,18 +57,18 @@ def block_to_block_id(block): def state_to_block_id(state): parent_header = state.latest_block_header.copy() parent_header.state_root = state.hash_tree_root() - return BlockId(slot=parent_header.slot, root=parent_header.hash_tree_root()) + return BlockID(slot=parent_header.slot, root=parent_header.hash_tree_root()) def bootstrap_bid(bootstrap): - return BlockId( + return BlockID( slot=bootstrap.header.beacon.slot, root=bootstrap.header.beacon.hash_tree_root(), ) def update_attested_bid(update): - return BlockId( + return BlockID( slot=update.attested_header.beacon.slot, root=update.attested_header.beacon.hash_tree_root(), ) @@ -136,7 +136,7 @@ class LightClientDataCache(object): # Cached data for creating future `LightClientUpdate` instances. # Key is the block ID of which the post state was used to get the data. # Data stored for the finalized head block and all non-finalized blocks. - data: Dict[BlockId, CachedLightClientData] + data: Dict[BlockID, CachedLightClientData] # Light client data for the latest slot that was signed by at least # `MIN_SYNC_COMMITTEE_PARTICIPANTS`. 
May be older than head @@ -147,7 +147,7 @@ class LightClientDataCache(object): @dataclass -class LightClientDataDb(object): +class LightClientDataDB(object): headers: Dict[Any, ForkedLightClientHeader] # Root -> ForkedLightClientHeader current_branches: Dict[Any, Any] # Slot -> CurrentSyncCommitteeBranch sync_committees: Dict[Any, Any] # SyncCommitteePeriod -> SyncCommittee @@ -162,7 +162,7 @@ class LightClientDataStore(object): cache: LightClientDataCache # Persistent light client data - db: LightClientDataDb + db: LightClientDataDB @dataclass @@ -179,14 +179,14 @@ class LightClientDataCollectionTest(object): states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState finalized_checkpoint_states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState latest_finalized_epoch: Any # Epoch - latest_finalized_bid: BlockId + latest_finalized_bid: BlockID historical_tail_slot: Any # Slot # Light client data lc_data_store: LightClientDataStore -def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockId] +def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockID] try: block = test.blocks[bid.root] while True: @@ -198,10 +198,10 @@ def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockId] return None -def block_id_at_finalized_slot(test, slot): # -> Optional[BlockId] +def block_id_at_finalized_slot(test, slot): # -> Optional[BlockID] while slot >= test.historical_tail_slot: try: - return BlockId(slot=slot, root=test.finalized_block_roots[slot]) + return BlockID(slot=slot, root=test.finalized_block_roots[slot]) except KeyError: slot = slot - 1 return None @@ -586,7 +586,7 @@ def setup_test(spec, state, phases=None): states={}, finalized_checkpoint_states={}, latest_finalized_epoch=state.finalized_checkpoint.epoch, - latest_finalized_bid=BlockId( + latest_finalized_bid=BlockID( slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), root=state.finalized_checkpoint.root, ), @@ -598,7 +598,7 @@ def setup_test(spec, state, phases=None): latest=ForkedLightClientFinalityUpdate(spec=None, data=None), tail_slot=max(state.slot, spec.compute_start_slot_at_epoch(spec.config.ALTAIR_FORK_EPOCH)), ), - db=LightClientDataDb( + db=LightClientDataDB( headers={}, current_branches={}, sync_committees={}, @@ -792,7 +792,7 @@ def test_light_client_data_collection(spec, state): test = yield from setup_test(spec, state) # Genesis block is post Altair and is finalized, so can be used as bootstrap - genesis_bid = BlockId(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) + genesis_bid = BlockID(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) assert bootstrap_bid(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid # No blocks have been imported, so no other light client data is available @@ -961,7 +961,7 @@ def run_test_multi_fork(spec, phases, state, fork_1, fork_2): test = yield from setup_test(spec, state, phases=phases) # Genesis block is post Altair and is finalized, so can be used as bootstrap - genesis_bid = BlockId(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) + genesis_bid = BlockID(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) assert bootstrap_bid(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid # Shared history up to final epoch of period before `fork_1` From aff4e348354cab9be3ffadb90a4ac78eeb41cf82 Mon Sep 17 00:00:00 2001 From: 
Justin Traglia Date: Fri, 22 Nov 2024 10:43:05 -0600 Subject: [PATCH 10/76] Move bellatrix/capella tests into respective dirs --- .../test/bellatrix/light_client/__init__.py | 0 .../light_client/test_data_collection.py | 41 +++++++++++++++++++ .../light_client/test_data_collection.py | 40 ++++++++++++++++++ tests/generators/light_client/main.py | 8 +++- 4 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py create mode 100644 tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py new file mode 100644 index 0000000000..dced8d0b3e --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py @@ -0,0 +1,41 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + BELLATRIX, CAPELLA, DENEB, + MINIMAL, +) +from eth2spec.test.altair.light_client.test_data_collection import ( + run_test_multi_fork +) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 + 'DENEB_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_aligned(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) + 'DENEB_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_unaligned(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py new file mode 100644 index 0000000000..7911f1c320 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py @@ -0,0 +1,40 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + CAPELLA, DENEB, ELECTRA, + MINIMAL, +) +from eth2spec.test.altair.light_client.test_data_collection import ( + run_test_multi_fork +) + +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 + 'ELECTRA_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def 
test_deneb_electra_reorg_aligned(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) + 'ELECTRA_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_electra_reorg_unaligned(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index 04d1d423be..2501773ac5 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -9,12 +9,18 @@ 'sync', 'update_ranking', ]} - bellatrix_mods = altair_mods + + _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.light_client.test_' + key for key in [ + 'data_collection', + ]} + bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) _new_capella_mods = {key: 'eth2spec.test.capella.light_client.test_' + key for key in [ + 'data_collection', 'single_merkle_proof', ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) + deneb_mods = capella_mods electra_mods = deneb_mods From b6259a9fd7f6bca6ae89dc09f04f2f0d61638469 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Fri, 22 Nov 2024 10:59:05 -0600 Subject: [PATCH 11/76] Revert "Move bellatrix/capella tests into respective dirs" This reverts commit aff4e348354cab9be3ffadb90a4ac78eeb41cf82. --- .../test/bellatrix/light_client/__init__.py | 0 .../light_client/test_data_collection.py | 41 ------------------- .../light_client/test_data_collection.py | 40 ------------------ tests/generators/light_client/main.py | 8 +--- 4 files changed, 1 insertion(+), 88 deletions(-) delete mode 100644 tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py delete mode 100644 tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py delete mode 100644 tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py deleted file mode 100644 index dced8d0b3e..0000000000 --- a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py +++ /dev/null @@ -1,41 +0,0 @@ -from eth2spec.test.context import ( - spec_test, - with_config_overrides, - with_matching_spec_config, - with_phases, - with_presets, - with_state, -) -from eth2spec.test.helpers.constants import ( - BELLATRIX, CAPELLA, DENEB, - MINIMAL, -) -from eth2spec.test.altair.light_client.test_data_collection import ( - run_test_multi_fork -) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 - 'DENEB_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_reorg_aligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) - - 
-@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) - 'DENEB_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_reorg_unaligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py deleted file mode 100644 index 7911f1c320..0000000000 --- a/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py +++ /dev/null @@ -1,40 +0,0 @@ -from eth2spec.test.context import ( - spec_test, - with_config_overrides, - with_matching_spec_config, - with_phases, - with_presets, - with_state, -) -from eth2spec.test.helpers.constants import ( - CAPELLA, DENEB, ELECTRA, - MINIMAL, -) -from eth2spec.test.altair.light_client.test_data_collection import ( - run_test_multi_fork -) - -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 'DENEB_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 - 'ELECTRA_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_electra_reorg_aligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) - - -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 'DENEB_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) - 'ELECTRA_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_electra_reorg_unaligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index 2501773ac5..04d1d423be 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -9,18 +9,12 @@ 'sync', 'update_ranking', ]} - - _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.light_client.test_' + key for key in [ - 'data_collection', - ]} - bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) + bellatrix_mods = altair_mods _new_capella_mods = {key: 'eth2spec.test.capella.light_client.test_' + key for key in [ - 'data_collection', 'single_merkle_proof', ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) - deneb_mods = capella_mods electra_mods = deneb_mods From e00e866b84cb5b1b3a5fd25ef9af6d43088cb479 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 27 Nov 2024 13:15:08 +0100 Subject: [PATCH 12/76] Synchronise capitalization change request across files --- tests/formats/light_client/sync.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/formats/light_client/sync.md b/tests/formats/light_client/sync.md index 1706b4c162..c6e62a7c8b 100644 --- a/tests/formats/light_client/sync.md +++ b/tests/formats/light_client/sync.md @@ -9,8 +9,8 @@ This series of tests provides reference test vectors for validating that a light ```yaml genesis_validators_root: Bytes32 -- string, hex encoded, with 0x 
prefix trusted_block_root: Bytes32 -- string, hex encoded, with 0x prefix -bootstrap_fork_digest: string -- Encoded `ForkDigest`-context of `bootstrap` -store_fork_digest: string -- Encoded `ForkDigest`-context of `store` object being tested +bootstrap_fork_digest: string -- encoded `ForkDigest`-context of `bootstrap` +store_fork_digest: string -- encoded `ForkDigest`-context of `store` object being tested ``` ### `bootstrap.ssz_snappy` @@ -60,7 +60,7 @@ The function `process_light_client_update(store, update, current_slot, genesis_v ```yaml { - update_fork_digest: string -- Encoded `ForkDigest`-context of `update` + update_fork_digest: string -- encoded `ForkDigest`-context of `update` update: string -- name of the `*.ssz_snappy` file to load as a `LightClientUpdate` object current_slot: int -- integer, decimal @@ -78,7 +78,7 @@ The `store` should be upgraded to reflect the new `store_fork_digest`: ```yaml { - store_fork_digest: string -- Encoded `ForkDigest`-context of `store` + store_fork_digest: string -- encoded `ForkDigest`-context of `store` checks: {: value} -- the assertions. } ``` From 84bef3c6881edaa4892362461433a2de3f848e52 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 27 Nov 2024 13:50:37 +0100 Subject: [PATCH 13/76] Split LC sync test into multiple files --- .../test/altair/light_client/test_sync.py | 460 +----------------- .../test/capella/light_client/test_sync.py | 36 ++ .../test/deneb/light_client/__init__.py | 0 .../test/deneb/light_client/test_sync.py | 50 ++ .../test/electra/light_client/__init__.py | 0 .../test/electra/light_client/test_sync.py | 64 +++ .../test/helpers/light_client_sync.py | 342 +++++++++++++ 7 files changed, 505 insertions(+), 447 deletions(-) create mode 100644 tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py create mode 100644 tests/core/pyspec/eth2spec/test/deneb/light_client/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py create mode 100644 tests/core/pyspec/eth2spec/test/electra/light_client/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py create mode 100644 tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py index 45c7d77887..8000ceb799 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py @@ -1,14 +1,6 @@ -from typing import (Any, Dict, List) - -from eth_utils import encode_hex from eth2spec.test.context import ( spec_state_test_with_matching_config, - spec_test, - with_config_overrides, - with_matching_spec_config, - with_phases, with_presets, - with_state, with_light_client, ) from eth2spec.test.helpers.attestations import ( @@ -16,23 +8,17 @@ state_transition_with_full_block, ) from eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, MINIMAL, ) -from eth2spec.test.helpers.fork_transition import ( - do_fork, - transition_across_forks, -) -from eth2spec.test.helpers.forks import ( - get_spec_for_fork_version, - is_post_capella, is_post_deneb, is_post_electra, -) from eth2spec.test.helpers.light_client import ( - compute_start_slot_at_next_sync_committee_period, get_sync_aggregate, - upgrade_lc_bootstrap_to_new_spec, - upgrade_lc_update_to_new_spec, - upgrade_lc_store_to_new_spec, + compute_start_slot_at_next_sync_committee_period, +) +from 
eth2spec.test.helpers.light_client_sync import ( + emit_force_update, + emit_update, + finish_lc_sync_test, + setup_lc_sync_test, ) from eth2spec.test.helpers.state import ( next_slots, @@ -40,162 +26,12 @@ ) -class LightClientSyncTest(object): - steps: List[Dict[str, Any]] - genesis_validators_root: Any - s_spec: Any - store: Any - - -def get_store_fork_version(s_spec): - if is_post_electra(s_spec): - return s_spec.config.ELECTRA_FORK_VERSION - if is_post_deneb(s_spec): - return s_spec.config.DENEB_FORK_VERSION - if is_post_capella(s_spec): - return s_spec.config.CAPELLA_FORK_VERSION - return s_spec.config.ALTAIR_FORK_VERSION - - -def setup_test(spec, state, s_spec=None, phases=None): - test = LightClientSyncTest() - test.steps = [] - - if s_spec is None: - s_spec = spec - if phases is None: - phases = { - spec.fork: spec, - s_spec.fork: s_spec, - } - test.s_spec = s_spec - - yield "genesis_validators_root", "meta", "0x" + state.genesis_validators_root.hex() - test.genesis_validators_root = state.genesis_validators_root - - next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1) - trusted_block = state_transition_with_full_block(spec, state, True, True) - trusted_block_root = trusted_block.message.hash_tree_root() - yield "trusted_block_root", "meta", "0x" + trusted_block_root.hex() - - data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(trusted_block.message.slot)) - data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root) - d_spec = get_spec_for_fork_version(spec, data_fork_version, phases) - data = d_spec.create_light_client_bootstrap(state, trusted_block) - yield "bootstrap_fork_digest", "meta", encode_hex(data_fork_digest) - yield "bootstrap", data - - upgraded = upgrade_lc_bootstrap_to_new_spec(d_spec, test.s_spec, data, phases) - test.store = test.s_spec.initialize_light_client_store(trusted_block_root, upgraded) - store_fork_version = get_store_fork_version(test.s_spec) - store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root) - yield "store_fork_digest", "meta", encode_hex(store_fork_digest) - - return test - - -def finish_test(test): - yield "steps", test.steps - - -def get_update_file_name(d_spec, update): - if d_spec.is_sync_committee_update(update): - suffix1 = "s" - else: - suffix1 = "x" - if d_spec.is_finality_update(update): - suffix2 = "f" - else: - suffix2 = "x" - return f"update_{encode_hex(update.attested_header.beacon.hash_tree_root())}_{suffix1}{suffix2}" - - -def get_checks(s_spec, store): - if is_post_capella(s_spec): - return { - "finalized_header": { - 'slot': int(store.finalized_header.beacon.slot), - 'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()), - 'execution_root': encode_hex(s_spec.get_lc_execution_root(store.finalized_header)), - }, - "optimistic_header": { - 'slot': int(store.optimistic_header.beacon.slot), - 'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()), - 'execution_root': encode_hex(s_spec.get_lc_execution_root(store.optimistic_header)), - }, - } - - return { - "finalized_header": { - 'slot': int(store.finalized_header.beacon.slot), - 'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()), - }, - "optimistic_header": { - 'slot': int(store.optimistic_header.beacon.slot), - 'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()), - }, - } - - -def emit_force_update(test, spec, state): - current_slot = state.slot - 
test.s_spec.process_light_client_store_force_update(test.store, current_slot) - - yield from [] # Consistently enable `yield from` syntax in calling tests - test.steps.append({ - "force_update": { - "current_slot": int(current_slot), - "checks": get_checks(test.s_spec, test.store), - } - }) - - -def emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=True, phases=None): - data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(attested_block.message.slot)) - data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root) - d_spec = get_spec_for_fork_version(spec, data_fork_version, phases) - data = d_spec.create_light_client_update(state, block, attested_state, attested_block, finalized_block) - if not with_next: - data.next_sync_committee = spec.SyncCommittee() - data.next_sync_committee_branch = spec.NextSyncCommitteeBranch() - current_slot = state.slot - - upgraded = upgrade_lc_update_to_new_spec(d_spec, test.s_spec, data, phases) - test.s_spec.process_light_client_update(test.store, upgraded, current_slot, test.genesis_validators_root) - - yield get_update_file_name(d_spec, data), data - test.steps.append({ - "process_update": { - "update_fork_digest": encode_hex(data_fork_digest), - "update": get_update_file_name(d_spec, data), - "current_slot": int(current_slot), - "checks": get_checks(test.s_spec, test.store), - } - }) - return upgraded - - -def emit_upgrade_store(test, new_s_spec, phases=None): - test.store = upgrade_lc_store_to_new_spec(test.s_spec, new_s_spec, test.store, phases) - test.s_spec = new_s_spec - store_fork_version = get_store_fork_version(test.s_spec) - store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root) - - yield from [] # Consistently enable `yield from` syntax in calling tests - test.steps.append({ - "upgrade_store": { - "store_fork_digest": encode_hex(store_fork_digest), - "checks": get_checks(test.s_spec, test.store), - } - }) - - @with_light_client @spec_state_test_with_matching_config @with_presets([MINIMAL], reason="too slow") def test_light_client_sync(spec, state): # Start test - test = yield from setup_test(spec, state) + test = yield from setup_lc_sync_test(spec, state) # Initial `LightClientUpdate`, populating `store.next_sync_committee` # ``` @@ -409,7 +245,7 @@ def test_light_client_sync(spec, state): assert test.store.optimistic_header.beacon.slot == attested_state.slot # Finish test - yield from finish_test(test) + yield from finish_lc_sync_test(test) @with_light_client @@ -428,7 +264,7 @@ def test_supply_sync_committee_from_past_update(spec, state): past_state = state.copy() # Start test - test = yield from setup_test(spec, state) + test = yield from setup_lc_sync_test(spec, state) assert not spec.is_next_sync_committee_known(test.store) # Apply `LightClientUpdate` from the past, populating `store.next_sync_committee` @@ -439,7 +275,7 @@ def test_supply_sync_committee_from_past_update(spec, state): assert test.store.optimistic_header.beacon.slot == state.slot # Finish test - yield from finish_test(test) + yield from finish_lc_sync_test(test) @with_light_client @@ -447,7 +283,7 @@ def test_supply_sync_committee_from_past_update(spec, state): @with_presets([MINIMAL], reason="too slow") def test_advance_finality_without_sync_committee(spec, state): # Start test - test = yield from setup_test(spec, state) + test = yield from setup_lc_sync_test(spec, state) # Initial `LightClientUpdate`, populating 
`store.next_sync_committee` next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1) @@ -515,274 +351,4 @@ def test_advance_finality_without_sync_committee(spec, state): assert test.store.optimistic_header.beacon.slot == attested_state.slot # Finish test - yield from finish_test(test) - - -def run_test_single_fork(spec, phases, state, fork): - # Start test - test = yield from setup_test(spec, state, phases=phases) - - # Initial `LightClientUpdate` - finalized_block = spec.SignedBeaconBlock() - finalized_block.message.state_root = state.hash_tree_root() - finalized_state = state.copy() - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Jump to two slots before fork - fork_epoch = getattr(phases[fork].config, fork.upper() + '_FORK_EPOCH') - transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch) - 4) - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - update = yield from emit_update( - test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Perform `LightClientStore` upgrade - yield from emit_upgrade_store(test, phases[fork], phases=phases) - update = test.store.best_valid_update - - # Final slot before fork, check that importing the pre-fork format still works - attested_block = block.copy() - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Upgrade to post-fork spec, attested block is still before the fork - attested_block = block.copy() - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate) - spec = phases[fork] - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee 
- assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Another block after the fork, this time attested block is after the fork - attested_block = block.copy() - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Jump to next epoch - transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch + 1) - 2) - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finalize the fork - finalized_block = block.copy() - finalized_state = state.copy() - _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True) - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finish test - yield from finish_test(test) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=CAPELLA) -@with_presets([MINIMAL], reason="too slow") -def test_capella_fork(spec, phases, state): - yield from run_test_single_fork(spec, phases, state, CAPELLA) - - -@with_phases(phases=[CAPELLA], other_phases=[DENEB]) -@spec_test -@with_config_overrides({ - 'DENEB_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_fork(spec, phases, state): - yield from run_test_single_fork(spec, phases, state, DENEB) - - -@with_phases(phases=[DENEB], other_phases=[ELECTRA]) -@spec_test -@with_config_overrides({ - 'ELECTRA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") 
-def test_electra_fork(spec, phases, state): - yield from run_test_single_fork(spec, phases, state, ELECTRA) - - -def run_test_multi_fork(spec, phases, state, fork_1, fork_2): - # Start test - test = yield from setup_test(spec, state, phases[fork_2], phases) - - # Set up so that finalized is from `spec`, ... - finalized_block = spec.SignedBeaconBlock() - finalized_block.message.state_root = state.hash_tree_root() - finalized_state = state.copy() - - # ..., attested is from `fork_1`, ... - fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') - spec, state, attested_block = transition_across_forks( - spec, - state, - spec.compute_start_slot_at_epoch(fork_1_epoch), - phases, - with_block=True, - ) - attested_state = state.copy() - - # ..., and signature is from `fork_2` - fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') - spec, state, _ = transition_across_forks( - spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1, phases) - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - spec, state, block = transition_across_forks( - spec, - state, - spec.compute_start_slot_at_epoch(fork_2_epoch), - phases, - with_block=True, - sync_aggregate=sync_aggregate, - ) - - # Check that update applies - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finish test - yield from finish_test(test) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 - 'DENEB_FORK_EPOCH': 4, -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_fork(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 - 'DENEB_FORK_EPOCH': 4, - 'ELECTRA_FORK_EPOCH': 5, -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_capella_electra_fork(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, ELECTRA) - - -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 'DENEB_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 - 'ELECTRA_FORK_EPOCH': 4, -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_electra_fork(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) - - -def run_test_upgraded_store_with_legacy_data(spec, phases, state, fork): - # Start test (Legacy bootstrap with an upgraded store) - test = yield from setup_test(spec, state, phases[fork], phases) - - # Initial `LightClientUpdate` (check that the upgraded store can process it) - finalized_block = spec.SignedBeaconBlock() - finalized_block.message.state_root = state.hash_tree_root() - finalized_state = state.copy() - attested_block = state_transition_with_full_block(spec, 
state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finish test - yield from finish_test(test) - - -@with_phases(phases=[ALTAIR, BELLATRIX], other_phases=[CAPELLA]) -@spec_test -@with_state -@with_matching_spec_config(emitted_fork=CAPELLA) -@with_presets([MINIMAL], reason="too slow") -def test_capella_store_with_legacy_data(spec, phases, state): - yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) - - -@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_store_with_legacy_data(spec, phases, state): - yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) - - -@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA, DENEB], other_phases=[CAPELLA, DENEB, ELECTRA]) -@spec_test -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_electra_store_with_legacy_data(spec, phases, state): - yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, ELECTRA) + yield from finish_lc_sync_test(test) diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py new file mode 100644 index 0000000000..3958900be5 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py @@ -0,0 +1,36 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + ALTAIR, BELLATRIX, CAPELLA, + MINIMAL, +) +from eth2spec.test.helpers.light_client_sync import ( + run_lc_sync_test_single_fork, + run_lc_sync_test_upgraded_store_with_legacy_data, +) + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=CAPELLA) +@with_presets([MINIMAL], reason="too slow") +def test_capella_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, CAPELLA) + + +@with_phases(phases=[ALTAIR, BELLATRIX], other_phases=[CAPELLA]) +@spec_test +@with_state +@with_matching_spec_config(emitted_fork=CAPELLA) +@with_presets([MINIMAL], reason="too slow") +def test_capella_store_with_legacy_data(spec, phases, state): + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/__init__.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py new file mode 100644 index 0000000000..d19e1e0238 --- 
/dev/null +++ b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py @@ -0,0 +1,50 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + ALTAIR, BELLATRIX, CAPELLA, DENEB, + MINIMAL, +) +from eth2spec.test.helpers.light_client_sync import ( + run_lc_sync_test_multi_fork, + run_lc_sync_test_single_fork, + run_lc_sync_test_upgraded_store_with_legacy_data, +) + +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, DENEB) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'DENEB_FORK_EPOCH': 4, +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, DENEB) + + +@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_store_with_legacy_data(spec, phases, state): + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/__init__.py b/tests/core/pyspec/eth2spec/test/electra/light_client/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py new file mode 100644 index 0000000000..2b20552d6b --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py @@ -0,0 +1,64 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, + MINIMAL, +) +from eth2spec.test.helpers.light_client_sync import ( + run_lc_sync_test_multi_fork, + run_lc_sync_test_single_fork, + run_lc_sync_test_upgraded_store_with_legacy_data, +) + +@with_phases(phases=[DENEB], other_phases=[ELECTRA]) +@spec_test +@with_config_overrides({ + 'ELECTRA_FORK_EPOCH': 3, # Test setup advances to epoch 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_electra_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, ELECTRA) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'DENEB_FORK_EPOCH': 4, + 'ELECTRA_FORK_EPOCH': 5, +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_capella_electra_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, ELECTRA) + + +@with_phases(phases=[CAPELLA], 
other_phases=[DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'ELECTRA_FORK_EPOCH': 4, +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_electra_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, DENEB, ELECTRA) + + +@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA, DENEB], other_phases=[CAPELLA, DENEB, ELECTRA]) +@spec_test +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_electra_store_with_legacy_data(spec, phases, state): + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py b/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py new file mode 100644 index 0000000000..e64b0a2eca --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py @@ -0,0 +1,342 @@ +from typing import (Any, Dict, List) + +from eth_utils import encode_hex +from eth2spec.test.helpers.attestations import ( + next_slots_with_attestations, + state_transition_with_full_block, +) +from eth2spec.test.helpers.fork_transition import ( + do_fork, + transition_across_forks, +) +from eth2spec.test.helpers.forks import ( + get_spec_for_fork_version, + is_post_capella, is_post_deneb, is_post_electra, +) +from eth2spec.test.helpers.light_client import ( + get_sync_aggregate, + upgrade_lc_bootstrap_to_new_spec, + upgrade_lc_update_to_new_spec, + upgrade_lc_store_to_new_spec, +) +from eth2spec.test.helpers.state import ( + next_slots, + transition_to, +) + + +class LightClientSyncTest(object): + steps: List[Dict[str, Any]] + genesis_validators_root: Any + s_spec: Any + store: Any + + +def _get_store_fork_version(s_spec): + if is_post_electra(s_spec): + return s_spec.config.ELECTRA_FORK_VERSION + if is_post_deneb(s_spec): + return s_spec.config.DENEB_FORK_VERSION + if is_post_capella(s_spec): + return s_spec.config.CAPELLA_FORK_VERSION + return s_spec.config.ALTAIR_FORK_VERSION + + +def setup_lc_sync_test(spec, state, s_spec=None, phases=None): + test = LightClientSyncTest() + test.steps = [] + + if s_spec is None: + s_spec = spec + if phases is None: + phases = { + spec.fork: spec, + s_spec.fork: s_spec, + } + test.s_spec = s_spec + + yield "genesis_validators_root", "meta", "0x" + state.genesis_validators_root.hex() + test.genesis_validators_root = state.genesis_validators_root + + next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1) + trusted_block = state_transition_with_full_block(spec, state, True, True) + trusted_block_root = trusted_block.message.hash_tree_root() + yield "trusted_block_root", "meta", "0x" + trusted_block_root.hex() + + data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(trusted_block.message.slot)) + data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root) + d_spec = get_spec_for_fork_version(spec, data_fork_version, phases) + data = d_spec.create_light_client_bootstrap(state, trusted_block) + yield "bootstrap_fork_digest", "meta", encode_hex(data_fork_digest) + yield "bootstrap", data + + upgraded = upgrade_lc_bootstrap_to_new_spec(d_spec, test.s_spec, data, phases) + test.store = test.s_spec.initialize_light_client_store(trusted_block_root, upgraded) + store_fork_version = _get_store_fork_version(test.s_spec) + store_fork_digest = 
test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root) + yield "store_fork_digest", "meta", encode_hex(store_fork_digest) + + return test + + +def finish_lc_sync_test(test): + yield "steps", test.steps + + +def _get_update_file_name(d_spec, update): + if d_spec.is_sync_committee_update(update): + suffix1 = "s" + else: + suffix1 = "x" + if d_spec.is_finality_update(update): + suffix2 = "f" + else: + suffix2 = "x" + return f"update_{encode_hex(update.attested_header.beacon.hash_tree_root())}_{suffix1}{suffix2}" + + +def _get_checks(s_spec, store): + if is_post_capella(s_spec): + return { + "finalized_header": { + 'slot': int(store.finalized_header.beacon.slot), + 'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()), + 'execution_root': encode_hex(s_spec.get_lc_execution_root(store.finalized_header)), + }, + "optimistic_header": { + 'slot': int(store.optimistic_header.beacon.slot), + 'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()), + 'execution_root': encode_hex(s_spec.get_lc_execution_root(store.optimistic_header)), + }, + } + + return { + "finalized_header": { + 'slot': int(store.finalized_header.beacon.slot), + 'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()), + }, + "optimistic_header": { + 'slot': int(store.optimistic_header.beacon.slot), + 'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()), + }, + } + + +def emit_force_update(test, spec, state): + current_slot = state.slot + test.s_spec.process_light_client_store_force_update(test.store, current_slot) + + yield from [] # Consistently enable `yield from` syntax in calling tests + test.steps.append({ + "force_update": { + "current_slot": int(current_slot), + "checks": _get_checks(test.s_spec, test.store), + } + }) + + +def emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=True, phases=None): + data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(attested_block.message.slot)) + data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root) + d_spec = get_spec_for_fork_version(spec, data_fork_version, phases) + data = d_spec.create_light_client_update(state, block, attested_state, attested_block, finalized_block) + if not with_next: + data.next_sync_committee = spec.SyncCommittee() + data.next_sync_committee_branch = spec.NextSyncCommitteeBranch() + current_slot = state.slot + + upgraded = upgrade_lc_update_to_new_spec(d_spec, test.s_spec, data, phases) + test.s_spec.process_light_client_update(test.store, upgraded, current_slot, test.genesis_validators_root) + + yield _get_update_file_name(d_spec, data), data + test.steps.append({ + "process_update": { + "update_fork_digest": encode_hex(data_fork_digest), + "update": _get_update_file_name(d_spec, data), + "current_slot": int(current_slot), + "checks": _get_checks(test.s_spec, test.store), + } + }) + return upgraded + + +def _emit_upgrade_store(test, new_s_spec, phases=None): + test.store = upgrade_lc_store_to_new_spec(test.s_spec, new_s_spec, test.store, phases) + test.s_spec = new_s_spec + store_fork_version = _get_store_fork_version(test.s_spec) + store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root) + + yield from [] # Consistently enable `yield from` syntax in calling tests + test.steps.append({ + "upgrade_store": { + "store_fork_digest": encode_hex(store_fork_digest), + "checks": _get_checks(test.s_spec, test.store), + } 
+ }) + + +def run_lc_sync_test_single_fork(spec, phases, state, fork): + # Start test + test = yield from setup_lc_sync_test(spec, state, phases=phases) + + # Initial `LightClientUpdate` + finalized_block = spec.SignedBeaconBlock() + finalized_block.message.state_root = state.hash_tree_root() + finalized_state = state.copy() + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Jump to two slots before fork + fork_epoch = getattr(phases[fork].config, fork.upper() + '_FORK_EPOCH') + transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch) - 4) + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + update = yield from emit_update( + test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Perform `LightClientStore` upgrade + yield from _emit_upgrade_store(test, phases[fork], phases=phases) + update = test.store.best_valid_update + + # Final slot before fork, check that importing the pre-fork format still works + attested_block = block.copy() + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Upgrade to post-fork spec, attested block is still before the fork + attested_block = block.copy() + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate) + spec = phases[fork] + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Another block after the fork, this time attested block is after the fork + attested_block = block.copy() + attested_state = 
state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Jump to next epoch + transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch + 1) - 2) + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finalize the fork + finalized_block = block.copy() + finalized_state = state.copy() + _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True) + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finish test + yield from finish_lc_sync_test(test) + + +def run_lc_sync_test_multi_fork(spec, phases, state, fork_1, fork_2): + # Start test + test = yield from setup_lc_sync_test(spec, state, phases[fork_2], phases) + + # Set up so that finalized is from `spec`, ... + finalized_block = spec.SignedBeaconBlock() + finalized_block.message.state_root = state.hash_tree_root() + finalized_state = state.copy() + + # ..., attested is from `fork_1`, ... 
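+    # Note: `transition_across_forks` (imported from the fork_transition helpers) advances the
+    # chain to the requested slot, applying any fork upgrades activated along the way; with
+    # `with_block=True` it also proposes a block at the target slot, so the attested block
+    # produced below is created in the `fork_1` format.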
+ fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') + spec, state, attested_block = transition_across_forks( + spec, + state, + spec.compute_start_slot_at_epoch(fork_1_epoch), + phases, + with_block=True, + ) + attested_state = state.copy() + + # ..., and signature is from `fork_2` + fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') + spec, state, _ = transition_across_forks( + spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1, phases) + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + spec, state, block = transition_across_forks( + spec, + state, + spec.compute_start_slot_at_epoch(fork_2_epoch), + phases, + with_block=True, + sync_aggregate=sync_aggregate, + ) + + # Check that update applies + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finish test + yield from finish_lc_sync_test(test) + + +def run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, fork): + # Start test (Legacy bootstrap with an upgraded store) + test = yield from setup_lc_sync_test(spec, state, phases[fork], phases) + + # Initial `LightClientUpdate` (check that the upgraded store can process it) + finalized_block = spec.SignedBeaconBlock() + finalized_block.message.state_root = state.hash_tree_root() + finalized_state = state.copy() + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finish test + yield from finish_lc_sync_test(test) From 75c65e63bf1636011166fb65db50fc3a1830bcb4 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 27 Nov 2024 14:25:20 +0100 Subject: [PATCH 14/76] Split LC data collection test into multiple files --- .../light_client/test_data_collection.py | 1047 +---------------- .../light_client/test_data_collection.py | 40 + .../light_client/test_data_collection.py | 41 + .../helpers/light_client_data_collection.py | 897 ++++++++++++++ 4 files changed, 1032 insertions(+), 993 deletions(-) create mode 100644 tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py create mode 100644 tests/core/pyspec/eth2spec/test/electra/light_client/test_data_collection.py create mode 100644 tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py index 57a7183077..af73b26345 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -1,799 +1,36 @@ -from typing import 
(Any, Dict, List, Set) -from dataclasses import dataclass - -from eth_utils import encode_hex from eth2spec.test.context import ( spec_state_test_with_matching_config, - spec_test, - with_config_overrides, - with_matching_spec_config, - with_phases, with_presets, - with_state, with_light_client, ) from eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, MINIMAL, ) -from eth2spec.test.helpers.fork_transition import ( - transition_across_forks, -) -from eth2spec.test.helpers.forks import ( - is_post_altair, -) -from eth2spec.test.helpers.light_client import ( - compute_start_slot_at_sync_committee_period, - get_sync_aggregate, - latest_current_sync_committee_gindex, - latest_finalized_root_gindex, - latest_next_sync_committee_gindex, - latest_normalize_merkle_branch, - upgrade_lc_header_to_new_spec, - upgrade_lc_update_to_new_spec, +from eth2spec.test.helpers.light_client_data_collection import ( + add_new_block, + finish_lc_data_collection_test, + get_lc_bootstrap_block_id, + get_lc_update_attested_block_id, + get_light_client_bootstrap, + get_light_client_finality_update, + get_light_client_optimistic_update, + get_light_client_update_for_period, + select_new_head, + setup_lc_data_collection_test, + BlockID, ) -def next_epoch_boundary_slot(spec, slot): - # Compute the first possible epoch boundary state slot of a `Checkpoint` - # referring to a block at given slot. - epoch = spec.compute_epoch_at_slot(slot + spec.SLOTS_PER_EPOCH - 1) - return spec.compute_start_slot_at_epoch(epoch) - - -@dataclass(frozen=True) -class BlockID(object): - slot: Any - root: Any - - -def block_to_block_id(block): - return BlockID( - slot=block.message.slot, - root=block.message.hash_tree_root(), - ) - - -def state_to_block_id(state): - parent_header = state.latest_block_header.copy() - parent_header.state_root = state.hash_tree_root() - return BlockID(slot=parent_header.slot, root=parent_header.hash_tree_root()) - - -def bootstrap_bid(bootstrap): - return BlockID( - slot=bootstrap.header.beacon.slot, - root=bootstrap.header.beacon.hash_tree_root(), - ) - - -def update_attested_bid(update): - return BlockID( - slot=update.attested_header.beacon.slot, - root=update.attested_header.beacon.hash_tree_root(), - ) - - -@dataclass -class ForkedBeaconState(object): - spec: Any - data: Any - - -@dataclass -class ForkedSignedBeaconBlock(object): - spec: Any - data: Any - - -@dataclass -class ForkedLightClientHeader(object): - spec: Any - data: Any - - -@dataclass -class ForkedLightClientBootstrap(object): - spec: Any - data: Any - - -@dataclass -class ForkedLightClientUpdate(object): - spec: Any - data: Any - - -@dataclass -class ForkedLightClientFinalityUpdate(object): - spec: Any - data: Any - - -@dataclass -class ForkedLightClientOptimisticUpdate(object): - spec: Any - data: Any - - -@dataclass -class CachedLightClientData(object): - # Sync committee branches at block's post-state - current_sync_committee_branch: Any # CurrentSyncCommitteeBranch - next_sync_committee_branch: Any # NextSyncCommitteeBranch - - # Finality information at block's post-state - finalized_slot: Any # Slot - finality_branch: Any # FinalityBranch - - # Best / latest light client data - current_period_best_update: ForkedLightClientUpdate - latest_signature_slot: Any # Slot - - -@dataclass -class LightClientDataCache(object): - # Cached data for creating future `LightClientUpdate` instances. - # Key is the block ID of which the post state was used to get the data. 
- # Data stored for the finalized head block and all non-finalized blocks. - data: Dict[BlockID, CachedLightClientData] - - # Light client data for the latest slot that was signed by at least - # `MIN_SYNC_COMMITTEE_PARTICIPANTS`. May be older than head - latest: ForkedLightClientFinalityUpdate - - # The earliest slot for which light client data is imported - tail_slot: Any # Slot - - -@dataclass -class LightClientDataDB(object): - headers: Dict[Any, ForkedLightClientHeader] # Root -> ForkedLightClientHeader - current_branches: Dict[Any, Any] # Slot -> CurrentSyncCommitteeBranch - sync_committees: Dict[Any, Any] # SyncCommitteePeriod -> SyncCommittee - best_updates: Dict[Any, ForkedLightClientUpdate] # SyncCommitteePeriod -> ForkedLightClientUpdate - - -@dataclass -class LightClientDataStore(object): - spec: Any - - # Cached data to accelerate creating light client data - cache: LightClientDataCache - - # Persistent light client data - db: LightClientDataDB - - -@dataclass -class LightClientDataCollectionTest(object): - steps: List[Dict[str, Any]] - files: Set[str] - - # Fork schedule - phases: Any - - # History access - blocks: Dict[Any, ForkedSignedBeaconBlock] # Block root -> ForkedSignedBeaconBlock - finalized_block_roots: Dict[Any, Any] # Slot -> Root - states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState - finalized_checkpoint_states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState - latest_finalized_epoch: Any # Epoch - latest_finalized_bid: BlockID - historical_tail_slot: Any # Slot - - # Light client data - lc_data_store: LightClientDataStore - - -def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockID] - try: - block = test.blocks[bid.root] - while True: - if block.data.message.slot <= slot: - return block_to_block_id(block.data) - - block = test.blocks[block.data.message.parent_root] - except KeyError: - return None - - -def block_id_at_finalized_slot(test, slot): # -> Optional[BlockID] - while slot >= test.historical_tail_slot: - try: - return BlockID(slot=slot, root=test.finalized_block_roots[slot]) - except KeyError: - slot = slot - 1 - return None - - -def get_current_sync_committee_for_finalized_period(test, period): # -> Optional[SyncCommittee] - low_slot = max( - test.historical_tail_slot, - test.lc_data_store.spec.compute_start_slot_at_epoch( - test.lc_data_store.spec.config.ALTAIR_FORK_EPOCH) - ) - if period < test.lc_data_store.spec.compute_sync_committee_period_at_slot(low_slot): - return None - period_start_slot = compute_start_slot_at_sync_committee_period(test.lc_data_store.spec, period) - sync_committee_slot = max(period_start_slot, low_slot) - bid = block_id_at_finalized_slot(test, sync_committee_slot) - if bid is None: - return None - block = test.blocks[bid.root] - state = test.finalized_checkpoint_states[block.data.message.state_root] - if sync_committee_slot > state.data.slot: - state.spec, state.data, _ = transition_across_forks( - state.spec, state.data, sync_committee_slot, phases=test.phases) - assert is_post_altair(state.spec) - return state.data.current_sync_committee - - -def light_client_header_for_block(test, block): # -> ForkedLightClientHeader - if not is_post_altair(block.spec): - spec = test.phases[ALTAIR] - else: - spec = block.spec - return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) - - -def light_client_header_for_block_id(test, bid): # -> ForkedLightClientHeader - block = test.blocks[bid.root] - if not is_post_altair(block.spec): - spec = 
test.phases[ALTAIR] - else: - spec = block.spec - return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) - - -def sync_aggregate_for_block_id(test, bid): # -> Optional[SyncAggregate] - block = test.blocks[bid.root] - if not is_post_altair(block.spec): - return None - return block.data.message.body.sync_aggregate - - -def get_light_client_data(lc_data_store, bid): # -> CachedLightClientData - # Fetch cached light client data about a given block. - # Data must be cached (`cache_light_client_data`) before calling this function. - try: - return lc_data_store.cache.data[bid] - except KeyError: - raise ValueError("Trying to get light client data that was not cached") - - -def cache_light_client_data(lc_data_store, spec, state, bid, current_period_best_update, latest_signature_slot): - # Cache data for a given block and its post-state to speed up creating future - # `LightClientUpdate` and `LightClientBootstrap` instances that refer to this - # block and state. - cached_data = CachedLightClientData( - current_sync_committee_branch=latest_normalize_merkle_branch( - lc_data_store.spec, - spec.compute_merkle_proof(state, spec.current_sync_committee_gindex_at_slot(state.slot)), - latest_current_sync_committee_gindex(lc_data_store.spec)), - next_sync_committee_branch=latest_normalize_merkle_branch( - lc_data_store.spec, - spec.compute_merkle_proof(state, spec.next_sync_committee_gindex_at_slot(state.slot)), - latest_next_sync_committee_gindex(lc_data_store.spec)), - finalized_slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), - finality_branch=latest_normalize_merkle_branch( - lc_data_store.spec, - spec.compute_merkle_proof(state, spec.finalized_root_gindex_at_slot(state.slot)), - latest_finalized_root_gindex(lc_data_store.spec)), - current_period_best_update=current_period_best_update, - latest_signature_slot=latest_signature_slot, - ) - if bid in lc_data_store.cache.data: - raise ValueError("Redundant `cache_light_client_data` call") - lc_data_store.cache.data[bid] = cached_data - - -def delete_light_client_data(lc_data_store, bid): - # Delete cached light client data for a given block. This needs to be called - # when a block becomes unreachable due to finalization of a different fork. 
- del lc_data_store.cache.data[bid] - - -def create_light_client_finality_update_from_light_client_data(test, - attested_bid, - signature_slot, - sync_aggregate): # -> ForkedLightClientFinalityUpdate - attested_header = light_client_header_for_block_id(test, attested_bid) - attested_data = get_light_client_data(test.lc_data_store, attested_bid) - finalized_bid = block_id_at_finalized_slot(test, attested_data.finalized_slot) - if finalized_bid is not None: - if finalized_bid.slot != attested_data.finalized_slot: - # Empty slots at end of epoch, update cache for latest block slot - attested_data.finalized_slot = finalized_bid.slot - if finalized_bid.slot == attested_header.spec.GENESIS_SLOT: - finalized_header = ForkedLightClientHeader( - spec=attested_header.spec, - data=attested_header.spec.LightClientHeader(), - ) - else: - finalized_header = light_client_header_for_block_id(test, finalized_bid) - finalized_header = ForkedLightClientHeader( - spec=attested_header.spec, - data=upgrade_lc_header_to_new_spec( - finalized_header.spec, - attested_header.spec, - finalized_header.data, - ) - ) - finality_branch = attested_data.finality_branch - return ForkedLightClientFinalityUpdate( - spec=attested_header.spec, - data=attested_header.spec.LightClientFinalityUpdate( - attested_header=attested_header.data, - finalized_header=finalized_header.data, - finality_branch=finality_branch, - sync_aggregate=sync_aggregate, - signature_slot=signature_slot, - ), - ) - - -def create_light_client_update_from_light_client_data(test, - attested_bid, - signature_slot, - sync_aggregate, - next_sync_committee): # -> ForkedLightClientUpdate - finality_update = create_light_client_finality_update_from_light_client_data( - test, attested_bid, signature_slot, sync_aggregate) - attested_data = get_light_client_data(test.lc_data_store, attested_bid) - return ForkedLightClientUpdate( - spec=finality_update.spec, - data=finality_update.spec.LightClientUpdate( - attested_header=finality_update.data.attested_header, - next_sync_committee=next_sync_committee, - next_sync_committee_branch=attested_data.next_sync_committee_branch, - finalized_header=finality_update.data.finalized_header, - finality_branch=finality_update.data.finality_branch, - sync_aggregate=finality_update.data.sync_aggregate, - signature_slot=finality_update.data.signature_slot, - ) - ) - - -def create_light_client_update(test, spec, state, block, parent_bid): - # Create `LightClientUpdate` instances for a given block and its post-state, - # and keep track of best / latest ones. Data about the parent block's - # post-state must be cached (`cache_light_client_data`) before calling this. 
- - # Verify attested block (parent) is recent enough and that state is available - attested_bid = parent_bid - attested_slot = attested_bid.slot - if attested_slot < test.lc_data_store.cache.tail_slot: - cache_light_client_data( - test.lc_data_store, - spec, - state, - block_to_block_id(block), - current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), - latest_signature_slot=spec.GENESIS_SLOT, - ) - return - - # If sync committee period changed, reset `best` - attested_period = spec.compute_sync_committee_period_at_slot(attested_slot) - signature_slot = block.message.slot - signature_period = spec.compute_sync_committee_period_at_slot(signature_slot) - attested_data = get_light_client_data(test.lc_data_store, attested_bid) - if attested_period != signature_period: - best = ForkedLightClientUpdate(spec=None, data=None) - else: - best = attested_data.current_period_best_update - - # If sync committee does not have sufficient participants, do not bump latest - sync_aggregate = block.message.body.sync_aggregate - num_active_participants = sum(sync_aggregate.sync_committee_bits) - if num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS: - latest_signature_slot = attested_data.latest_signature_slot - else: - latest_signature_slot = signature_slot - - # To update `best`, sync committee must have sufficient participants, and - # `signature_slot` must be in `attested_slot`'s sync committee period - if ( - num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS - or attested_period != signature_period - ): - cache_light_client_data( - test.lc_data_store, - spec, - state, - block_to_block_id(block), - current_period_best_update=best, - latest_signature_slot=latest_signature_slot, - ) - return - - # Check if light client data improved - update = create_light_client_update_from_light_client_data( - test, attested_bid, signature_slot, sync_aggregate, state.next_sync_committee) - is_better = ( - best.spec is None - or spec.is_better_update(update.data, upgrade_lc_update_to_new_spec( - best.spec, update.spec, best.data, test.phases)) - ) - - # Update best light client data for current sync committee period - if is_better: - best = update - cache_light_client_data( - test.lc_data_store, - spec, - state, - block_to_block_id(block), - current_period_best_update=best, - latest_signature_slot=latest_signature_slot, - ) - - -def create_light_client_bootstrap(test, spec, bid): - block = test.blocks[bid.root] - period = spec.compute_sync_committee_period_at_slot(bid.slot) - if period not in test.lc_data_store.db.sync_committees: - test.lc_data_store.db.sync_committees[period] = \ - get_current_sync_committee_for_finalized_period(test, period) - test.lc_data_store.db.headers[bid.root] = ForkedLightClientHeader( - spec=block.spec, data=block.spec.block_to_light_client_header(block.data)) - test.lc_data_store.db.current_branches[bid.slot] = \ - get_light_client_data(test.lc_data_store, bid).current_sync_committee_branch - - -def process_new_block_for_light_client(test, spec, state, block, parent_bid): - # Update light client data with information from a new block. - if block.message.slot < test.lc_data_store.cache.tail_slot: - return - - if is_post_altair(spec): - create_light_client_update(test, spec, state, block, parent_bid) - else: - raise ValueError("`tail_slot` cannot be before Altair") - - -def process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid): - # Update light client data to account for a new head block. 
- # Note that `old_finalized_bid` is not yet updated when this is called. - if head_bid.slot < test.lc_data_store.cache.tail_slot: - return - - # Commit best light client data for non-finalized periods - head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) - low_slot = max(test.lc_data_store.cache.tail_slot, old_finalized_bid.slot) - low_period = spec.compute_sync_committee_period_at_slot(low_slot) - bid = head_bid - for period in reversed(range(low_period, head_period + 1)): - period_end_slot = compute_start_slot_at_sync_committee_period(spec, period + 1) - 1 - bid = get_ancestor_of_block_id(test, bid, period_end_slot) - if bid is None or bid.slot < low_slot: - break - best = get_light_client_data(test.lc_data_store, bid).current_period_best_update - if ( - best.spec is None - or sum(best.data.sync_aggregate.sync_committee_bits) < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS - ): - test.lc_data_store.db.best_updates.pop(period, None) - else: - test.lc_data_store.db.best_updates[period] = best - - # Update latest light client data - head_data = get_light_client_data(test.lc_data_store, head_bid) - signature_slot = head_data.latest_signature_slot - if signature_slot <= low_slot: - test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) - return - signature_bid = get_ancestor_of_block_id(test, head_bid, signature_slot) - if signature_bid is None or signature_bid.slot <= low_slot: - test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) - return - attested_bid = get_ancestor_of_block_id(test, signature_bid, signature_bid.slot - 1) - if attested_bid is None or attested_bid.slot < low_slot: - test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) - return - sync_aggregate = sync_aggregate_for_block_id(test, signature_bid) - assert sync_aggregate is not None - test.lc_data_store.cache.latest = create_light_client_finality_update_from_light_client_data( - test, attested_bid, signature_slot, sync_aggregate) - - -def process_finalization_for_light_client(test, spec, finalized_bid, old_finalized_bid): - # Prune cached data that is no longer useful for creating future - # `LightClientUpdate` and `LightClientBootstrap` instances. - # This needs to be called whenever `finalized_checkpoint` changes. 
- finalized_slot = finalized_bid.slot - if finalized_slot < test.lc_data_store.cache.tail_slot: - return - - # Cache `LightClientBootstrap` for newly finalized epoch boundary blocks - first_new_slot = old_finalized_bid.slot + 1 - low_slot = max(first_new_slot, test.lc_data_store.cache.tail_slot) - boundary_slot = finalized_slot - while boundary_slot >= low_slot: - bid = block_id_at_finalized_slot(test, boundary_slot) - if bid is None: - break - if bid.slot >= low_slot: - create_light_client_bootstrap(test, spec, bid) - boundary_slot = next_epoch_boundary_slot(spec, bid.slot) - if boundary_slot < spec.SLOTS_PER_EPOCH: - break - boundary_slot = boundary_slot - spec.SLOTS_PER_EPOCH - - # Prune light client data that is no longer referrable by future updates - bids_to_delete = [] - for bid in test.lc_data_store.cache.data: - if bid.slot >= finalized_bid.slot: - continue - bids_to_delete.append(bid) - for bid in bids_to_delete: - delete_light_client_data(test.lc_data_store, bid) - - -def get_light_client_bootstrap(test, block_root): # -> ForkedLightClientBootstrap - try: - header = test.lc_data_store.db.headers[block_root] - except KeyError: - return ForkedLightClientBootstrap(spec=None, data=None) - - slot = header.data.beacon.slot - period = header.spec.compute_sync_committee_period_at_slot(slot) - return ForkedLightClientBootstrap( - spec=header.spec, - data=header.spec.LightClientBootstrap( - header=header.data, - current_sync_committee=test.lc_data_store.db.sync_committees[period], - current_sync_committee_branch=test.lc_data_store.db.current_branches[slot], - ) - ) - - -def get_light_client_update_for_period(test, period): # -> ForkedLightClientUpdate - try: - return test.lc_data_store.db.best_updates[period] - except KeyError: - return ForkedLightClientUpdate(spec=None, data=None) - - -def get_light_client_finality_update(test): # -> ForkedLightClientFinalityUpdate - return test.lc_data_store.cache.latest - - -def get_light_client_optimistic_update(test): # -> ForkedLightClientOptimisticUpdate - finality_update = get_light_client_finality_update(test) - if finality_update.spec is None: - return ForkedLightClientOptimisticUpdate(spec=None, data=None) - return ForkedLightClientOptimisticUpdate( - spec=finality_update.spec, - data=finality_update.spec.LightClientOptimisticUpdate( - attested_header=finality_update.data.attested_header, - sync_aggregate=finality_update.data.sync_aggregate, - signature_slot=finality_update.data.signature_slot, - ), - ) - - -def setup_test(spec, state, phases=None): - assert spec.compute_slots_since_epoch_start(state.slot) == 0 - - test = LightClientDataCollectionTest( - steps=[], - files=set(), - phases=phases, - blocks={}, - finalized_block_roots={}, - states={}, - finalized_checkpoint_states={}, - latest_finalized_epoch=state.finalized_checkpoint.epoch, - latest_finalized_bid=BlockID( - slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), - root=state.finalized_checkpoint.root, - ), - historical_tail_slot=state.slot, - lc_data_store=LightClientDataStore( - spec=spec, - cache=LightClientDataCache( - data={}, - latest=ForkedLightClientFinalityUpdate(spec=None, data=None), - tail_slot=max(state.slot, spec.compute_start_slot_at_epoch(spec.config.ALTAIR_FORK_EPOCH)), - ), - db=LightClientDataDB( - headers={}, - current_branches={}, - sync_committees={}, - best_updates={}, - ), - ), - ) - bid = state_to_block_id(state) - yield "initial_state", state - test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, data=spec.SignedBeaconBlock( - 
message=spec.BeaconBlock(state_root=state.hash_tree_root()), - )) - test.finalized_block_roots[bid.slot] = bid.root - test.states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) - test.finalized_checkpoint_states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) - cache_light_client_data( - test.lc_data_store, spec, state, bid, - current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), - latest_signature_slot=spec.GENESIS_SLOT, - ) - create_light_client_bootstrap(test, spec, bid) - - return test - - -def finish_test(test): - yield "steps", test.steps - - -def encode_object(test, prefix, obj, slot, genesis_validators_root): - yield from [] # Consistently enable `yield from` syntax in calling tests - - file_name = f"{prefix}_{slot}_{encode_hex(obj.data.hash_tree_root())}" - if file_name not in test.files: - test.files.add(file_name) - yield file_name, obj.data - return { - "fork_digest": encode_hex(obj.spec.compute_fork_digest( - obj.spec.compute_fork_version(obj.spec.compute_epoch_at_slot(slot)), - genesis_validators_root, - )), - "data": file_name, - } - - -def add_new_block(test, spec, state, slot=None, num_sync_participants=0): - if slot is None: - slot = state.slot + 1 - assert slot > state.slot - parent_bid = state_to_block_id(state) - - # Advance to target slot - 1 to ensure sync aggregate can be efficiently computed - if state.slot < slot - 1: - spec, state, _ = transition_across_forks(spec, state, slot - 1, phases=test.phases) - - # Compute sync aggregate, using: - # - sync committee based on target slot - # - fork digest based on target slot - 1 - # - signed data based on parent_bid.slot - # All three slots may be from different forks - sync_aggregate, signature_slot = get_sync_aggregate( - spec, state, num_participants=num_sync_participants, phases=test.phases) - assert signature_slot == slot - - # Apply final block with computed sync aggregate - spec, state, block = transition_across_forks( - spec, state, slot, phases=test.phases, with_block=True, sync_aggregate=sync_aggregate) - bid = block_to_block_id(block) - test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, data=block) - test.states[block.message.state_root] = ForkedBeaconState(spec=spec, data=state) - process_new_block_for_light_client(test, spec, state, block, parent_bid) - block_obj = yield from encode_object( - test, "block", ForkedSignedBeaconBlock(spec=spec, data=block), block.message.slot, - state.genesis_validators_root, - ) - test.steps.append({ - "new_block": block_obj - }) - return spec, state, bid - - -def select_new_head(test, spec, head_bid): - old_finalized_bid = test.latest_finalized_bid - process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid) - - # Process finalization - block = test.blocks[head_bid.root] - state = test.states[block.data.message.state_root] - if state.data.finalized_checkpoint.epoch != spec.GENESIS_EPOCH: - block = test.blocks[state.data.finalized_checkpoint.root] - bid = block_to_block_id(block.data) - new_finalized_bid = bid - if new_finalized_bid.slot > old_finalized_bid.slot: - old_finalized_epoch = None - new_finalized_epoch = state.data.finalized_checkpoint.epoch - while bid.slot > test.latest_finalized_bid.slot: - test.finalized_block_roots[bid.slot] = bid.root - finalized_epoch = spec.compute_epoch_at_slot(bid.slot + spec.SLOTS_PER_EPOCH - 1) - if finalized_epoch != old_finalized_epoch: - state = test.states[block.data.message.state_root] - 
test.finalized_checkpoint_states[block.data.message.state_root] = state - old_finalized_epoch = finalized_epoch - block = test.blocks[block.data.message.parent_root] - bid = block_to_block_id(block.data) - test.latest_finalized_epoch = new_finalized_epoch - test.latest_finalized_bid = new_finalized_bid - process_finalization_for_light_client(test, spec, new_finalized_bid, old_finalized_bid) - - blocks_to_delete = [] - for block_root, block in test.blocks.items(): - if block.data.message.slot < new_finalized_bid.slot: - blocks_to_delete.append(block_root) - for block_root in blocks_to_delete: - del test.blocks[block_root] - states_to_delete = [] - for state_root, state in test.states.items(): - if state.data.slot < new_finalized_bid.slot: - states_to_delete.append(state_root) - for state_root in states_to_delete: - del test.states[state_root] - - yield from [] # Consistently enable `yield from` syntax in calling tests - - bootstraps = [] - for state in test.finalized_checkpoint_states.values(): - bid = state_to_block_id(state.data) - entry = { - "block_root": encode_hex(bid.root), - } - bootstrap = get_light_client_bootstrap(test, bid.root) - if bootstrap.spec is not None: - bootstrap_obj = yield from encode_object( - test, "bootstrap", bootstrap, bootstrap.data.header.beacon.slot, - state.data.genesis_validators_root, - ) - entry["bootstrap"] = bootstrap_obj - bootstraps.append(entry) - - best_updates = [] - low_period = spec.compute_sync_committee_period_at_slot(test.lc_data_store.cache.tail_slot) - head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) - for period in range(low_period, head_period + 1): - entry = { - "period": int(period), - } - update = get_light_client_update_for_period(test, period) - if update.spec is not None: - update_obj = yield from encode_object( - test, "update", update, update.data.attested_header.beacon.slot, - state.data.genesis_validators_root, - ) - entry["update"] = update_obj - best_updates.append(entry) - - checks = { - "latest_finalized_checkpoint": { - "epoch": int(test.latest_finalized_epoch), - "root": encode_hex(test.latest_finalized_bid.root), - }, - "bootstraps": bootstraps, - "best_updates": best_updates, - } - finality_update = get_light_client_finality_update(test) - if finality_update.spec is not None: - finality_update_obj = yield from encode_object( - test, "finality_update", finality_update, finality_update.data.attested_header.beacon.slot, - state.data.genesis_validators_root, - ) - checks["latest_finality_update"] = finality_update_obj - optimistic_update = get_light_client_optimistic_update(test) - if optimistic_update.spec is not None: - optimistic_update_obj = yield from encode_object( - test, "optimistic_update", optimistic_update, optimistic_update.data.attested_header.beacon.slot, - state.data.genesis_validators_root, - ) - checks["latest_optimistic_update"] = optimistic_update_obj - - test.steps.append({ - "new_head": { - "head_block_root": encode_hex(head_bid.root), - "checks": checks, - } - }) - - @with_light_client @spec_state_test_with_matching_config @with_presets([MINIMAL], reason="too slow") def test_light_client_data_collection(spec, state): # Start test - test = yield from setup_test(spec, state) + test = yield from setup_lc_data_collection_test(spec, state) # Genesis block is post Altair and is finalized, so can be used as bootstrap genesis_bid = BlockID(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) - assert bootstrap_bid(get_light_client_bootstrap(test, 
genesis_bid.root).data) == genesis_bid + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid # No blocks have been imported, so no other light client data is available period = spec.compute_sync_committee_period_at_slot(state.slot) @@ -813,9 +50,9 @@ def test_light_client_data_collection(spec, state): spec_b, state_b, bid_2 = yield from add_new_block(test, spec, state, slot=2, num_sync_participants=1) yield from select_new_head(test, spec_b, bid_2) period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == genesis_bid - assert update_attested_bid(get_light_client_finality_update(test).data) == genesis_bid - assert update_attested_bid(get_light_client_optimistic_update(test).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == genesis_bid # Build on branch A, once more with an empty sync aggregate spec_a, state_a, bid_3 = yield from add_new_block(test, spec_a, state_a, slot=3) @@ -829,33 +66,33 @@ def test_light_client_data_collection(spec, state): spec_b, state_b, bid_4 = yield from add_new_block(test, spec_b, state_b, slot=4) yield from select_new_head(test, spec_b, bid_4) period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == genesis_bid - assert update_attested_bid(get_light_client_finality_update(test).data) == genesis_bid - assert update_attested_bid(get_light_client_optimistic_update(test).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == genesis_bid # Build on branch B, once more with 1 participant spec_b, state_b, bid_5 = yield from add_new_block(test, spec_b, state_b, slot=5, num_sync_participants=1) yield from select_new_head(test, spec_b, bid_5) period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == genesis_bid - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_4 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_4 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_4 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_4 # Build on branch B, this time with 3 participants spec_b, state_b, bid_6 = yield from add_new_block(test, spec_b, state_b, slot=6, num_sync_participants=3) yield from select_new_head(test, spec_b, bid_6) period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_5 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_5 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_5 + 
assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_5 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_5 # Build on branch A, with 2 participants spec_a, state_a, bid_7 = yield from add_new_block(test, spec_a, state_a, slot=7, num_sync_participants=2) yield from select_new_head(test, spec_a, bid_7) period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_3 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_3 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_3 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_3 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_3 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_3 # Branch A: epoch 1, slot 5 slot = spec_a.compute_start_slot_at_epoch(1) + 5 @@ -864,9 +101,9 @@ def test_light_client_data_collection(spec, state): assert get_light_client_bootstrap(test, bid_7.root).spec is None assert get_light_client_bootstrap(test, bid_1_5.root).spec is None period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_7 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_7 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_7 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_7 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_7 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_7 # Branch B: epoch 2, slot 4 slot = spec_b.compute_start_slot_at_epoch(2) + 4 @@ -876,9 +113,9 @@ def test_light_client_data_collection(spec, state): assert get_light_client_bootstrap(test, bid_1_5.root).spec is None assert get_light_client_bootstrap(test, bid_2_4.root).spec is None period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_6 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_6 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_6 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_6 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_6 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_6 # Branch A: epoch 3, slot 0 slot = spec_a.compute_start_slot_at_epoch(3) + 0 @@ -889,9 +126,9 @@ def test_light_client_data_collection(spec, state): assert get_light_client_bootstrap(test, bid_2_4.root).spec is None assert get_light_client_bootstrap(test, bid_3_0.root).spec is None period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_1_5 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_1_5 + 
assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_1_5 # Branch A: fill epoch for i in range(1, spec_a.SLOTS_PER_EPOCH): @@ -902,9 +139,9 @@ def test_light_client_data_collection(spec, state): assert get_light_client_bootstrap(test, bid_2_4.root).spec is None assert get_light_client_bootstrap(test, bid_3_0.root).spec is None period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_1_5 - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_1_5 assert state_a.slot == spec_a.compute_start_slot_at_epoch(4) - 1 bid_3_n = bid_a @@ -918,9 +155,9 @@ def test_light_client_data_collection(spec, state): assert get_light_client_bootstrap(test, bid_3_0.root).spec is None assert get_light_client_bootstrap(test, bid_4_0.root).spec is None period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_3_n - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_3_n + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_3_n + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_3_n # Branch A: fill epoch for i in range(1, spec_a.SLOTS_PER_EPOCH): @@ -932,9 +169,9 @@ def test_light_client_data_collection(spec, state): assert get_light_client_bootstrap(test, bid_3_0.root).spec is None assert get_light_client_bootstrap(test, bid_4_0.root).spec is None period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_3_n - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_3_n + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_3_n + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_3_n assert state_a.slot == spec_a.compute_start_slot_at_epoch(5) - 1 bid_4_n = bid_a @@ -942,191 +179,15 @@ def test_light_client_data_collection(spec, state): slot = spec_a.compute_start_slot_at_epoch(6) + 2 spec_a, state_a, bid_6_2 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=6) yield from select_new_head(test, spec_a, bid_6_2) - assert bootstrap_bid(get_light_client_bootstrap(test, bid_7.root).data) == bid_7 - assert bootstrap_bid(get_light_client_bootstrap(test, 
bid_1_5.root).data) == bid_1_5 + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, bid_7.root).data) == bid_7 + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, bid_1_5.root).data) == bid_1_5 assert get_light_client_bootstrap(test, bid_2_4.root).spec is None - assert bootstrap_bid(get_light_client_bootstrap(test, bid_3_0.root).data) == bid_3_0 + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, bid_3_0.root).data) == bid_3_0 assert get_light_client_bootstrap(test, bid_4_0.root).spec is None period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - assert update_attested_bid(get_light_client_update_for_period(test, period).data) == bid_1_5 - assert update_attested_bid(get_light_client_finality_update(test).data) == bid_4_n - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bid_4_n - - # Finish test - yield from finish_test(test) - - -def run_test_multi_fork(spec, phases, state, fork_1, fork_2): - # Start test - test = yield from setup_test(spec, state, phases=phases) - - # Genesis block is post Altair and is finalized, so can be used as bootstrap - genesis_bid = BlockID(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) - assert bootstrap_bid(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid - - # Shared history up to final epoch of period before `fork_1` - fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') - fork_1_period = spec.compute_sync_committee_period(fork_1_epoch) - slot = compute_start_slot_at_sync_committee_period(spec, fork_1_period) - spec.SLOTS_PER_EPOCH - spec, state, bid = yield from add_new_block(test, spec, state, slot=slot, num_sync_participants=1) - yield from select_new_head(test, spec, bid) - assert get_light_client_bootstrap(test, bid.root).spec is None - slot_period = spec.compute_sync_committee_period_at_slot(slot) - if slot_period == 0: - assert update_attested_bid(get_light_client_update_for_period(test, 0).data) == genesis_bid - else: - for period in range(0, slot_period): - assert get_light_client_update_for_period(test, period).spec is None # attested period != signature period - state_period = spec.compute_sync_committee_period_at_slot(state.slot) - - # Branch A: Advance past `fork_2`, having blocks at slots 0 and 4 of each epoch - spec_a = spec - state_a = state - slot_a = state_a.slot - bids_a = [bid] - num_sync_participants_a = 1 - fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') - while spec_a.get_current_epoch(state_a) <= fork_2_epoch: - attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) - slot_a += 4 - signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) - if signature_period != attested_period: - num_sync_participants_a = 0 - num_sync_participants_a += 1 - spec_a, state_a, bid_a = yield from add_new_block( - test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) - yield from select_new_head(test, spec_a, bid_a) - for bid in bids_a: - assert get_light_client_bootstrap(test, bid.root).spec is None - if attested_period == signature_period: - assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] - else: - assert signature_period == attested_period + 1 - assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] - assert get_light_client_update_for_period(test, signature_period).spec is None - 
assert update_attested_bid(get_light_client_finality_update(test).data) == bids_a[-1] - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bids_a[-1] - bids_a.append(bid_a) - - # Branch B: Advance past `fork_2`, having blocks at slots 1 and 5 of each epoch but no sync participation - spec_b = spec - state_b = state - slot_b = state_b.slot - bids_b = [bid] - while spec_b.get_current_epoch(state_b) <= fork_2_epoch: - slot_b += 4 - signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) - spec_b, state_b, bid_b = yield from add_new_block( - test, spec_b, state_b, slot=slot_b) - # Simulate that this does not become head yet, e.g., this branch was withheld - for bid in bids_b: - assert get_light_client_bootstrap(test, bid.root).spec is None - bids_b.append(bid_b) - - # Branch B: Another block that becomes head - attested_period = spec_b.compute_sync_committee_period_at_slot(slot_b) - slot_b += 1 - signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) - num_sync_participants_b = 1 - spec_b, state_b, bid_b = yield from add_new_block( - test, spec_b, state_b, slot=slot_b, num_sync_participants=num_sync_participants_b) - yield from select_new_head(test, spec_b, bid_b) - for bid in bids_b: - assert get_light_client_bootstrap(test, bid.root).spec is None - if attested_period == signature_period: - assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_b[-1] - else: - assert signature_period == attested_period + 1 - assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_b[-2] - assert get_light_client_update_for_period(test, signature_period).spec is None - assert update_attested_bid(get_light_client_finality_update(test).data) == bids_b[-1] - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bids_b[-1] - bids_b.append(bid_b) - - # All data for periods between the common ancestor of the two branches should have reorged. - # As there was no sync participation on branch B, that means it is deleted. 
- state_b_period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) - for period in range(state_period + 1, state_b_period): - assert get_light_client_update_for_period(test, period).spec is None - - # Branch A: Another block, reorging branch B once more - attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) - slot_a = slot_b + 1 - signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) - if signature_period != attested_period: - num_sync_participants_a = 0 - num_sync_participants_a += 1 - spec_a, state_a, bid_a = yield from add_new_block( - test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) - yield from select_new_head(test, spec_a, bid_a) - for bid in bids_a: - assert get_light_client_bootstrap(test, bid.root).spec is None - if attested_period == signature_period: - assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] - else: - assert signature_period == attested_period + 1 - assert update_attested_bid(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] - assert get_light_client_update_for_period(test, signature_period).spec is None - assert update_attested_bid(get_light_client_finality_update(test).data) == bids_a[-1] - assert update_attested_bid(get_light_client_optimistic_update(test).data) == bids_a[-1] - bids_a.append(bid_a) - - # Data has been restored - state_a_period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) - for period in range(state_period + 1, state_a_period): - assert get_light_client_update_for_period(test, period).spec is not None + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_4_n + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_4_n # Finish test - yield from finish_test(test) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 - 'DENEB_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_reorg_aligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) - 'DENEB_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_reorg_unaligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) - - -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 'DENEB_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 - 'ELECTRA_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_electra_reorg_aligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) - - -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 
'DENEB_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) - 'ELECTRA_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_electra_reorg_unaligned(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) + yield from finish_lc_data_collection_test(test) diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py new file mode 100644 index 0000000000..03b7286988 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py @@ -0,0 +1,40 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + BELLATRIX, CAPELLA, DENEB, + MINIMAL, +) +from eth2spec.test.helpers.light_client_data_collection import ( + run_lc_data_collection_test_multi_fork, +) + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 + 'DENEB_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_aligned(spec, phases, state): + yield from run_lc_data_collection_test_multi_fork(spec, phases, state, CAPELLA, DENEB) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) + 'DENEB_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_unaligned(spec, phases, state): + yield from run_lc_data_collection_test_multi_fork(spec, phases, state, CAPELLA, DENEB) diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/electra/light_client/test_data_collection.py new file mode 100644 index 0000000000..d85b0dfda1 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/electra/light_client/test_data_collection.py @@ -0,0 +1,41 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + CAPELLA, DENEB, ELECTRA, + MINIMAL, +) +from eth2spec.test.helpers.light_client_data_collection import ( + run_lc_data_collection_test_multi_fork, +) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 + 'ELECTRA_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_electra_reorg_aligned(spec, phases, state): + yield from run_lc_data_collection_test_multi_fork(spec, phases, state, DENEB, ELECTRA) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) + 'ELECTRA_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) +}, 
emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_electra_reorg_unaligned(spec, phases, state): + yield from run_lc_data_collection_test_multi_fork(spec, phases, state, DENEB, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py b/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py new file mode 100644 index 0000000000..d56ea05310 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py @@ -0,0 +1,897 @@ +from typing import (Any, Dict, List, Set) +from dataclasses import dataclass + +from eth_utils import encode_hex +from eth2spec.test.helpers.constants import ( + ALTAIR, +) +from eth2spec.test.helpers.fork_transition import ( + transition_across_forks, +) +from eth2spec.test.helpers.forks import ( + is_post_altair, +) +from eth2spec.test.helpers.light_client import ( + compute_start_slot_at_sync_committee_period, + get_sync_aggregate, + latest_current_sync_committee_gindex, + latest_finalized_root_gindex, + latest_next_sync_committee_gindex, + latest_normalize_merkle_branch, + upgrade_lc_header_to_new_spec, + upgrade_lc_update_to_new_spec, +) + + +def _next_epoch_boundary_slot(spec, slot): + # Compute the first possible epoch boundary state slot of a `Checkpoint` + # referring to a block at given slot. + epoch = spec.compute_epoch_at_slot(slot + spec.SLOTS_PER_EPOCH - 1) + return spec.compute_start_slot_at_epoch(epoch) + + +@dataclass(frozen=True) +class BlockID(object): + slot: Any + root: Any + + +def _block_to_block_id(block): + return BlockID( + slot=block.message.slot, + root=block.message.hash_tree_root(), + ) + + +def _state_to_block_id(state): + parent_header = state.latest_block_header.copy() + parent_header.state_root = state.hash_tree_root() + return BlockID(slot=parent_header.slot, root=parent_header.hash_tree_root()) + + +def get_lc_bootstrap_block_id(bootstrap): + return BlockID( + slot=bootstrap.header.beacon.slot, + root=bootstrap.header.beacon.hash_tree_root(), + ) + + +def get_lc_update_attested_block_id(update): + return BlockID( + slot=update.attested_header.beacon.slot, + root=update.attested_header.beacon.hash_tree_root(), + ) + + +@dataclass +class ForkedBeaconState(object): + spec: Any + data: Any + + +@dataclass +class ForkedSignedBeaconBlock(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientHeader(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientBootstrap(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientUpdate(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientFinalityUpdate(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientOptimisticUpdate(object): + spec: Any + data: Any + + +@dataclass +class CachedLightClientData(object): + # Sync committee branches at block's post-state + current_sync_committee_branch: Any # CurrentSyncCommitteeBranch + next_sync_committee_branch: Any # NextSyncCommitteeBranch + + # Finality information at block's post-state + finalized_slot: Any # Slot + finality_branch: Any # FinalityBranch + + # Best / latest light client data + current_period_best_update: ForkedLightClientUpdate + latest_signature_slot: Any # Slot + + +@dataclass +class LightClientDataCache(object): + # Cached data for creating future `LightClientUpdate` instances. + # Key is the block ID of which the post state was used to get the data. 
+ # Data stored for the finalized head block and all non-finalized blocks. + data: Dict[BlockID, CachedLightClientData] + + # Light client data for the latest slot that was signed by at least + # `MIN_SYNC_COMMITTEE_PARTICIPANTS`. May be older than head + latest: ForkedLightClientFinalityUpdate + + # The earliest slot for which light client data is imported + tail_slot: Any # Slot + + +@dataclass +class LightClientDataDB(object): + headers: Dict[Any, ForkedLightClientHeader] # Root -> ForkedLightClientHeader + current_branches: Dict[Any, Any] # Slot -> CurrentSyncCommitteeBranch + sync_committees: Dict[Any, Any] # SyncCommitteePeriod -> SyncCommittee + best_updates: Dict[Any, ForkedLightClientUpdate] # SyncCommitteePeriod -> ForkedLightClientUpdate + + +@dataclass +class LightClientDataStore(object): + spec: Any + + # Cached data to accelerate creating light client data + cache: LightClientDataCache + + # Persistent light client data + db: LightClientDataDB + + +@dataclass +class LightClientDataCollectionTest(object): + steps: List[Dict[str, Any]] + files: Set[str] + + # Fork schedule + phases: Any + + # History access + blocks: Dict[Any, ForkedSignedBeaconBlock] # Block root -> ForkedSignedBeaconBlock + finalized_block_roots: Dict[Any, Any] # Slot -> Root + states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState + finalized_checkpoint_states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState + latest_finalized_epoch: Any # Epoch + latest_finalized_bid: BlockID + historical_tail_slot: Any # Slot + + # Light client data + lc_data_store: LightClientDataStore + + +def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockID] + try: + block = test.blocks[bid.root] + while True: + if block.data.message.slot <= slot: + return _block_to_block_id(block.data) + + block = test.blocks[block.data.message.parent_root] + except KeyError: + return None + + +def _block_id_at_finalized_slot(test, slot): # -> Optional[BlockID] + while slot >= test.historical_tail_slot: + try: + return BlockID(slot=slot, root=test.finalized_block_roots[slot]) + except KeyError: + slot = slot - 1 + return None + + +def _get_current_sync_committee_for_finalized_period(test, period): # -> Optional[SyncCommittee] + low_slot = max( + test.historical_tail_slot, + test.lc_data_store.spec.compute_start_slot_at_epoch( + test.lc_data_store.spec.config.ALTAIR_FORK_EPOCH) + ) + if period < test.lc_data_store.spec.compute_sync_committee_period_at_slot(low_slot): + return None + period_start_slot = compute_start_slot_at_sync_committee_period(test.lc_data_store.spec, period) + sync_committee_slot = max(period_start_slot, low_slot) + bid = _block_id_at_finalized_slot(test, sync_committee_slot) + if bid is None: + return None + block = test.blocks[bid.root] + state = test.finalized_checkpoint_states[block.data.message.state_root] + if sync_committee_slot > state.data.slot: + state.spec, state.data, _ = transition_across_forks( + state.spec, state.data, sync_committee_slot, phases=test.phases) + assert is_post_altair(state.spec) + return state.data.current_sync_committee + + +def _light_client_header_for_block(test, block): # -> ForkedLightClientHeader + if not is_post_altair(block.spec): + spec = test.phases[ALTAIR] + else: + spec = block.spec + return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) + + +def _light_client_header_for_block_id(test, bid): # -> ForkedLightClientHeader + block = test.blocks[bid.root] + if not is_post_altair(block.spec): + spec = 
test.phases[ALTAIR] + else: + spec = block.spec + return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) + + +def _sync_aggregate_for_block_id(test, bid): # -> Optional[SyncAggregate] + block = test.blocks[bid.root] + if not is_post_altair(block.spec): + return None + return block.data.message.body.sync_aggregate + + +def _get_light_client_data(lc_data_store, bid): # -> CachedLightClientData + # Fetch cached light client data about a given block. + # Data must be cached (`_cache_lc_data`) before calling this function. + try: + return lc_data_store.cache.data[bid] + except KeyError: + raise ValueError("Trying to get light client data that was not cached") + + +def _cache_lc_data(lc_data_store, spec, state, bid, current_period_best_update, latest_signature_slot): + # Cache data for a given block and its post-state to speed up creating future + # `LightClientUpdate` and `LightClientBootstrap` instances that refer to this + # block and state. + cached_data = CachedLightClientData( + current_sync_committee_branch=latest_normalize_merkle_branch( + lc_data_store.spec, + spec.compute_merkle_proof(state, spec.current_sync_committee_gindex_at_slot(state.slot)), + latest_current_sync_committee_gindex(lc_data_store.spec)), + next_sync_committee_branch=latest_normalize_merkle_branch( + lc_data_store.spec, + spec.compute_merkle_proof(state, spec.next_sync_committee_gindex_at_slot(state.slot)), + latest_next_sync_committee_gindex(lc_data_store.spec)), + finalized_slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), + finality_branch=latest_normalize_merkle_branch( + lc_data_store.spec, + spec.compute_merkle_proof(state, spec.finalized_root_gindex_at_slot(state.slot)), + latest_finalized_root_gindex(lc_data_store.spec)), + current_period_best_update=current_period_best_update, + latest_signature_slot=latest_signature_slot, + ) + if bid in lc_data_store.cache.data: + raise ValueError("Redundant `_cache_lc_data` call") + lc_data_store.cache.data[bid] = cached_data + + +def _delete_light_client_data(lc_data_store, bid): + # Delete cached light client data for a given block. This needs to be called + # when a block becomes unreachable due to finalization of a different fork. 
+ del lc_data_store.cache.data[bid] + + +def _create_lc_finality_update_from_lc_data(test, + attested_bid, + signature_slot, + sync_aggregate): # -> ForkedLightClientFinalityUpdate + attested_header = _light_client_header_for_block_id(test, attested_bid) + attested_data = _get_light_client_data(test.lc_data_store, attested_bid) + finalized_bid = _block_id_at_finalized_slot(test, attested_data.finalized_slot) + if finalized_bid is not None: + if finalized_bid.slot != attested_data.finalized_slot: + # Empty slots at end of epoch, update cache for latest block slot + attested_data.finalized_slot = finalized_bid.slot + if finalized_bid.slot == attested_header.spec.GENESIS_SLOT: + finalized_header = ForkedLightClientHeader( + spec=attested_header.spec, + data=attested_header.spec.LightClientHeader(), + ) + else: + finalized_header = _light_client_header_for_block_id(test, finalized_bid) + finalized_header = ForkedLightClientHeader( + spec=attested_header.spec, + data=upgrade_lc_header_to_new_spec( + finalized_header.spec, + attested_header.spec, + finalized_header.data, + ) + ) + finality_branch = attested_data.finality_branch + return ForkedLightClientFinalityUpdate( + spec=attested_header.spec, + data=attested_header.spec.LightClientFinalityUpdate( + attested_header=attested_header.data, + finalized_header=finalized_header.data, + finality_branch=finality_branch, + sync_aggregate=sync_aggregate, + signature_slot=signature_slot, + ), + ) + + +def _create_lc_update_from_lc_data(test, + attested_bid, + signature_slot, + sync_aggregate, + next_sync_committee): # -> ForkedLightClientUpdate + finality_update = _create_lc_finality_update_from_lc_data( + test, attested_bid, signature_slot, sync_aggregate) + attested_data = _get_light_client_data(test.lc_data_store, attested_bid) + return ForkedLightClientUpdate( + spec=finality_update.spec, + data=finality_update.spec.LightClientUpdate( + attested_header=finality_update.data.attested_header, + next_sync_committee=next_sync_committee, + next_sync_committee_branch=attested_data.next_sync_committee_branch, + finalized_header=finality_update.data.finalized_header, + finality_branch=finality_update.data.finality_branch, + sync_aggregate=finality_update.data.sync_aggregate, + signature_slot=finality_update.data.signature_slot, + ) + ) + + +def _create_lc_update(test, spec, state, block, parent_bid): + # Create `LightClientUpdate` instances for a given block and its post-state, + # and keep track of best / latest ones. Data about the parent block's + # post-state must be cached (`_cache_lc_data`) before calling this. 
+ + # Verify attested block (parent) is recent enough and that state is available + attested_bid = parent_bid + attested_slot = attested_bid.slot + if attested_slot < test.lc_data_store.cache.tail_slot: + _cache_lc_data( + test.lc_data_store, + spec, + state, + _block_to_block_id(block), + current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), + latest_signature_slot=spec.GENESIS_SLOT, + ) + return + + # If sync committee period changed, reset `best` + attested_period = spec.compute_sync_committee_period_at_slot(attested_slot) + signature_slot = block.message.slot + signature_period = spec.compute_sync_committee_period_at_slot(signature_slot) + attested_data = _get_light_client_data(test.lc_data_store, attested_bid) + if attested_period != signature_period: + best = ForkedLightClientUpdate(spec=None, data=None) + else: + best = attested_data.current_period_best_update + + # If sync committee does not have sufficient participants, do not bump latest + sync_aggregate = block.message.body.sync_aggregate + num_active_participants = sum(sync_aggregate.sync_committee_bits) + if num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS: + latest_signature_slot = attested_data.latest_signature_slot + else: + latest_signature_slot = signature_slot + + # To update `best`, sync committee must have sufficient participants, and + # `signature_slot` must be in `attested_slot`'s sync committee period + if ( + num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS + or attested_period != signature_period + ): + _cache_lc_data( + test.lc_data_store, + spec, + state, + _block_to_block_id(block), + current_period_best_update=best, + latest_signature_slot=latest_signature_slot, + ) + return + + # Check if light client data improved + update = _create_lc_update_from_lc_data( + test, attested_bid, signature_slot, sync_aggregate, state.next_sync_committee) + is_better = ( + best.spec is None + or spec.is_better_update(update.data, upgrade_lc_update_to_new_spec( + best.spec, update.spec, best.data, test.phases)) + ) + + # Update best light client data for current sync committee period + if is_better: + best = update + _cache_lc_data( + test.lc_data_store, + spec, + state, + _block_to_block_id(block), + current_period_best_update=best, + latest_signature_slot=latest_signature_slot, + ) + + +def _create_lc_bootstrap(test, spec, bid): + block = test.blocks[bid.root] + period = spec.compute_sync_committee_period_at_slot(bid.slot) + if period not in test.lc_data_store.db.sync_committees: + test.lc_data_store.db.sync_committees[period] = \ + _get_current_sync_committee_for_finalized_period(test, period) + test.lc_data_store.db.headers[bid.root] = ForkedLightClientHeader( + spec=block.spec, data=block.spec.block_to_light_client_header(block.data)) + test.lc_data_store.db.current_branches[bid.slot] = \ + _get_light_client_data(test.lc_data_store, bid).current_sync_committee_branch + + +def _process_new_block_for_light_client(test, spec, state, block, parent_bid): + # Update light client data with information from a new block. + if block.message.slot < test.lc_data_store.cache.tail_slot: + return + + if is_post_altair(spec): + _create_lc_update(test, spec, state, block, parent_bid) + else: + raise ValueError("`tail_slot` cannot be before Altair") + + +def _process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid): + # Update light client data to account for a new head block. + # Note that `old_finalized_bid` is not yet updated when this is called. 
+ if head_bid.slot < test.lc_data_store.cache.tail_slot: + return + + # Commit best light client data for non-finalized periods + head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) + low_slot = max(test.lc_data_store.cache.tail_slot, old_finalized_bid.slot) + low_period = spec.compute_sync_committee_period_at_slot(low_slot) + bid = head_bid + for period in reversed(range(low_period, head_period + 1)): + period_end_slot = compute_start_slot_at_sync_committee_period(spec, period + 1) - 1 + bid = get_ancestor_of_block_id(test, bid, period_end_slot) + if bid is None or bid.slot < low_slot: + break + best = _get_light_client_data(test.lc_data_store, bid).current_period_best_update + if ( + best.spec is None + or sum(best.data.sync_aggregate.sync_committee_bits) < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS + ): + test.lc_data_store.db.best_updates.pop(period, None) + else: + test.lc_data_store.db.best_updates[period] = best + + # Update latest light client data + head_data = _get_light_client_data(test.lc_data_store, head_bid) + signature_slot = head_data.latest_signature_slot + if signature_slot <= low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + signature_bid = get_ancestor_of_block_id(test, head_bid, signature_slot) + if signature_bid is None or signature_bid.slot <= low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + attested_bid = get_ancestor_of_block_id(test, signature_bid, signature_bid.slot - 1) + if attested_bid is None or attested_bid.slot < low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + sync_aggregate = _sync_aggregate_for_block_id(test, signature_bid) + assert sync_aggregate is not None + test.lc_data_store.cache.latest = _create_lc_finality_update_from_lc_data( + test, attested_bid, signature_slot, sync_aggregate) + + +def _process_finalization_for_light_client(test, spec, finalized_bid, old_finalized_bid): + # Prune cached data that is no longer useful for creating future + # `LightClientUpdate` and `LightClientBootstrap` instances. + # This needs to be called whenever `finalized_checkpoint` changes. 
+ finalized_slot = finalized_bid.slot + if finalized_slot < test.lc_data_store.cache.tail_slot: + return + + # Cache `LightClientBootstrap` for newly finalized epoch boundary blocks + first_new_slot = old_finalized_bid.slot + 1 + low_slot = max(first_new_slot, test.lc_data_store.cache.tail_slot) + boundary_slot = finalized_slot + while boundary_slot >= low_slot: + bid = _block_id_at_finalized_slot(test, boundary_slot) + if bid is None: + break + if bid.slot >= low_slot: + _create_lc_bootstrap(test, spec, bid) + boundary_slot = _next_epoch_boundary_slot(spec, bid.slot) + if boundary_slot < spec.SLOTS_PER_EPOCH: + break + boundary_slot = boundary_slot - spec.SLOTS_PER_EPOCH + + # Prune light client data that is no longer referrable by future updates + bids_to_delete = [] + for bid in test.lc_data_store.cache.data: + if bid.slot >= finalized_bid.slot: + continue + bids_to_delete.append(bid) + for bid in bids_to_delete: + _delete_light_client_data(test.lc_data_store, bid) + + +def get_light_client_bootstrap(test, block_root): # -> ForkedLightClientBootstrap + try: + header = test.lc_data_store.db.headers[block_root] + except KeyError: + return ForkedLightClientBootstrap(spec=None, data=None) + + slot = header.data.beacon.slot + period = header.spec.compute_sync_committee_period_at_slot(slot) + return ForkedLightClientBootstrap( + spec=header.spec, + data=header.spec.LightClientBootstrap( + header=header.data, + current_sync_committee=test.lc_data_store.db.sync_committees[period], + current_sync_committee_branch=test.lc_data_store.db.current_branches[slot], + ) + ) + + +def get_light_client_update_for_period(test, period): # -> ForkedLightClientUpdate + try: + return test.lc_data_store.db.best_updates[period] + except KeyError: + return ForkedLightClientUpdate(spec=None, data=None) + + +def get_light_client_finality_update(test): # -> ForkedLightClientFinalityUpdate + return test.lc_data_store.cache.latest + + +def get_light_client_optimistic_update(test): # -> ForkedLightClientOptimisticUpdate + finality_update = get_light_client_finality_update(test) + if finality_update.spec is None: + return ForkedLightClientOptimisticUpdate(spec=None, data=None) + return ForkedLightClientOptimisticUpdate( + spec=finality_update.spec, + data=finality_update.spec.LightClientOptimisticUpdate( + attested_header=finality_update.data.attested_header, + sync_aggregate=finality_update.data.sync_aggregate, + signature_slot=finality_update.data.signature_slot, + ), + ) + + +def setup_lc_data_collection_test(spec, state, phases=None): + assert spec.compute_slots_since_epoch_start(state.slot) == 0 + + test = LightClientDataCollectionTest( + steps=[], + files=set(), + phases=phases, + blocks={}, + finalized_block_roots={}, + states={}, + finalized_checkpoint_states={}, + latest_finalized_epoch=state.finalized_checkpoint.epoch, + latest_finalized_bid=BlockID( + slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), + root=state.finalized_checkpoint.root, + ), + historical_tail_slot=state.slot, + lc_data_store=LightClientDataStore( + spec=spec, + cache=LightClientDataCache( + data={}, + latest=ForkedLightClientFinalityUpdate(spec=None, data=None), + tail_slot=max(state.slot, spec.compute_start_slot_at_epoch(spec.config.ALTAIR_FORK_EPOCH)), + ), + db=LightClientDataDB( + headers={}, + current_branches={}, + sync_committees={}, + best_updates={}, + ), + ), + ) + bid = _state_to_block_id(state) + yield "initial_state", state + test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, 
data=spec.SignedBeaconBlock( + message=spec.BeaconBlock(state_root=state.hash_tree_root()), + )) + test.finalized_block_roots[bid.slot] = bid.root + test.states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) + test.finalized_checkpoint_states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) + _cache_lc_data( + test.lc_data_store, spec, state, bid, + current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), + latest_signature_slot=spec.GENESIS_SLOT, + ) + _create_lc_bootstrap(test, spec, bid) + + return test + + +def finish_lc_data_collection_test(test): + yield "steps", test.steps + + +def _encode_lc_object(test, prefix, obj, slot, genesis_validators_root): + yield from [] # Consistently enable `yield from` syntax in calling tests + + file_name = f"{prefix}_{slot}_{encode_hex(obj.data.hash_tree_root())}" + if file_name not in test.files: + test.files.add(file_name) + yield file_name, obj.data + return { + "fork_digest": encode_hex(obj.spec.compute_fork_digest( + obj.spec.compute_fork_version(obj.spec.compute_epoch_at_slot(slot)), + genesis_validators_root, + )), + "data": file_name, + } + + +def add_new_block(test, spec, state, slot=None, num_sync_participants=0): + if slot is None: + slot = state.slot + 1 + assert slot > state.slot + parent_bid = _state_to_block_id(state) + + # Advance to target slot - 1 to ensure sync aggregate can be efficiently computed + if state.slot < slot - 1: + spec, state, _ = transition_across_forks(spec, state, slot - 1, phases=test.phases) + + # Compute sync aggregate, using: + # - sync committee based on target slot + # - fork digest based on target slot - 1 + # - signed data based on parent_bid.slot + # All three slots may be from different forks + sync_aggregate, signature_slot = get_sync_aggregate( + spec, state, num_participants=num_sync_participants, phases=test.phases) + assert signature_slot == slot + + # Apply final block with computed sync aggregate + spec, state, block = transition_across_forks( + spec, state, slot, phases=test.phases, with_block=True, sync_aggregate=sync_aggregate) + bid = _block_to_block_id(block) + test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, data=block) + test.states[block.message.state_root] = ForkedBeaconState(spec=spec, data=state) + _process_new_block_for_light_client(test, spec, state, block, parent_bid) + block_obj = yield from _encode_lc_object( + test, "block", ForkedSignedBeaconBlock(spec=spec, data=block), block.message.slot, + state.genesis_validators_root, + ) + test.steps.append({ + "new_block": block_obj + }) + return spec, state, bid + + +def select_new_head(test, spec, head_bid): + old_finalized_bid = test.latest_finalized_bid + _process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid) + + # Process finalization + block = test.blocks[head_bid.root] + state = test.states[block.data.message.state_root] + if state.data.finalized_checkpoint.epoch != spec.GENESIS_EPOCH: + block = test.blocks[state.data.finalized_checkpoint.root] + bid = _block_to_block_id(block.data) + new_finalized_bid = bid + if new_finalized_bid.slot > old_finalized_bid.slot: + old_finalized_epoch = None + new_finalized_epoch = state.data.finalized_checkpoint.epoch + while bid.slot > test.latest_finalized_bid.slot: + test.finalized_block_roots[bid.slot] = bid.root + finalized_epoch = spec.compute_epoch_at_slot(bid.slot + spec.SLOTS_PER_EPOCH - 1) + if finalized_epoch != old_finalized_epoch: + state = test.states[block.data.message.state_root] + 
test.finalized_checkpoint_states[block.data.message.state_root] = state + old_finalized_epoch = finalized_epoch + block = test.blocks[block.data.message.parent_root] + bid = _block_to_block_id(block.data) + test.latest_finalized_epoch = new_finalized_epoch + test.latest_finalized_bid = new_finalized_bid + _process_finalization_for_light_client(test, spec, new_finalized_bid, old_finalized_bid) + + blocks_to_delete = [] + for block_root, block in test.blocks.items(): + if block.data.message.slot < new_finalized_bid.slot: + blocks_to_delete.append(block_root) + for block_root in blocks_to_delete: + del test.blocks[block_root] + states_to_delete = [] + for state_root, state in test.states.items(): + if state.data.slot < new_finalized_bid.slot: + states_to_delete.append(state_root) + for state_root in states_to_delete: + del test.states[state_root] + + yield from [] # Consistently enable `yield from` syntax in calling tests + + bootstraps = [] + for state in test.finalized_checkpoint_states.values(): + bid = _state_to_block_id(state.data) + entry = { + "block_root": encode_hex(bid.root), + } + bootstrap = get_light_client_bootstrap(test, bid.root) + if bootstrap.spec is not None: + bootstrap_obj = yield from _encode_lc_object( + test, "bootstrap", bootstrap, bootstrap.data.header.beacon.slot, + state.data.genesis_validators_root, + ) + entry["bootstrap"] = bootstrap_obj + bootstraps.append(entry) + + best_updates = [] + low_period = spec.compute_sync_committee_period_at_slot(test.lc_data_store.cache.tail_slot) + head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) + for period in range(low_period, head_period + 1): + entry = { + "period": int(period), + } + update = get_light_client_update_for_period(test, period) + if update.spec is not None: + update_obj = yield from _encode_lc_object( + test, "update", update, update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + entry["update"] = update_obj + best_updates.append(entry) + + checks = { + "latest_finalized_checkpoint": { + "epoch": int(test.latest_finalized_epoch), + "root": encode_hex(test.latest_finalized_bid.root), + }, + "bootstraps": bootstraps, + "best_updates": best_updates, + } + finality_update = get_light_client_finality_update(test) + if finality_update.spec is not None: + finality_update_obj = yield from _encode_lc_object( + test, "finality_update", finality_update, finality_update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + checks["latest_finality_update"] = finality_update_obj + optimistic_update = get_light_client_optimistic_update(test) + if optimistic_update.spec is not None: + optimistic_update_obj = yield from _encode_lc_object( + test, "optimistic_update", optimistic_update, optimistic_update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + checks["latest_optimistic_update"] = optimistic_update_obj + + test.steps.append({ + "new_head": { + "head_block_root": encode_hex(head_bid.root), + "checks": checks, + } + }) + + +def run_lc_data_collection_test_multi_fork(spec, phases, state, fork_1, fork_2): + # Start test + test = yield from setup_lc_data_collection_test(spec, state, phases=phases) + + # Genesis block is post Altair and is finalized, so can be used as bootstrap + genesis_bid = BlockID(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid + + # Shared history up to 
final epoch of period before `fork_1` + fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') + fork_1_period = spec.compute_sync_committee_period(fork_1_epoch) + slot = compute_start_slot_at_sync_committee_period(spec, fork_1_period) - spec.SLOTS_PER_EPOCH + spec, state, bid = yield from add_new_block(test, spec, state, slot=slot, num_sync_participants=1) + yield from select_new_head(test, spec, bid) + assert get_light_client_bootstrap(test, bid.root).spec is None + slot_period = spec.compute_sync_committee_period_at_slot(slot) + if slot_period == 0: + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, 0).data) == genesis_bid + else: + for period in range(0, slot_period): + assert get_light_client_update_for_period(test, period).spec is None # attested period != signature period + state_period = spec.compute_sync_committee_period_at_slot(state.slot) + + # Branch A: Advance past `fork_2`, having blocks at slots 0 and 4 of each epoch + spec_a = spec + state_a = state + slot_a = state_a.slot + bids_a = [bid] + num_sync_participants_a = 1 + fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') + while spec_a.get_current_epoch(state_a) <= fork_2_epoch: + attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + slot_a += 4 + signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + if signature_period != attested_period: + num_sync_participants_a = 0 + num_sync_participants_a += 1 + spec_a, state_a, bid_a = yield from add_new_block( + test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) + yield from select_new_head(test, spec_a, bid_a) + for bid in bids_a: + assert get_light_client_bootstrap(test, bid.root).spec is None + if attested_period == signature_period: + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] + else: + assert signature_period == attested_period + 1 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_a[-1] + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_a[-1] + bids_a.append(bid_a) + + # Branch B: Advance past `fork_2`, having blocks at slots 1 and 5 of each epoch but no sync participation + spec_b = spec + state_b = state + slot_b = state_b.slot + bids_b = [bid] + while spec_b.get_current_epoch(state_b) <= fork_2_epoch: + slot_b += 4 + signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + spec_b, state_b, bid_b = yield from add_new_block( + test, spec_b, state_b, slot=slot_b) + # Simulate that this does not become head yet, e.g., this branch was withheld + for bid in bids_b: + assert get_light_client_bootstrap(test, bid.root).spec is None + bids_b.append(bid_b) + + # Branch B: Another block that becomes head + attested_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + slot_b += 1 + signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + num_sync_participants_b = 1 + spec_b, state_b, bid_b = yield from add_new_block( + test, spec_b, state_b, slot=slot_b, num_sync_participants=num_sync_participants_b) + yield from select_new_head(test, spec_b, bid_b) + for bid in bids_b: + assert get_light_client_bootstrap(test, bid.root).spec is None + if 
attested_period == signature_period: + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_b[-1] + else: + assert signature_period == attested_period + 1 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_b[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_b[-1] + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_b[-1] + bids_b.append(bid_b) + + # All data for periods between the common ancestor of the two branches should have reorged. + # As there was no sync participation on branch B, that means it is deleted. + state_b_period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + for period in range(state_period + 1, state_b_period): + assert get_light_client_update_for_period(test, period).spec is None + + # Branch A: Another block, reorging branch B once more + attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + slot_a = slot_b + 1 + signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + if signature_period != attested_period: + num_sync_participants_a = 0 + num_sync_participants_a += 1 + spec_a, state_a, bid_a = yield from add_new_block( + test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) + yield from select_new_head(test, spec_a, bid_a) + for bid in bids_a: + assert get_light_client_bootstrap(test, bid.root).spec is None + if attested_period == signature_period: + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] + else: + assert signature_period == attested_period + 1 + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_a[-1] + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_a[-1] + bids_a.append(bid_a) + + # Data has been restored + state_a_period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + for period in range(state_period + 1, state_a_period): + assert get_light_client_update_for_period(test, period).spec is not None + + # Finish test + yield from finish_lc_data_collection_test(test) From 24dffad1af31fe2dbda3b78a043de4b7445f9a2c Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 27 Nov 2024 14:28:19 +0100 Subject: [PATCH 15/76] Link tests with generator --- tests/generators/light_client/main.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index 6534524fe3..a5775b1cbe 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -12,11 +12,23 @@ bellatrix_mods = altair_mods _new_capella_mods = {key: 'eth2spec.test.capella.light_client.test_' + key for key in [ + 'data_collection', 'single_merkle_proof', + 'sync', ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) - deneb_mods = capella_mods - electra_mods = deneb_mods + + _new_deneb_mods = {key: 'eth2spec.test.deneb.light_client.test_' + key for key in [ + 'data_collection', + 'sync', + ]} + deneb_mods = 
combine_mods(_new_deneb_mods, capella_mods) + + _new_electra_mods = {key: 'eth2spec.test.electra.light_client.test_' + key for key in [ + 'data_collection', + 'sync', + ]} + electra_mods = combine_mods(_new_electra_mods, deneb_mods) all_mods = { ALTAIR: altair_mods, From eaed600263d10c1e7f15f2a98d09fb2bfffd5a73 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 27 Nov 2024 14:29:44 +0100 Subject: [PATCH 16/76] Lint --- .../test/capella/light_client/test_sync.py | 1 + .../light_client/test_data_collection.py | 1 + .../test/deneb/light_client/test_sync.py | 1 + .../test/electra/light_client/test_sync.py | 1 + .../helpers/light_client_data_collection.py | 24 ++++++++++++++----- 5 files changed, 22 insertions(+), 6 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py index 3958900be5..99a56f96e0 100644 --- a/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py @@ -15,6 +15,7 @@ run_lc_sync_test_upgraded_store_with_legacy_data, ) + @with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) @spec_test @with_config_overrides({ diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py index 03b7286988..5e894a5d13 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py @@ -14,6 +14,7 @@ run_lc_data_collection_test_multi_fork, ) + @with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) @spec_test @with_config_overrides({ diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py index d19e1e0238..45a8ff2c8f 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py @@ -16,6 +16,7 @@ run_lc_sync_test_upgraded_store_with_legacy_data, ) + @with_phases(phases=[CAPELLA], other_phases=[DENEB]) @spec_test @with_config_overrides({ diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py index 2b20552d6b..c37e8b21e1 100644 --- a/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py @@ -16,6 +16,7 @@ run_lc_sync_test_upgraded_store_with_legacy_data, ) + @with_phases(phases=[DENEB], other_phases=[ELECTRA]) @spec_test @with_config_overrides({ diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py b/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py index d56ea05310..5de9b37c61 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py @@ -816,10 +816,14 @@ def run_lc_data_collection_test_multi_fork(spec, phases, state, fork_1, fork_2): for bid in bids_a: assert get_light_client_bootstrap(test, bid.root).spec is None if attested_period == signature_period: - assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_a[-1] else: 
assert signature_period == attested_period + 1 - assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_a[-2] assert get_light_client_update_for_period(test, signature_period).spec is None assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_a[-1] assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_a[-1] @@ -851,10 +855,14 @@ def run_lc_data_collection_test_multi_fork(spec, phases, state, fork_1, fork_2): for bid in bids_b: assert get_light_client_bootstrap(test, bid.root).spec is None if attested_period == signature_period: - assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_b[-1] + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_b[-1] else: assert signature_period == attested_period + 1 - assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_b[-2] + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_b[-2] assert get_light_client_update_for_period(test, signature_period).spec is None assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_b[-1] assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_b[-1] @@ -879,10 +887,14 @@ def run_lc_data_collection_test_multi_fork(spec, phases, state, fork_1, fork_2): for bid in bids_a: assert get_light_client_bootstrap(test, bid.root).spec is None if attested_period == signature_period: - assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-1] + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_a[-1] else: assert signature_period == attested_period + 1 - assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, attested_period).data) == bids_a[-2] + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_a[-2] assert get_light_client_update_for_period(test, signature_period).spec is None assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_a[-1] assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_a[-1] From 531a0b08862d3c7b937802e07479b2d4dc8764bb Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 27 Nov 2024 15:43:26 +0100 Subject: [PATCH 17/76] Fix module list --- tests/generators/light_client/main.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index a5775b1cbe..a6174b277d 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -12,7 +12,6 @@ bellatrix_mods = altair_mods _new_capella_mods = {key: 'eth2spec.test.capella.light_client.test_' + key for key in [ - 'data_collection', 'single_merkle_proof', 'sync', ]} From 12401a5be5867b7fe219a27954e5690a5bc5439e Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 28 Nov 2024 13:02:12 +0100 Subject: [PATCH 18/76] Move fork tests to origin rather than destination to fix issues --- 
.../test/altair/light_client/test_sync.py | 56 ++++++++++++++++++- .../light_client/__init__.py | 0 .../light_client/test_data_collection.py | 0 .../light_client/test_sync.py | 42 ++++++-------- .../light_client/test_data_collection.py | 0 .../test/capella/light_client/test_sync.py | 26 +++++---- tests/core/pyspec/eth2spec/test/context.py | 9 +++ .../test/deneb/light_client/test_sync.py | 36 ++---------- .../test/helpers/light_client_sync.py | 22 -------- tests/generators/light_client/main.py | 15 +++-- 10 files changed, 108 insertions(+), 98 deletions(-) rename tests/core/pyspec/eth2spec/test/{electra => bellatrix}/light_client/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{deneb => bellatrix}/light_client/test_data_collection.py (100%) rename tests/core/pyspec/eth2spec/test/{electra => bellatrix}/light_client/test_sync.py (55%) rename tests/core/pyspec/eth2spec/test/{electra => capella}/light_client/test_data_collection.py (100%) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py index 8000ceb799..1c77e648ab 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py @@ -1,13 +1,18 @@ from eth2spec.test.context import ( spec_state_test_with_matching_config, - with_presets, + spec_test, + with_all_phases_to, with_light_client, + with_matching_spec_config, + with_presets, + with_state, ) from eth2spec.test.helpers.attestations import ( next_slots_with_attestations, state_transition_with_full_block, ) from eth2spec.test.helpers.constants import ( + CAPELLA, DENEB, ELECTRA, MINIMAL, ) from eth2spec.test.helpers.light_client import ( @@ -352,3 +357,52 @@ def test_advance_finality_without_sync_committee(spec, state): # Finish test yield from finish_lc_sync_test(test) + + +def run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, fork): + # Start test (Legacy bootstrap with an upgraded store) + test = yield from setup_lc_sync_test(spec, state, phases[fork], phases) + + # Initial `LightClientUpdate` (check that the upgraded store can process it) + finalized_block = spec.SignedBeaconBlock() + finalized_block.message.state_root = state.hash_tree_root() + finalized_state = state.copy() + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finish test + yield from finish_lc_sync_test(test) + + +@with_all_phases_to(CAPELLA, other_phases=[CAPELLA]) +@spec_test +@with_state +@with_matching_spec_config(emitted_fork=CAPELLA) +@with_presets([MINIMAL], reason="too slow") +def test_capella_store_with_legacy_data(spec, phases, state): + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) + + +@with_all_phases_to(DENEB, other_phases=[CAPELLA, DENEB]) +@spec_test +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def 
test_deneb_store_with_legacy_data(spec, phases, state): + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) + + +@with_all_phases_to(ELECTRA, other_phases=[CAPELLA, DENEB, ELECTRA]) +@spec_test +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_electra_store_with_legacy_data(spec, phases, state): + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/__init__.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/electra/light_client/__init__.py rename to tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/deneb/light_client/test_data_collection.py rename to tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_sync.py similarity index 55% rename from tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py rename to tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_sync.py index c37e8b21e1..81b44d8749 100644 --- a/tests/core/pyspec/eth2spec/test/electra/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_sync.py @@ -7,59 +7,49 @@ with_state, ) from eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, + BELLATRIX, CAPELLA, DENEB, ELECTRA, MINIMAL, ) from eth2spec.test.helpers.light_client_sync import ( run_lc_sync_test_multi_fork, run_lc_sync_test_single_fork, - run_lc_sync_test_upgraded_store_with_legacy_data, ) -@with_phases(phases=[DENEB], other_phases=[ELECTRA]) +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) @spec_test @with_config_overrides({ - 'ELECTRA_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 }, emit=False) @with_state -@with_matching_spec_config(emitted_fork=ELECTRA) +@with_matching_spec_config(emitted_fork=CAPELLA) @with_presets([MINIMAL], reason="too slow") -def test_electra_fork(spec, phases, state): - yield from run_lc_sync_test_single_fork(spec, phases, state, ELECTRA) +def test_capella_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, CAPELLA) -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB, ELECTRA]) +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) @spec_test @with_config_overrides({ 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 'DENEB_FORK_EPOCH': 4, - 'ELECTRA_FORK_EPOCH': 5, }, emit=False) @with_state -@with_matching_spec_config(emitted_fork=ELECTRA) +@with_matching_spec_config(emitted_fork=DENEB) @with_presets([MINIMAL], reason="too slow") -def test_capella_electra_fork(spec, phases, state): - yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, ELECTRA) +def test_capella_deneb_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, DENEB) -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB, 
ELECTRA]) @spec_test @with_config_overrides({ - 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 - 'ELECTRA_FORK_EPOCH': 4, + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'DENEB_FORK_EPOCH': 4, + 'ELECTRA_FORK_EPOCH': 5, }, emit=False) @with_state @with_matching_spec_config(emitted_fork=ELECTRA) @with_presets([MINIMAL], reason="too slow") -def test_deneb_electra_fork(spec, phases, state): - yield from run_lc_sync_test_multi_fork(spec, phases, state, DENEB, ELECTRA) - - -@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA, DENEB], other_phases=[CAPELLA, DENEB, ELECTRA]) -@spec_test -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_electra_store_with_legacy_data(spec, phases, state): - yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, ELECTRA) +def test_capella_electra_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/electra/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/electra/light_client/test_data_collection.py rename to tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py index 99a56f96e0..faa727d6d2 100644 --- a/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py @@ -7,31 +7,35 @@ with_state, ) from eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, + CAPELLA, DENEB, ELECTRA, MINIMAL, ) from eth2spec.test.helpers.light_client_sync import ( + run_lc_sync_test_multi_fork, run_lc_sync_test_single_fork, - run_lc_sync_test_upgraded_store_with_legacy_data, ) -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) @spec_test @with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 }, emit=False) @with_state -@with_matching_spec_config(emitted_fork=CAPELLA) +@with_matching_spec_config(emitted_fork=DENEB) @with_presets([MINIMAL], reason="too slow") -def test_capella_fork(spec, phases, state): - yield from run_lc_sync_test_single_fork(spec, phases, state, CAPELLA) +def test_deneb_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, DENEB) -@with_phases(phases=[ALTAIR, BELLATRIX], other_phases=[CAPELLA]) +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) @spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'ELECTRA_FORK_EPOCH': 4, +}, emit=False) @with_state -@with_matching_spec_config(emitted_fork=CAPELLA) +@with_matching_spec_config(emitted_fork=ELECTRA) @with_presets([MINIMAL], reason="too slow") -def test_capella_store_with_legacy_data(spec, phases, state): - yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) +def test_deneb_electra_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, DENEB, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 8b2e8de6d3..f2298d297b 100644 --- 
a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -436,6 +436,15 @@ def with_all_phases_from_except(earliest_phase, except_phases=None): return with_all_phases_from(earliest_phase, [phase for phase in ALL_PHASES if phase not in except_phases]) +def with_all_phases_to(next_phase, all_phases=ALL_PHASES): + """ + A decorator factory for running a tests with every phase except the ones listed + """ + def decorator(fn): + return with_phases([phase for phase in all_phases if is_post_fork(next_phase, phase)])(fn) + return decorator + + def with_all_phases_except(exclusion_phases): """ A decorator factory for running a tests with every phase except the ones listed diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py index 45a8ff2c8f..2a2b4db118 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py @@ -7,45 +7,21 @@ with_state, ) from eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, DENEB, + DENEB, ELECTRA, MINIMAL, ) from eth2spec.test.helpers.light_client_sync import ( - run_lc_sync_test_multi_fork, run_lc_sync_test_single_fork, - run_lc_sync_test_upgraded_store_with_legacy_data, ) -@with_phases(phases=[CAPELLA], other_phases=[DENEB]) +@with_phases(phases=[DENEB], other_phases=[ELECTRA]) @spec_test @with_config_overrides({ - 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'ELECTRA_FORK_EPOCH': 3, # Test setup advances to epoch 2 }, emit=False) @with_state -@with_matching_spec_config(emitted_fork=DENEB) +@with_matching_spec_config(emitted_fork=ELECTRA) @with_presets([MINIMAL], reason="too slow") -def test_deneb_fork(spec, phases, state): - yield from run_lc_sync_test_single_fork(spec, phases, state, DENEB) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 - 'DENEB_FORK_EPOCH': 4, -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_fork(spec, phases, state): - yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, DENEB) - - -@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_store_with_legacy_data(spec, phases, state): - yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) +def test_electra_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py b/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py index e64b0a2eca..54a5c0f970 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py @@ -318,25 +318,3 @@ def run_lc_sync_test_multi_fork(spec, phases, state, fork_1, fork_2): # Finish test yield from finish_lc_sync_test(test) - - -def run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, fork): - # Start test (Legacy bootstrap with an upgraded store) - test = yield from setup_lc_sync_test(spec, state, phases[fork], phases) - - # Initial `LightClientUpdate` (check that the upgraded store can process it) - 
finalized_block = spec.SignedBeaconBlock() - finalized_block.message.state_root = state.hash_tree_root() - finalized_state = state.copy() - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finish test - yield from finish_lc_sync_test(test) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index a6174b277d..e362c6b4c0 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -9,25 +9,24 @@ 'sync', 'update_ranking', ]} - bellatrix_mods = altair_mods + + _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.light_client.test_' + key for key in [ + 'data_collection', + ]} + bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) _new_capella_mods = {key: 'eth2spec.test.capella.light_client.test_' + key for key in [ + 'data_collection', 'single_merkle_proof', 'sync', ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) _new_deneb_mods = {key: 'eth2spec.test.deneb.light_client.test_' + key for key in [ - 'data_collection', 'sync', ]} deneb_mods = combine_mods(_new_deneb_mods, capella_mods) - - _new_electra_mods = {key: 'eth2spec.test.electra.light_client.test_' + key for key in [ - 'data_collection', - 'sync', - ]} - electra_mods = combine_mods(_new_electra_mods, deneb_mods) + electra_mods = deneb_mods all_mods = { ALTAIR: altair_mods, From 30bed615ffde18429dc349ee07b7fbcc715b9a79 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 28 Nov 2024 13:06:19 +0100 Subject: [PATCH 19/76] Add missing mod --- tests/generators/light_client/main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index e362c6b4c0..6420382240 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -12,6 +12,7 @@ _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.light_client.test_' + key for key in [ 'data_collection', + 'sync', ]} bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) From a52a82c11e1e8ab544b5ecbb4be7297ed6b3a164 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 28 Nov 2024 14:36:58 +0100 Subject: [PATCH 20/76] Extend decorator factory to support `other_phases` --- tests/core/pyspec/eth2spec/test/context.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index f2298d297b..16149bb861 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -436,12 +436,15 @@ def with_all_phases_from_except(earliest_phase, except_phases=None): return with_all_phases_from(earliest_phase, [phase for phase in ALL_PHASES if phase not in except_phases]) -def with_all_phases_to(next_phase, all_phases=ALL_PHASES): +def with_all_phases_to(next_phase, other_phases=None, all_phases=ALL_PHASES): """ - A decorator factory for running a tests 
with every phase except the ones listed + A decorator factory for running a tests with every phase up to and excluding the one listed """ def decorator(fn): - return with_phases([phase for phase in all_phases if is_post_fork(next_phase, phase)])(fn) + return with_phases( + [phase for phase in all_phases if is_post_fork(next_phase, phase)], + other_phases=other_phases, + )(fn) return decorator From 09e8f013105e40f487bfbd060f2a5732d9fd1ebe Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 28 Nov 2024 15:17:00 +0100 Subject: [PATCH 21/76] Make `from` -> `to` bounds explicit --- .../eth2spec/test/altair/light_client/test_sync.py | 10 +++++----- tests/core/pyspec/eth2spec/test/context.py | 10 +++++++--- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py index 1c77e648ab..15437f0959 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py @@ -1,7 +1,7 @@ from eth2spec.test.context import ( spec_state_test_with_matching_config, spec_test, - with_all_phases_to, + with_all_phases_from_to, with_light_client, with_matching_spec_config, with_presets, @@ -12,7 +12,7 @@ state_transition_with_full_block, ) from eth2spec.test.helpers.constants import ( - CAPELLA, DENEB, ELECTRA, + ALTAIR, CAPELLA, DENEB, ELECTRA, MINIMAL, ) from eth2spec.test.helpers.light_client import ( @@ -381,7 +381,7 @@ def run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, fork): yield from finish_lc_sync_test(test) -@with_all_phases_to(CAPELLA, other_phases=[CAPELLA]) +@with_all_phases_from_to(ALTAIR, CAPELLA, other_phases=[CAPELLA]) @spec_test @with_state @with_matching_spec_config(emitted_fork=CAPELLA) @@ -390,7 +390,7 @@ def test_capella_store_with_legacy_data(spec, phases, state): yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) -@with_all_phases_to(DENEB, other_phases=[CAPELLA, DENEB]) +@with_all_phases_from_to(ALTAIR, DENEB, other_phases=[CAPELLA, DENEB]) @spec_test @with_state @with_matching_spec_config(emitted_fork=DENEB) @@ -399,7 +399,7 @@ def test_deneb_store_with_legacy_data(spec, phases, state): yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) -@with_all_phases_to(ELECTRA, other_phases=[CAPELLA, DENEB, ELECTRA]) +@with_all_phases_from_to(ALTAIR, ELECTRA, other_phases=[CAPELLA, DENEB, ELECTRA]) @spec_test @with_state @with_matching_spec_config(emitted_fork=ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 16149bb861..a90190287d 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -436,13 +436,17 @@ def with_all_phases_from_except(earliest_phase, except_phases=None): return with_all_phases_from(earliest_phase, [phase for phase in ALL_PHASES if phase not in except_phases]) -def with_all_phases_to(next_phase, other_phases=None, all_phases=ALL_PHASES): +def with_all_phases_from_to(from_phase, to_phase, other_phases=None, all_phases=ALL_PHASES): """ - A decorator factory for running a tests with every phase up to and excluding the one listed + A decorator factory for running a tests with every phase + from a given start phase up to and excluding a given end phase """ def decorator(fn): return with_phases( - [phase for phase in all_phases if is_post_fork(next_phase, 
phase)], + [phase for phase in all_phases if ( + phase != to_phase and is_post_fork(to_phase, phase) + and is_post_fork(phase, from_phase) + )], other_phases=other_phases, )(fn) return decorator From 8ab7bc60a5c5f7d709951e1486a9556d471e3ffb Mon Sep 17 00:00:00 2001 From: Manu NALEPA Date: Thu, 12 Dec 2024 10:19:49 +0100 Subject: [PATCH 22/76] Address jxs comment. --- specs/phase0/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index fa569573f3..48228f0ff0 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -123,8 +123,8 @@ This section outlines the specification for the networking stack in Ethereum con Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently), we hereby define a profile for basic interoperability. -All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). -The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously). +All implementations MUST support the TCP libp2p transport, MAY support the QUIC (UDP) libp2p transport, and MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). +The libp2p TCP and QUIC (UDP) transports support listening on IPv4 and IPv6 addresses (and on multiple simultaneously). Clients must support listening on at least one of IPv4 or IPv6. Clients that do _not_ have support for listening on IPv4 SHOULD be cognizant of the potential disadvantages in terms of From 85eff0c67eff2ebf9da4a9bb3d489bdfd8826df8 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 12 Dec 2024 14:58:02 +0100 Subject: [PATCH 23/76] Clarify gossip limits In the gossip specification, the `GOSSIP_MAX_SIZE` constant is specified for the uncompressed payload size in the gossipsub message. This PR clarifies how this limit applies to the various fields of the gossipsub message and provides additional limits derived from it that allow clients to more aggressively discard messages. In particular, clients are allowed to impose more strict limits on topics such as attestation and aggregates - an `Attestation` for example takes no more than `~228` bytes (to be verified!), far below the 10mb limit, though implicitly clients should already see these limits imposed as rejections by their SSZ decoder - this clarification mainly highlights the possibilty to perform this check earlier in the process. --- specs/phase0/p2p-interface.md | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 396e4671b8..5fd2771829 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -268,12 +268,22 @@ This defines both the type of data being sent on the topic and how the data fiel - `Encoding` - the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encodings) section for further details. +Clients MUST reject messages with unknown topic. + *Note*: `ForkDigestValue` is composed of values that are not known until the genesis block/state are available. Due to this, clients SHOULD NOT subscribe to gossipsub topics until these genesis values are known. 
-Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`. -Clients MUST reject (fail validation) messages that are over this size limit. -Likewise, clients MUST NOT emit or propagate messages larger than this limit. +The uncompressed payload in the [`data`](https://github.com/libp2p/go-libp2p-pubsub/blob/c06df2f9a38e9382e644b241adf0e96e5ca00955/pb/rpc.proto#L19) +must have has a size no greater than `GOSSIP_MAX_SIZE`. + +After compression, the payload in the `data` field must have a size no greater than +`32 + GOSSIP_MAX_SIZE + GOSSIP_MAX_SIZE / 6` (rounded down), as given by the +[snappy maximum compressed size function](https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47). + +Clients MUST reject (fail validation) messages with payloads that are over these size limits. +Likewise, clients MUST NOT emit or propagate messages larger than these limits. + +Clients MAY use [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds) to determine the payload size limit, when this size is lower than `GOSSIP_MAX_SIZE`. The optional `from` (1), `seqno` (3), `signature` (5) and `key` (6) protobuf fields are omitted from the message, since messages are identified by content, anonymous, and signed where necessary in the application layer. @@ -288,6 +298,10 @@ The `message-id` of a gossipsub message MUST be the following 20 byte value comp the concatenation of `MESSAGE_DOMAIN_INVALID_SNAPPY` with the raw message data, i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + message.data)[:20]`. +Where relevant, clients MUST reject messages with `message-id` sizes other than 20 bytes. + +Clients MAY reject messages whose protobuf-encoded size exceeds the maximum possible size based on the limits above. + *Note*: The above logic handles two exceptional cases: (1) multiple snappy `data` can decompress to the same value, and (2) some message `data` can fail to snappy decompress altogether. 
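As a rough illustration of the size checks introduced by this patch, a client might bound both the compressed and the uncompressed `data` field before any further validation. The sketch below is not part of the spec diff: the constant mirrors the 10 MiB `GOSSIP_MAX_SIZE` from the configuration table, and the helper names are invented for this example.

```python
# Illustrative sketch only, assuming GOSSIP_MAX_SIZE = 10 MiB as in the configuration table.
GOSSIP_MAX_SIZE = 10 * 2**20


def snappy_max_compressed_len(n: int) -> int:
    # Snappy's worst-case output size for an n-byte input: 32 + n + n / 6, rounded down.
    return 32 + n + n // 6


def payload_within_limits(compressed_data: bytes, uncompressed_data: bytes) -> bool:
    # Reject (fail validation) if either representation of the payload exceeds its limit.
    if len(compressed_data) > snappy_max_compressed_len(GOSSIP_MAX_SIZE):
        return False
    return len(uncompressed_data) <= GOSSIP_MAX_SIZE
```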
From 9c4447bdde7f12dbe033facf0ee7b4d369c24427 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Fri, 13 Dec 2024 15:22:05 -0600 Subject: [PATCH 24/76] Pepper in some lru_cache decorators --- setup.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 0bc90ae787..55f1d0e344 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ import copy from collections import OrderedDict import json -from functools import reduce +from functools import lru_cache from pysetup.constants import ( # code names @@ -70,6 +70,7 @@ def installPackage(package: str): from marko.ext.gfm.elements import Table +@lru_cache(maxsize=None) def _get_name_from_heading(heading: Heading) -> Optional[str]: last_child = heading.children[-1] if isinstance(last_child, CodeSpan): @@ -77,15 +78,18 @@ def _get_name_from_heading(heading: Heading) -> Optional[str]: return None +@lru_cache(maxsize=None) def _get_source_from_code_block(block: FencedCode) -> str: return block.children[0].children.strip() +@lru_cache(maxsize=None) def _get_function_name_from_source(source: str) -> str: fn = ast.parse(source).body[0] return fn.name +@lru_cache(maxsize=None) def _get_self_type_from_source(source: str) -> Optional[str]: fn = ast.parse(source).body[0] args = fn.args.args @@ -98,6 +102,7 @@ def _get_self_type_from_source(source: str) -> Optional[str]: return args[0].annotation.id +@lru_cache(maxsize=None) def _get_class_info_from_source(source: str) -> Tuple[str, Optional[str]]: class_def = ast.parse(source).body[0] base = class_def.bases[0] @@ -113,12 +118,14 @@ def _get_class_info_from_source(source: str) -> Tuple[str, Optional[str]]: return class_def.name, parent_class +@lru_cache(maxsize=None) def _is_constant_id(name: str) -> bool: if name[0] not in string.ascii_uppercase + '_': return False return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:])) +@lru_cache(maxsize=None) def _load_kzg_trusted_setups(preset_name): trusted_setups_file_path = str(Path(__file__).parent) + '/presets/' + preset_name + '/trusted_setups/trusted_setup_4096.json' @@ -130,6 +137,7 @@ def _load_kzg_trusted_setups(preset_name): return trusted_setup_G1_monomial, trusted_setup_G1_lagrange, trusted_setup_G2_monomial +@lru_cache(maxsize=None) def _load_curdleproofs_crs(preset_name): """ NOTE: File generated from https://github.com/asn-d6/curdleproofs/blob/8e8bf6d4191fb6a844002f75666fb7009716319b/tests/crs.rs#L53-L67 @@ -153,6 +161,7 @@ def _load_curdleproofs_crs(preset_name): } +@lru_cache(maxsize=None) def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]: _, _, title = child._parse_info if not (title[0] == "(" and title[len(title)-1] == ")"): @@ -163,6 +172,7 @@ def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]: return title[len(ETH2_SPEC_COMMENT_PREFIX):].strip() +@lru_cache(maxsize=None) def _parse_value(name: str, typed_value: str, type_hint: Optional[str] = None) -> VariableDefinition: comment = None if name in ("ROOT_OF_UNITY_EXTENDED", "ROOTS_OF_UNITY_EXTENDED", "ROOTS_OF_UNITY_REDUCED"): @@ -185,6 +195,11 @@ def _update_constant_vars_with_kzg_setups(constant_vars, preset_name): constant_vars['KZG_SETUP_G2_MONOMIAL'] = VariableDefinition(constant_vars['KZG_SETUP_G2_MONOMIAL'].value, str(kzg_setups[2]), comment, None) +@lru_cache(maxsize=None) +def parse_markdown(content: str): + return gfm.parse(content) + + def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], preset_name=str) -> SpecObject: functions: Dict[str, str] = 
{} protocols: Dict[str, ProtocolDefinition] = {} @@ -198,7 +213,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr custom_types: Dict[str, str] = {} with open(file_name) as source_file: - document = gfm.parse(source_file.read()) + document = parse_markdown(source_file.read()) current_name = None should_skip = False @@ -326,6 +341,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr ) +@lru_cache(maxsize=None) def load_preset(preset_files: Sequence[Path]) -> Dict[str, str]: """ Loads the a directory of preset files, merges the result into one preset. @@ -344,6 +360,7 @@ def load_preset(preset_files: Sequence[Path]) -> Dict[str, str]: return parse_config_vars(preset) +@lru_cache(maxsize=None) def load_config(config_path: Path) -> Dict[str, str]: """ Loads the given configuration file. @@ -358,7 +375,7 @@ def build_spec(fork: str, source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str: - preset = load_preset(preset_files) + preset = load_preset(tuple(preset_files)) config = load_config(config_file) all_specs = [get_spec(spec, preset, config, preset_name) for spec in source_files] From a58b1f52ddbf1daed66cac210256272694f8bb79 Mon Sep 17 00:00:00 2001 From: Paul Harris Date: Sun, 15 Dec 2024 08:37:22 +1000 Subject: [PATCH 25/76] clarify gossip sources wording --- specs/_features/eip7732/p2p-interface.md | 4 ++-- specs/deneb/p2p-interface.md | 2 +- specs/fulu/p2p-interface.md | 2 +- specs/phase0/p2p-interface.md | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/specs/_features/eip7732/p2p-interface.md b/specs/_features/eip7732/p2p-interface.md index df02cc2382..22b0ba7ede 100644 --- a/specs/_features/eip7732/p2p-interface.md +++ b/specs/_features/eip7732/p2p-interface.md @@ -151,7 +151,7 @@ This topic is used to propagate execution payload messages as `SignedExecutionPa The following validations MUST pass before forwarding the `signed_execution_payload_envelope` on the network, assuming the alias `envelope = signed_execution_payload_envelope.message`, `payload = payload_envelope.payload`: -- _[IGNORE]_ The envelope's block root `envelope.block_root` has been seen (via both gossip and non-gossip sources) (a client MAY queue payload for processing once the block is retrieved). +- _[IGNORE]_ The envelope's block root `envelope.block_root` has been seen (via gossip or non-gossip sources) (a client MAY queue payload for processing once the block is retrieved). - _[IGNORE]_ The node has not seen another valid `SignedExecutionPayloadEnvelope` for this block root from this builder. Let `block` be the block with `envelope.beacon_block_root`. @@ -171,7 +171,7 @@ The following validations MUST pass before forwarding the `payload_attestation_m - _[IGNORE]_ The message's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `data.slot == current_slot`. - _[REJECT]_ The message's payload status is a valid status, i.e. `data.payload_status < PAYLOAD_INVALID_STATUS`. - _[IGNORE]_ The `payload_attestation_message` is the first valid message received from the validator with index `payload_attestation_message.validate_index`. -- _[IGNORE]_ The message's block `data.beacon_block_root` has been seen (via both gossip and non-gossip sources) (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after). 
+- _[IGNORE]_ The message's block `data.beacon_block_root` has been seen (via gossip or non-gossip sources) (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after). - _[REJECT]_ The message's block `data.beacon_block_root` passes validation. - _[REJECT]_ The message's validator index is within the payload committee in `get_ptc(state, data.slot)`. The `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice. - _[REJECT]_ The message's signature of `payload_attestation_message.signature` is valid with respect to the validator index. diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 5f71bc854a..b3edc9d5bf 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -181,7 +181,7 @@ The following validations MUST pass before forwarding the `blob_sidecar` on the - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `block_header.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `block_header.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)` - _[REJECT]_ The proposer signature of `blob_sidecar.signed_block_header`, is valid with respect to the `block_header.proposer_index` pubkey. -- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). +- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via gossip or non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The sidecar's block's parent (defined by `block_header.parent_root`) passes validation. - _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `block_header.parent_root`). - _[REJECT]_ The current finalized_checkpoint is an ancestor of the sidecar's block -- i.e. `get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`. diff --git a/specs/fulu/p2p-interface.md b/specs/fulu/p2p-interface.md index abebbffecc..26227a7eda 100644 --- a/specs/fulu/p2p-interface.md +++ b/specs/fulu/p2p-interface.md @@ -195,7 +195,7 @@ The following validations MUST pass before forwarding the `sidecar: DataColumnSi - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `block_header.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `block_header.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` - _[REJECT]_ The proposer signature of `sidecar.signed_block_header`, is valid with respect to the `block_header.proposer_index` pubkey. -- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). 
+- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via gossip or non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The sidecar's block's parent (defined by `block_header.parent_root`) passes validation. - _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `block_header.parent_root`). - _[REJECT]_ The current finalized_checkpoint is an ancestor of the sidecar's block -- i.e. `get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`. diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 396e4671b8..4f7749a007 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -338,7 +338,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block` - _[IGNORE]_ The block is the first block with valid signature received for the proposer for the slot, `signed_beacon_block.message.slot`. - _[REJECT]_ The proposer signature, `signed_beacon_block.signature`, is valid with respect to the `proposer_index` pubkey. - _[IGNORE]_ The block's parent (defined by `block.parent_root`) has been seen - (via both gossip and non-gossip sources) + (via gossip or non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). - _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation. - _[REJECT]_ The block is from a higher slot than its parent. @@ -387,7 +387,7 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_ - _[REJECT]_ The aggregator signature, `signed_aggregate_and_proof.signature`, is valid. - _[REJECT]_ The signature of `aggregate` is valid. - _[IGNORE]_ The block being voted for (`aggregate.data.beacon_block_root`) has been seen - (via both gossip and non-gossip sources) + (via gossip or non-gossip sources) (a client MAY queue aggregates for processing once block is retrieved). - _[REJECT]_ The block being voted for (`aggregate.data.beacon_block_root`) passes validation. - _[REJECT]_ The aggregate attestation's target block is an ancestor of the block named in the LMD vote -- i.e. @@ -462,7 +462,7 @@ The following validations MUST pass before forwarding the `attestation` on the s that has an identical `attestation.data.target.epoch` and participating validator index. - _[REJECT]_ The signature of `attestation` is valid. - _[IGNORE]_ The block being voted for (`attestation.data.beacon_block_root`) has been seen - (via both gossip and non-gossip sources) + (via gossip or non-gossip sources) (a client MAY queue attestations for processing once block is retrieved). - _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation. - _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e. 
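The validations adjusted in this patch all permit a client to queue a message whose referenced block has simply not been seen yet. A minimal sketch of that queueing behaviour, with names and structure invented purely for illustration, could look like the following:

```python
# Illustrative sketch only, not spec content: queue gossip messages whose referenced
# block root is unknown, and replay them once the block arrives from any source
# (gossip or non-gossip).
from collections import defaultdict
from typing import Any, Dict, List, Set

seen_block_roots: Set[bytes] = set()
pending_messages: Dict[bytes, List[Any]] = defaultdict(list)


def process(message: Any) -> None:
    # Placeholder for the remaining validation and processing steps.
    pass


def on_gossip_message(message: Any, referenced_block_root: bytes) -> None:
    if referenced_block_root in seen_block_roots:
        process(message)
    else:
        # IGNORE for now; the client MAY queue the message until the block is retrieved.
        pending_messages[referenced_block_root].append(message)


def on_block_seen(block_root: bytes) -> None:
    # Invoked once the block becomes available, whether via gossip or a non-gossip source.
    seen_block_roots.add(block_root)
    for message in pending_messages.pop(block_root, []):
        process(message)
```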
From 022bb22c777dd9b050c8eb6a1686101bfe2f5013 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 17 Dec 2024 11:31:35 +0100 Subject: [PATCH 26/76] Use single constant for gossip/req/resp, clarify encoded sizes --- specs/phase0/p2p-interface.md | 70 +++++++++++++++++++++++------------ 1 file changed, 47 insertions(+), 23 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 5fd2771829..29624e4fa8 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -193,11 +193,10 @@ This section outlines configurations that are used in this spec. | Name | Value | Description | |---|---|---| -| `GOSSIP_MAX_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed gossip messages. | +| `MAX_PAYLOAD_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed payload in gossipsub messages / RPC chunks. | | `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request | | `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | Number of epochs on a subnet subscription (~27 hours) | | `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks | -| `MAX_CHUNK_SIZE` | `10 * 2**20` (=10485760, 10 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. | | `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated. | | `MAXIMUM_GOSSIP_CLOCK_DISPARITY` | `500` | The maximum **milliseconds** of clock disparity assumed between honest nodes. | | `MESSAGE_DOMAIN_INVALID_SNAPPY` | `DomainType('0x00000000')` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages | @@ -229,6 +228,21 @@ Where is entirely independent of the ENR sequence number, and will in most cases be out of sync with the ENR sequence number. +### Maximum message sizes + +Maximum message sizes are derived from the maximum payload size that the network can carry according to the following functions: + +```python +def max_compressed_len(n): + # Worst-case compressed length for a given payload of size n when using snappy + # https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 + return int(32 + n + n / 6) + +def max_message_size(): + # Allow 1024 bytes for framing and encoding overhead but at least 1MB in case MAX_PAYLOAD_SIZE is small. + return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024*1024) +``` + ### The gossip domain: gossipsub Clients MUST support the [gossipsub v1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) libp2p Protocol @@ -273,18 +287,6 @@ Clients MUST reject messages with unknown topic. *Note*: `ForkDigestValue` is composed of values that are not known until the genesis block/state are available. Due to this, clients SHOULD NOT subscribe to gossipsub topics until these genesis values are known. -The uncompressed payload in the [`data`](https://github.com/libp2p/go-libp2p-pubsub/blob/c06df2f9a38e9382e644b241adf0e96e5ca00955/pb/rpc.proto#L19) -must have has a size no greater than `GOSSIP_MAX_SIZE`. 
- -After compression, the payload in the `data` field must have a size no greater than -`32 + GOSSIP_MAX_SIZE + GOSSIP_MAX_SIZE / 6` (rounded down), as given by the -[snappy maximum compressed size function](https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47). - -Clients MUST reject (fail validation) messages with payloads that are over these size limits. -Likewise, clients MUST NOT emit or propagate messages larger than these limits. - -Clients MAY use [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds) to determine the payload size limit, when this size is lower than `GOSSIP_MAX_SIZE`. - The optional `from` (1), `seqno` (3), `signature` (5) and `key` (6) protobuf fields are omitted from the message, since messages are identified by content, anonymous, and signed where necessary in the application layer. Starting from Gossipsub v1.1, clients MUST enforce this by applying the `StrictNoSign` @@ -300,8 +302,6 @@ The `message-id` of a gossipsub message MUST be the following 20 byte value comp Where relevant, clients MUST reject messages with `message-id` sizes other than 20 bytes. -Clients MAY reject messages whose protobuf-encoded size exceeds the maximum possible size based on the limits above. - *Note*: The above logic handles two exceptional cases: (1) multiple snappy `data` can decompress to the same value, and (2) some message `data` can fail to snappy decompress altogether. @@ -516,6 +516,16 @@ so [basic snappy block compression](https://github.com/google/snappy/blob/master Implementations MUST use a single encoding for gossip. Changing an encoding will require coordination between participating implementations. +#### Gossipsub size limits + +Size limits are placed both on the [`RPCMsg`](https://github.com/libp2p/specs/blob/b5f7fce29b32d4c7d0efe37b019936a11e5db872/pubsub/README.md#the-rpc) frame as well as the encoded payload in each [`Message`](https://github.com/libp2p/specs/blob/b5f7fce29b32d4c7d0efe37b019936a11e5db872/pubsub/README.md#the-message). + +Clients MUST reject and MUST NOT emit or propagate messages whose size exceed the following limits: + +* the size of the encoded `RPCMsg`, including control messages and framing, must not exceed `max_message_size()` +* the size of the compressed payload in the `Message.data` field must not exceed `max_compressed_len(MAX_PAYLOAD_SIZE)`. +* the size of the uncompressed payload must not exceed `MAX_PAYLOAD_SIZE` or the [type-specific SSZ bound](#what-are-ssz-type-size-bounds), whichever is lower. + ### The Req/Resp domain #### Protocol identification @@ -565,7 +575,7 @@ All other response types (non-Lists) send a single `response_chunk`. For both `request`s and `response`s, the `encoding-dependent-header` MUST be valid, and the `encoded-payload` must be valid within the constraints of the `encoding-dependent-header`. This includes type-specific bounds on payload size for some encoding strategies. -Regardless of these type specific bounds, a global maximum uncompressed byte size of `MAX_CHUNK_SIZE` MUST be applied to all method response chunks. +Regardless of these type specific bounds, a global maximum uncompressed byte size of `MAX_PAYLOAD_SIZE` MUST be applied to all method response chunks. Clients MUST ensure that lengths are within these bounds; if not, they SHOULD reset the stream immediately. Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance. 
@@ -679,15 +689,13 @@ When snappy is applied, it can be passed through a buffered Snappy reader to dec Before reading the payload, the header MUST be validated: - The unsigned protobuf varint used for the length-prefix MUST not be longer than 10 bytes, which is sufficient for any `uint64`. -- The length-prefix is within the expected [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds). +- The length-prefix is within the expected [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds) or `MAX_PAYLOAD_SIZE`, whichever is smaller. After reading a valid header, the payload MAY be read, while maintaining the size constraints from the header. -A reader SHOULD NOT read more than `max_encoded_len(n)` bytes after reading the SSZ length-prefix `n` from the header. -- For `ssz_snappy` this is: `32 + n + n // 6`. - This is considered the [worst-case compression result](https://github.com/google/snappy/blob/537f4ad6240e586970fe554614542e9717df7902/snappy.cc#L98) by Snappy. +A reader MUST NOT read more than `max_compressed_len(n)` bytes after reading the SSZ length-prefix `n` from the header. -A reader SHOULD consider the following cases as invalid input: +A reader MUST consider the following cases as invalid input: - Any remaining bytes, after having read the `n` SSZ bytes. An EOF is expected if more bytes are read than required. - An early EOF, before fully reading the declared length-prefix worth of SSZ bytes. @@ -1444,7 +1452,7 @@ Nevertheless, in the case of `ssz_snappy`, messages are still length-prefixed wi * Alignment with protocols like gRPC over HTTP/2 that prefix with length * Sanity checking of message length, and enabling much stricter message length limiting based on SSZ type information, to provide even more DOS protection than the global message length already does. - E.g. a small `Status` message does not nearly require `MAX_CHUNK_SIZE` bytes. + E.g. a small `Status` message does not nearly require `MAX_PAYLOAD_SIZE` bytes. [Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length (unsigned here) ints. Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte. @@ -1693,6 +1701,22 @@ Other types are static, they have a fixed size: no dynamic-length content is inv For reference, the type bounds can be computed ahead of time, [as per this example](https://gist.github.com/protolambda/db75c7faa1e94f2464787a480e5d613e). It is advisable to derive these lengths from the SSZ type definitions in use, to ensure that version changes do not cause out-of-sync type bounds. +#### Why is the message size defined in terms of application payload? + +When transmitting messages over gossipsub and / or req/resp, we want to ensure that the same payload sizes are supported no matter the underlying transport, decoupling the consensus layer from libp2p-induced overhead and the particular transmission strategy. + +To derive "encoded size limits" from desired application sizes we take into account snappy compression and framing overhead. 
+ +In the case of gossipsub, the protocol supports sending multiple application payloads as well as mixing application data with control messages in each gossipsub frame - the limit is set such that at least one max-sized application-level message together with a small amount (1kb) of gossipsub overhead is allowed - implementations are free to pack multiple smaller application messages into a single gossipsub frame, and / or combine it with control messages as they see fit. + +The limit is set on the uncompressed payload size in particular to protect against decompression bombs - although + +#### Why is there a limit on message sizes at all? + +The message size limit protects against several forms of DoS and network-based amplification attacks and provide upper bounds for resource (network, memory) usage in the client based on protocol requirements to decode, buffer, cache, store and re-transmit messages which in turn translate into performance and protection tradeoffs, ensuring capacity to handle worst cases during recovery from network instability. + +In particular, blocks which at the time of writing is the only message type without a practical SSZ-derived upper bound on size cannot be fully verified synchronously as part of gossipsub validity checks meaning that there exist cases where invalid messages signed by a validator may be amplified by the network. + ## libp2p implementations matrix This section will soon contain a matrix showing the maturity/state of the libp2p features required From 44ab11d1551dc5e3d017d9a98ef37d339a437d18 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 17 Dec 2024 11:44:26 +0100 Subject: [PATCH 27/76] doctoc --- specs/phase0/p2p-interface.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 29624e4fa8..8bc05844c1 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -16,6 +16,7 @@ - [Constants](#constants) - [Configuration](#configuration) - [MetaData](#metadata) + - [Maximum message sizes](#maximum-message-sizes) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - [Topics and messages](#topics-and-messages) - [Global topics](#global-topics) @@ -28,6 +29,7 @@ - [`beacon_attestation_{subnet_id}`](#beacon_attestation_subnet_id) - [Attestations and Aggregation](#attestations-and-aggregation) - [Encodings](#encodings) + - [Gossipsub size limits](#gossipsub-size-limits) - [The Req/Resp domain](#the-reqresp-domain) - [Protocol identification](#protocol-identification) - [Req/Resp interaction](#reqresp-interaction) @@ -102,6 +104,8 @@ - [Why are we using Snappy for compression?](#why-are-we-using-snappy-for-compression) - [Can I get access to unencrypted bytes on the wire for debugging purposes?](#can-i-get-access-to-unencrypted-bytes-on-the-wire-for-debugging-purposes) - [What are SSZ type size bounds?](#what-are-ssz-type-size-bounds) + - [Why is the message size defined in terms of application payload?](#why-is-the-message-size-defined-in-terms-of-application-payload) + - [Why is there a limit on message sizes at all?](#why-is-there-a-limit-on-message-sizes-at-all) - [libp2p implementations matrix](#libp2p-implementations-matrix) @@ -522,7 +526,7 @@ Size limits are placed both on the [`RPCMsg`](https://github.com/libp2p/specs/bl Clients MUST reject and MUST NOT emit or propagate messages whose size exceed the following limits: -* the size of the encoded `RPCMsg`, including control messages and framing, must not exceed 
`max_message_size()` +* the size of the encoded `RPCMsg`, including control messages, framing, topics etc, must not exceed `max_message_size()` * the size of the compressed payload in the `Message.data` field must not exceed `max_compressed_len(MAX_PAYLOAD_SIZE)`. * the size of the uncompressed payload must not exceed `MAX_PAYLOAD_SIZE` or the [type-specific SSZ bound](#what-are-ssz-type-size-bounds), whichever is lower. From 305f30e89505615d924e42f81105b6de104c8e74 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 10:15:13 -0600 Subject: [PATCH 28/76] Bump circleci's cached venv key --- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1de55179d4..37e094e1de 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -35,13 +35,13 @@ commands: description: "Restore the cache with pyspec keys" steps: - restore_cached_venv: - venv_name: v30-pyspec + venv_name: v31-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} save_pyspec_cached_venv: description: Save a venv into a cache with pyspec keys" steps: - save_cached_venv: - venv_name: v30-pyspec + venv_name: v31-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} venv_path: ./venv jobs: From 702722fe6995d2917aa9d6e1eb23085dde539b06 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 10:25:21 -0600 Subject: [PATCH 29/76] Bump circleci's cached repo key --- .circleci/config.yml | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 37e094e1de..9be3106db1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -53,16 +53,16 @@ jobs: # Restore git repo at point close to target branch/revision, to speed up checkout - restore_cache: keys: - - v3-specs-repo-{{ .Branch }}-{{ .Revision }} - - v3-specs-repo-{{ .Branch }}- - - v3-specs-repo- + - v4-specs-repo-{{ .Branch }}-{{ .Revision }} + - v4-specs-repo-{{ .Branch }}- + - v4-specs-repo- - checkout - run: name: Clean up git repo to reduce cache size command: git gc # Save the git checkout as a cache, to make cloning next time faster. 
- save_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} paths: - ~/specs-repo install_pyspec_test: @@ -71,7 +71,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Install pyspec requirements @@ -83,7 +83,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -96,7 +96,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -109,7 +109,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -122,7 +122,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -135,7 +135,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -148,7 +148,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -161,7 +161,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -174,7 +174,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -187,7 +187,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Install doctoc From b1205ef967de705957df1f50e6c5453d8bde09de Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 10:42:46 -0600 Subject: [PATCH 30/76] Revert "Bump circleci's cached venv key" This reverts commit 305f30e89505615d924e42f81105b6de104c8e74. 
--- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9be3106db1..d142e4ac24 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -35,13 +35,13 @@ commands: description: "Restore the cache with pyspec keys" steps: - restore_cached_venv: - venv_name: v31-pyspec + venv_name: v30-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} save_pyspec_cached_venv: description: Save a venv into a cache with pyspec keys" steps: - save_cached_venv: - venv_name: v31-pyspec + venv_name: v30-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} venv_path: ./venv jobs: From 46f1dde2b7fd487b107a69b90aeb60366da762cf Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 10:42:56 -0600 Subject: [PATCH 31/76] Revert "Bump circleci's cached repo key" This reverts commit 702722fe6995d2917aa9d6e1eb23085dde539b06. --- .circleci/config.yml | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index d142e4ac24..1de55179d4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -53,16 +53,16 @@ jobs: # Restore git repo at point close to target branch/revision, to speed up checkout - restore_cache: keys: - - v4-specs-repo-{{ .Branch }}-{{ .Revision }} - - v4-specs-repo-{{ .Branch }}- - - v4-specs-repo- + - v3-specs-repo-{{ .Branch }}-{{ .Revision }} + - v3-specs-repo-{{ .Branch }}- + - v3-specs-repo- - checkout - run: name: Clean up git repo to reduce cache size command: git gc # Save the git checkout as a cache, to make cloning next time faster. - save_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} paths: - ~/specs-repo install_pyspec_test: @@ -71,7 +71,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Install pyspec requirements @@ -83,7 +83,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -96,7 +96,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -109,7 +109,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -122,7 +122,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -135,7 +135,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -148,7 +148,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - 
restore_pyspec_cached_venv - run: name: Run py-tests @@ -161,7 +161,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -174,7 +174,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Run py-tests @@ -187,7 +187,7 @@ jobs: working_directory: ~/specs-repo steps: - restore_cache: - key: v4-specs-repo-{{ .Branch }}-{{ .Revision }} + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} - restore_pyspec_cached_venv - run: name: Install doctoc From cb4ed99f4e889c754dba3f2aadad3ed744c00e23 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 10:54:15 -0600 Subject: [PATCH 32/76] Fix linting errors for new functions --- specs/phase0/p2p-interface.md | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 8bc05844c1..e400dff58c 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -17,6 +17,8 @@ - [Configuration](#configuration) - [MetaData](#metadata) - [Maximum message sizes](#maximum-message-sizes) + - [`max_compressed_len`](#max_compressed_len) + - [`max_message_size`](#max_message_size) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - [Topics and messages](#topics-and-messages) - [Global topics](#global-topics) @@ -236,15 +238,21 @@ and will in most cases be out of sync with the ENR sequence number. Maximum message sizes are derived from the maximum payload size that the network can carry according to the following functions: +#### `max_compressed_len` + +```python +def max_compressed_len(n: uint64) -> uint64: + # Worst-case compressed length for a given payload of size n when using snappy + # https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 + return uint64(32 + n + n / 6) +``` + +#### `max_message_size` + ```python -def max_compressed_len(n): - # Worst-case compressed length for a given payload of size n when using snappy - # https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 - return int(32 + n + n / 6) - -def max_message_size(): - # Allow 1024 bytes for framing and encoding overhead but at least 1MB in case MAX_PAYLOAD_SIZE is small. - return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024*1024) +def max_message_size() -> uint64: + # Allow 1024 bytes for framing and encoding overhead but at least 1MiB in case MAX_PAYLOAD_SIZE is small. 
+ return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024 * 1024) ``` ### The gossip domain: gossipsub From d41b7bddf5e77c8a7d49832b11485f53f7c5e83f Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 11:35:49 -0600 Subject: [PATCH 33/76] Bump venv cache key again --- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1de55179d4..38bd6f422d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -35,13 +35,13 @@ commands: description: "Restore the cache with pyspec keys" steps: - restore_cached_venv: - venv_name: v30-pyspec + venv_name: v32-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} save_pyspec_cached_venv: description: Save a venv into a cache with pyspec keys" steps: - save_cached_venv: - venv_name: v30-pyspec + venv_name: v32-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} venv_path: ./venv jobs: From ea37fc5140b299249e3dd5adcac748d4303b6ccc Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 17 Dec 2024 14:23:25 -0600 Subject: [PATCH 34/76] Fix a few nits dealing with updated makefile * Hide output from forced eth2spec rebuild * Call detect_errors after all generators are done * Allow output to stderr to show up in console when testing * Add note about printing to stderr * Make check_toc private, as one should only use make lint * Move _check_toc rule closer to lint rule * Force rebuild eth2spec when running generators * And do not rebuild pyspec now, no longer needed --- Makefile | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index a3a3e24288..09e914c3ca 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,6 @@ ALL_EXECUTABLE_SPEC_NAMES = \ # A list of fake targets. .PHONY: \ - check_toc \ clean \ coverage \ detect_errors \ @@ -39,7 +38,6 @@ NORM = $(shell tput sgr0) # Print target descriptions. help: - @echo "make $(BOLD)check_toc$(NORM) -- check table of contents" @echo "make $(BOLD)clean$(NORM) -- delete all untracked files" @echo "make $(BOLD)coverage$(NORM) -- run pyspec tests with coverage" @echo "make $(BOLD)detect_errors$(NORM) -- detect generator errors" @@ -85,7 +83,7 @@ $(ETH2SPEC): setup.py | $(VENV) # Force rebuild/install the eth2spec package. eth2spec: - $(MAKE) --always-make $(ETH2SPEC) + @$(MAKE) --always-make $(ETH2SPEC) # Create the pyspec for all phases. pyspec: $(VENV) setup.py @@ -99,6 +97,8 @@ pyspec: $(VENV) setup.py TEST_REPORT_DIR = $(PYSPEC_DIR)/test-reports # Run pyspec tests. +# Note: for debugging output to show, print to stderr. +# # To run a specific test, append k=, eg: # make test k=test_verify_kzg_proof # To run tests for a specific fork, append fork=, eg: @@ -117,6 +117,7 @@ test: $(ETH2SPEC) pyspec @mkdir -p $(TEST_REPORT_DIR) @$(PYTHON_VENV) -m pytest \ -n auto \ + --capture=no \ $(MAYBE_TEST) \ $(MAYBE_FORK) \ $(PRESET) \ @@ -193,10 +194,6 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \ $(wildcard $(SPEC_DIR)/_features/*/*/*.md) \ $(wildcard $(SSZ_DIR)/*.md) -# Check all files and error if any ToC were modified. -check_toc: $(MARKDOWN_FILES:=.toc) - @[ "$$(find . -name '*.md.tmp' -print -quit)" ] && exit 1 || exit 0 - # Generate ToC sections & save copy of original if modified. 
%.toc: @cp $* $*.tmp; \ @@ -209,8 +206,12 @@ check_toc: $(MARKDOWN_FILES:=.toc) echo "\033[1;34m See $*.tmp\033[0m"; \ fi +# Check all files and error if any ToC were modified. +_check_toc: $(MARKDOWN_FILES:=.toc) + @[ "$$(find . -name '*.md.tmp' -print -quit)" ] && exit 1 || exit 0 + # Check for mistakes. -lint: $(ETH2SPEC) pyspec check_toc +lint: $(ETH2SPEC) pyspec _check_toc @$(CODESPELL_VENV) . --skip "./.git,$(VENV),$(PYSPEC_DIR)/.mypy_cache" -I .codespell-whitelist @$(PYTHON_VENV) -m flake8 --config $(FLAKE8_CONFIG) $(PYSPEC_DIR)/eth2spec @$(PYTHON_VENV) -m flake8 --config $(FLAKE8_CONFIG) $(TEST_GENERATORS_DIR) @@ -235,17 +236,19 @@ gen_list: done # Run one generator. +# This will forcibly rebuild eth2spec just in case. # To check modules for a generator, append modcheck=true, eg: # make gen_genesis modcheck=true gen_%: MAYBE_MODCHECK := $(if $(filter true,$(modcheck)),--modcheck) -gen_%: $(ETH2SPEC) pyspec +gen_%: eth2spec @mkdir -p $(TEST_VECTOR_DIR) @$(PYTHON_VENV) $(GENERATOR_DIR)/$*/main.py \ --output $(TEST_VECTOR_DIR) \ $(MAYBE_MODCHECK) # Run all generators then check for errors. -gen_all: $(GENERATOR_TARGETS) detect_errors +gen_all: $(GENERATOR_TARGETS) + @$(MAKE) detect_errors # Detect errors in generators. detect_errors: $(TEST_VECTOR_DIR) From 0964db294c718b0d3574945a963cd2502a1aef32 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 18 Dec 2024 17:28:51 +1100 Subject: [PATCH 35/76] Fix custody `sampling_size` logic. --- specs/fulu/das-core.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/fulu/das-core.md b/specs/fulu/das-core.md index 25576bc1f4..6908144799 100644 --- a/specs/fulu/das-core.md +++ b/specs/fulu/das-core.md @@ -237,7 +237,7 @@ The particular columns/groups that a node custodies are selected pseudo-randomly ## Custody sampling -At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count)` total custody groups. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. +At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count * (NUMBER_OF_COLUMNS / NUMBER_OF_CUSTODY_GROUPS))` total columns. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. 
## Extended data From 7be22acf6ac2b07f15f8b2af16b13e81ce57557f Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Wed, 18 Dec 2024 16:26:16 +0800 Subject: [PATCH 36/76] Remove non commit-pinned blob links --- tests/README.md | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/tests/README.md b/tests/README.md index 798627577d..dc2e02439d 100644 --- a/tests/README.md +++ b/tests/README.md @@ -54,15 +54,14 @@ To learn how consensus spec tests are written, let's go over the code: This [decorator](https://book.pythontips.com/en/latest/decorators.html) specifies that this test is applicable to all the phases of consensus layer development. These phases are similar to forks (Istanbul, -Berlin, London, etc.) in the execution blockchain. If you are interested, [you can see the definition of -this decorator here](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py#L331-L335). +Berlin, London, etc.) in the execution blockchain. ```python @spec_state_test ``` -[This decorator](https://github.com/qbzzt/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py#L232-L234) specifies -that this test is a state transition test, and that it does not include a transition between different forks. +This decorator specifies that this test is a state transition test, and that it does not include a transition +between different forks. ```python def test_empty_block_transition(spec, state): @@ -162,8 +161,7 @@ find . -name '*.py' -exec grep 'def state_transition_and_sign_block' {} \; -prin ``` And you'll find that the function is defined in -[`eth2spec/test/helpers/state.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/state.py). Looking -in that file, we see that the second function is: +`eth2spec/test/helpers/state.py`. Looking in that file, we see that the second function is: ```python def next_slot(spec, state): @@ -199,8 +197,7 @@ verify this). It is important to make sure that the system rejects invalid input, so our next step is to deal with cases where the protocol is supposed to reject something. To see such a test, look at `test_prev_slot_block_transition` (in the same -file we used previously, -[`~/consensus-specs/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py)). +file we used previously, `~/consensus-specs/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py`). ```python @with_all_phases @@ -230,8 +227,7 @@ Transition to the new slot, which naturally has a different proposer. ``` Specify that the function `transition_unsigned_block` will cause an assertion error. -You can see this function in -[`~/consensus-specs/tests/core/pyspec/eth2spec/test/helpers/block.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/block.py), +You can see this function in `~/consensus-specs/tests/core/pyspec/eth2spec/test/helpers/block.py`, and one of the tests is that the block must be for this slot: > ```python > assert state.slot == block.slot From 35603f5417f6fa9cdd723fb3a546a59c215384ae Mon Sep 17 00:00:00 2001 From: Manu NALEPA Date: Wed, 18 Dec 2024 13:29:44 +0100 Subject: [PATCH 37/76] Metadata: Replace `csc` by `cgc`. 
--- specs/fulu/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/fulu/p2p-interface.md b/specs/fulu/p2p-interface.md index abebbffecc..e846cb59fc 100644 --- a/specs/fulu/p2p-interface.md +++ b/specs/fulu/p2p-interface.md @@ -152,14 +152,14 @@ The `MetaData` stored locally by clients is updated with an additional field to seq_number: uint64 attnets: Bitvector[ATTESTATION_SUBNET_COUNT] syncnets: Bitvector[SYNC_COMMITTEE_SUBNET_COUNT] - custody_subnet_count: uint64 # csc + custody_group_count: uint64 # cgc ) ``` Where - `seq_number`, `attnets`, and `syncnets` have the same meaning defined in the Altair document. -- `custody_subnet_count` represents the node's custody subnet count. Clients MAY reject peers with a value less than `CUSTODY_REQUIREMENT`. +- `custody_group_count` represents the node's custody group count. Clients MAY reject peers with a value less than `CUSTODY_REQUIREMENT`. ### The gossip domain: gossipsub From dde81194d7dc68c839d643cb255fbc6243a696ba Mon Sep 17 00:00:00 2001 From: Suphanat Chunhapanya Date: Wed, 18 Dec 2024 22:06:45 +0700 Subject: [PATCH 38/76] EIP-7594: Fix custody group spec tests --- .../test_compute_columns_for_custody_group.py | 62 ++++++++++ .../networking/test_get_custody_columns.py | 113 ------------------ .../networking/test_get_custody_groups.py | 106 ++++++++++++++++ tests/generators/networking/main.py | 3 +- 4 files changed, 170 insertions(+), 114 deletions(-) create mode 100644 tests/core/pyspec/eth2spec/test/fulu/networking/test_compute_columns_for_custody_group.py delete mode 100644 tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_columns.py create mode 100644 tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_groups.py diff --git a/tests/core/pyspec/eth2spec/test/fulu/networking/test_compute_columns_for_custody_group.py b/tests/core/pyspec/eth2spec/test/fulu/networking/test_compute_columns_for_custody_group.py new file mode 100644 index 0000000000..61752e919a --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/fulu/networking/test_compute_columns_for_custody_group.py @@ -0,0 +1,62 @@ +import random + +from eth2spec.test.context import ( + single_phase, + spec_test, + with_fulu_and_later, +) + + +def _run_compute_columns_for_custody_group(spec, rng, custody_group=None): + if custody_group is None: + custody_group = rng.randint(0, spec.config.NUMBER_OF_CUSTODY_GROUPS - 1) + + result = spec.compute_columns_for_custody_group(custody_group) + yield 'custody_group', 'meta', custody_group + + assert len(result) == len(set(result)) + assert len(result) == spec.config.NUMBER_OF_COLUMNS // spec.config.NUMBER_OF_CUSTODY_GROUPS + assert all(i < spec.config.NUMBER_OF_COLUMNS for i in result) + python_list_result = [int(i) for i in result] + + yield 'result', 'meta', python_list_result + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__min_custody_group(spec): + rng = random.Random(1111) + yield from _run_compute_columns_for_custody_group(spec, rng, custody_group=0) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__max_custody_group(spec): + rng = random.Random(1111) + yield from _run_compute_columns_for_custody_group(spec, rng, custody_group=spec.config.NUMBER_OF_CUSTODY_GROUPS - 1) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__1(spec): + rng = random.Random(1111) + yield from _run_compute_columns_for_custody_group(spec, rng) + + 
+@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__2(spec): + rng = random.Random(2222) + yield from _run_compute_columns_for_custody_group(spec, rng) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__3(spec): + rng = random.Random(3333) + yield from _run_compute_columns_for_custody_group(spec, rng) diff --git a/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_columns.py b/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_columns.py deleted file mode 100644 index d3be42ce16..0000000000 --- a/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_columns.py +++ /dev/null @@ -1,113 +0,0 @@ -import random - -from eth2spec.test.context import ( - single_phase, - spec_test, - with_fulu_and_later, -) - - -def _run_get_custody_columns(spec, rng, node_id=None, custody_group_count=None): - if node_id is None: - node_id = rng.randint(0, 2**256 - 1) - - if custody_group_count is None: - custody_group_count = rng.randint(0, spec.config.NUMBER_OF_CUSTODY_GROUPS) - - columns_per_group = spec.config.NUMBER_OF_COLUMNS // spec.config.NUMBER_OF_CUSTODY_GROUPS - groups = spec.get_custody_groups(node_id, custody_group_count) - yield 'node_id', 'meta', node_id - yield 'custody_group_count', 'meta', int(custody_group_count) - - result = [] - for group in groups: - group_columns = spec.compute_columns_for_custody_group(group) - assert len(group_columns) == columns_per_group - result.extend(group_columns) - - assert len(result) == len(set(result)) - assert len(result) == custody_group_count * columns_per_group - assert all(i < spec.config.NUMBER_OF_COLUMNS for i in result) - python_list_result = [int(i) for i in result] - - yield 'result', 'meta', python_list_result - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__min_node_id_min_custody_group_count(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns(spec, rng, node_id=0, custody_group_count=0) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__min_node_id_max_custody_group_count(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns( - spec, rng, node_id=0, - custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__max_node_id_min_custody_group_count(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns(spec, rng, node_id=2**256 - 1, custody_group_count=0) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__max_node_id_max_custody_group_count(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns( - spec, rng, node_id=2**256 - 1, - custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS, - ) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__max_node_id_max_custody_group_count_minus_1(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns( - spec, rng, node_id=2**256 - 2, - custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS, - ) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__short_node_id(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns(spec, rng, node_id=1048576, custody_group_count=1) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__1(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns(spec, rng) - - 
-@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__2(spec): - rng = random.Random(2222) - yield from _run_get_custody_columns(spec, rng) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__3(spec): - rng = random.Random(3333) - yield from _run_get_custody_columns(spec, rng) diff --git a/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_groups.py b/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_groups.py new file mode 100644 index 0000000000..8d33a2b920 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_groups.py @@ -0,0 +1,106 @@ +import random + +from eth2spec.test.context import ( + single_phase, + spec_test, + with_fulu_and_later, +) + + +def _run_get_custody_groups(spec, rng, node_id=None, custody_group_count=None): + if node_id is None: + node_id = rng.randint(0, 2**256 - 1) + + if custody_group_count is None: + custody_group_count = rng.randint(0, spec.config.NUMBER_OF_CUSTODY_GROUPS) + + result = spec.get_custody_groups(node_id, custody_group_count) + yield 'node_id', 'meta', node_id + yield 'custody_group_count', 'meta', int(custody_group_count) + + assert len(result) == len(set(result)) + assert len(result) == custody_group_count + assert all(i < spec.config.NUMBER_OF_CUSTODY_GROUPS for i in result) + python_list_result = [int(i) for i in result] + + yield 'result', 'meta', python_list_result + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__min_node_id_min_custody_group_count(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups(spec, rng, node_id=0, custody_group_count=0) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__min_node_id_max_custody_group_count(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups( + spec, rng, node_id=0, + custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__max_node_id_min_custody_group_count(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups(spec, rng, node_id=2**256 - 1, custody_group_count=0) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__max_node_id_max_custody_group_count(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups( + spec, rng, node_id=2**256 - 1, + custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS, + ) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__max_node_id_max_custody_group_count_minus_1(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups( + spec, rng, node_id=2**256 - 2, + custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS, + ) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__short_node_id(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups(spec, rng, node_id=1048576, custody_group_count=1) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__1(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups(spec, rng) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__2(spec): + rng = random.Random(2222) + yield from _run_get_custody_groups(spec, rng) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__3(spec): + rng = random.Random(3333) + yield from _run_get_custody_groups(spec, rng) diff --git 
a/tests/generators/networking/main.py b/tests/generators/networking/main.py index 3217c2cce2..a670f7bd4d 100644 --- a/tests/generators/networking/main.py +++ b/tests/generators/networking/main.py @@ -5,7 +5,8 @@ if __name__ == "__main__": fulu_mods = {key: 'eth2spec.test.fulu.networking.test_' + key for key in [ - 'get_custody_columns', + 'compute_columns_for_custody_group', + 'get_custody_groups', ]} all_mods = { FULU: fulu_mods From c33124ebe40fe2092c138b2c90959330810f539a Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Wed, 18 Dec 2024 11:38:55 -0600 Subject: [PATCH 39/76] Use integer division --- specs/fulu/das-core.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/fulu/das-core.md b/specs/fulu/das-core.md index 6908144799..923cb7db29 100644 --- a/specs/fulu/das-core.md +++ b/specs/fulu/das-core.md @@ -237,7 +237,7 @@ The particular columns/groups that a node custodies are selected pseudo-randomly ## Custody sampling -At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count * (NUMBER_OF_COLUMNS / NUMBER_OF_CUSTODY_GROUPS))` total columns. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. +At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count * (NUMBER_OF_COLUMNS // NUMBER_OF_CUSTODY_GROUPS))` total columns. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. ## Extended data From 8e0d0d48e81d6c7c5a8253ab61340f5ea5bac66a Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 19 Dec 2024 09:29:58 +1100 Subject: [PATCH 40/76] Simplify inline code Co-authored-by: Justin Traglia <95511699+jtraglia@users.noreply.github.com> --- specs/fulu/das-core.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/fulu/das-core.md b/specs/fulu/das-core.md index 923cb7db29..31c4af3c38 100644 --- a/specs/fulu/das-core.md +++ b/specs/fulu/das-core.md @@ -237,7 +237,7 @@ The particular columns/groups that a node custodies are selected pseudo-randomly ## Custody sampling -At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count * (NUMBER_OF_COLUMNS // NUMBER_OF_CUSTODY_GROUPS))` total columns. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. 
+At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count * columns_per_group)` total columns, where `columns_per_group = NUMBER_OF_COLUMNS // NUMBER_OF_CUSTODY_GROUPS`. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. ## Extended data From f17663f89c25e0212279a05446065c17344beaa0 Mon Sep 17 00:00:00 2001 From: Nico Flaig Date: Fri, 20 Dec 2024 14:55:39 +0000 Subject: [PATCH 41/76] Update blob sidecar subnet computation for EIP-7691 --- specs/electra/validator.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/specs/electra/validator.md b/specs/electra/validator.md index 2e980d5345..3620c30790 100644 --- a/specs/electra/validator.md +++ b/specs/electra/validator.md @@ -24,6 +24,8 @@ - [Deposits](#deposits) - [Execution payload](#execution-payload) - [Execution Requests](#execution-requests) + - [Constructing the `BlobSidecar`s](#constructing-the-blobsidecars) + - [Sidecar](#sidecar) - [Attesting](#attesting) - [Construct attestation](#construct-attestation) - [Attestation aggregation](#attestation-aggregation) @@ -240,6 +242,17 @@ def get_execution_requests(execution_requests_list: Sequence[bytes]) -> Executio ) ``` +### Constructing the `BlobSidecar`s + +#### Sidecar + +*[Modified in Electra:EIP7691]* + +```python +def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID: + return SubnetID(blob_index % BLOB_SIDECAR_SUBNET_COUNT_ELECTRA) +``` + ## Attesting ### Construct attestation From 9e6d8a71f0f1335f38114b6160dce01d4dfaa6dd Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Mon, 23 Dec 2024 12:33:41 -0600 Subject: [PATCH 42/76] Fix garbled blob_kzg_commitments accesses --- specs/_features/eip7732/p2p-interface.md | 2 +- specs/deneb/p2p-interface.md | 2 +- specs/electra/p2p-interface.md | 2 +- specs/fulu/p2p-interface.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/_features/eip7732/p2p-interface.md b/specs/_features/eip7732/p2p-interface.md index 22b0ba7ede..a2716933cd 100644 --- a/specs/_features/eip7732/p2p-interface.md +++ b/specs/_features/eip7732/p2p-interface.md @@ -130,7 +130,7 @@ The *type* of the payload of this topic changes to the (modified) `SignedBeaconB There are no new validations for this topic. However, all validations with regards to the `ExecutionPayload` are removed: -- _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- i.e. validate that len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK +- _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` - _[REJECT]_ The block's execution payload timestamp is correct with respect to the slot -- i.e. `execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot)`. 
- If `execution_payload` verification of block's parent by an execution node is *not* complete: diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index b3edc9d5bf..e38a50ba2e 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -147,7 +147,7 @@ The *type* of the payload of this topic changes to the (modified) `SignedBeaconB New validation: - _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- - i.e. validate that `len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` + i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` ###### `beacon_aggregate_and_proof` diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md index 8ebec6a8e6..d0663943f1 100644 --- a/specs/electra/p2p-interface.md +++ b/specs/electra/p2p-interface.md @@ -67,7 +67,7 @@ The derivation of the `message-id` remains stable. *Updated validation* - _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- - i.e. validate that `len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA` + i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA` ###### `beacon_aggregate_and_proof` diff --git a/specs/fulu/p2p-interface.md b/specs/fulu/p2p-interface.md index 0782d6ac0b..ef8a9b9c03 100644 --- a/specs/fulu/p2p-interface.md +++ b/specs/fulu/p2p-interface.md @@ -174,7 +174,7 @@ Some gossip meshes are upgraded in the Fulu fork to support upgraded types. *Updated validation* - _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- - i.e. validate that `len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_FULU` + i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_FULU` ##### Blob subnets From 6590cd0398adae53fd2ef13dd06e80d3471386a1 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Mon, 23 Dec 2024 12:37:28 -0600 Subject: [PATCH 43/76] Fix two minor typos --- docker/README.md | 2 +- specs/_features/custody_game/beacon-chain.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docker/README.md b/docker/README.md index 4824fc283a..34bdd94c51 100644 --- a/docker/README.md +++ b/docker/README.md @@ -10,7 +10,7 @@ Handy commands: Ideally manual running of docker containers is for advanced users, we recommend the script based approach described below for most users. -The `scripts/build_run_docker_tests.sh` script will cover most usecases. The script allows the user to configure the fork(altair/bellatrix/capella..), `$IMAGE_NAME` (specifies the container to use), preset type (mainnet/minimal), and test all forks flags. Ideally, this is the main way that users interact with the spec tests instead of running it locally with varying versions of dependencies. +The `scripts/build_run_docker_tests.sh` script will cover most use cases. The script allows the user to configure the fork(altair/bellatrix/capella..), `$IMAGE_NAME` (specifies the container to use), preset type (mainnet/minimal), and test all forks flags. Ideally, this is the main way that users interact with the spec tests instead of running it locally with varying versions of dependencies. 
E.g: - `./build_run_docker_tests.sh --p mainnet` will run the mainnet preset tests diff --git a/specs/_features/custody_game/beacon-chain.md b/specs/_features/custody_game/beacon-chain.md index 092846a484..66aea773a7 100644 --- a/specs/_features/custody_game/beacon-chain.md +++ b/specs/_features/custody_game/beacon-chain.md @@ -619,7 +619,7 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed for attester_index in attesters: if attester_index != custody_slashing.malefactor_index: increase_balance(state, attester_index, whistleblower_reward) - # No special whisteblower reward: it is expected to be an attester. Others are free to slash too however. + # No special whistleblower reward: it is expected to be an attester. Others are free to slash too however. else: # The claim was false, the custody bit was correct. Slash the whistleblower that induced this work. slash_validator(state, custody_slashing.whistleblower_index) From 92a2b20c0ee239de05fc5b76fc98d39d682b9bd1 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Sat, 28 Dec 2024 09:36:03 -0600 Subject: [PATCH 44/76] In get_custody_groups, don't skip 0 value --- specs/fulu/das-core.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/specs/fulu/das-core.md b/specs/fulu/das-core.md index 31c4af3c38..846f6b206e 100644 --- a/specs/fulu/das-core.md +++ b/specs/fulu/das-core.md @@ -105,19 +105,20 @@ class MatrixEntry(Container): def get_custody_groups(node_id: NodeID, custody_group_count: uint64) -> Sequence[CustodyIndex]: assert custody_group_count <= NUMBER_OF_CUSTODY_GROUPS - custody_groups: List[uint64] = [] current_id = uint256(node_id) + custody_groups: List[CustodyIndex] = [] while len(custody_groups) < custody_group_count: custody_group = CustodyIndex( - bytes_to_uint64(hash(uint_to_bytes(uint256(current_id)))[0:8]) + bytes_to_uint64(hash(uint_to_bytes(current_id))[0:8]) % NUMBER_OF_CUSTODY_GROUPS ) if custody_group not in custody_groups: custody_groups.append(custody_group) if current_id == UINT256_MAX: # Overflow prevention - current_id = NodeID(0) - current_id += 1 + current_id = uint256(0) + else: + current_id += 1 assert len(custody_groups) == len(set(custody_groups)) return sorted(custody_groups) From 6dd929fa8d621e48560412fbf38c3a19778fbe5f Mon Sep 17 00:00:00 2001 From: Manu NALEPA Date: Sat, 4 Jan 2025 23:04:03 +0100 Subject: [PATCH 45/76] Deneb: Add BeaconState During the Deneb fork, some fields were added to the `ExecutionPayloadHeader`. The `ExecutionPayloadHeader` is part of the `BeaconState`. ==> This change should be reflected in the `BeaconState`. 
--- specs/deneb/beacon-chain.md | 48 +++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/specs/deneb/beacon-chain.md b/specs/deneb/beacon-chain.md index 43360f8b3e..966a7007d9 100644 --- a/specs/deneb/beacon-chain.md +++ b/specs/deneb/beacon-chain.md @@ -20,6 +20,7 @@ - [`BeaconBlockBody`](#beaconblockbody) - [`ExecutionPayload`](#executionpayload) - [`ExecutionPayloadHeader`](#executionpayloadheader) + - [`BeaconState`](#beaconstate) - [Helper functions](#helper-functions) - [Misc](#misc) - [`kzg_commitment_to_versioned_hash`](#kzg_commitment_to_versioned_hash) @@ -171,6 +172,53 @@ class ExecutionPayloadHeader(Container): excess_blob_gas: uint64 # [New in Deneb:EIP4844] ``` +#### `BeaconState` + +```python +class BeaconState(Container): + # Versioning + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot + fork: Fork + # History + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + # Eth1 + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + # Registry + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + # Randomness + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + # Slashings + slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances + # Participation + previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + # Finality + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch + previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + # Inactivity + inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] + # Sync + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + # Execution + latest_execution_payload_header: ExecutionPayloadHeader # [Modified in Deneb:EIP4844] + # Withdrawals + next_withdrawal_index: WithdrawalIndex + next_withdrawal_validator_index: ValidatorIndex + # Deep history valid from Capella onwards + historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] +``` + ## Helper functions ### Misc From 4f7fe8230d5758b44d70149ccdfc473dbe3fdfde Mon Sep 17 00:00:00 2001 From: Manu NALEPA Date: Mon, 6 Jan 2025 11:58:04 +0100 Subject: [PATCH 46/76] Fulu: Remove V3 of blob sidecar by root/range RPC The Fulu fork introduces peerDAS, replacing blobs sidecars by data columns sidecars. After the Fulu fork epoch, clients still need to be able to request blob sidecars by root/range, at least for the blobs retention period after the Fulu fork epoch. Blob sidecars will be retrieved at most up to the Electra epoch, so the V2 version (Electra) for blob sidecars by range/root is enough. There is no need to retrieve blobs sidecars after the Fulu fork where data columns sidecars will be used instead, so there is no need to introduce the V3 version (Fulu) for blob sidecars by range/root. 
--- specs/fulu/p2p-interface.md | 72 ------------------------------------- 1 file changed, 72 deletions(-) diff --git a/specs/fulu/p2p-interface.md b/specs/fulu/p2p-interface.md index ef8a9b9c03..73d96192ff 100644 --- a/specs/fulu/p2p-interface.md +++ b/specs/fulu/p2p-interface.md @@ -29,8 +29,6 @@ - [`data_column_sidecar_{subnet_id}`](#data_column_sidecar_subnet_id) - [The Req/Resp domain](#the-reqresp-domain) - [Messages](#messages) - - [BlobSidecarsByRoot v3](#blobsidecarsbyroot-v3) - - [BlobSidecarsByRange v3](#blobsidecarsbyrange-v3) - [DataColumnSidecarsByRoot v1](#datacolumnsidecarsbyroot-v1) - [DataColumnSidecarsByRange v1](#datacolumnsidecarsbyrange-v1) - [GetMetaData v3](#getmetadata-v3) @@ -64,7 +62,6 @@ The specification of these changes continues in the same format as the network s | `DATA_COLUMN_SIDECAR_SUBNET_COUNT` | `128` | The number of data column sidecar subnets used in the gossipsub protocol | | `MAX_REQUEST_DATA_COLUMN_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS` | Maximum number of data column sidecars in a single request | | `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve data column sidecars | -| `MAX_REQUEST_BLOB_SIDECARS_FULU` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_FULU` | Maximum number of blob sidecars in a single request | ### Containers @@ -211,75 +208,6 @@ The following validations MUST pass before forwarding the `sidecar: DataColumnSi #### Messages -##### BlobSidecarsByRoot v3 - -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/3/` - -*[Modified in Fulu:EIP7594]* - -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|---------------------|--------------------| -| `FULU_FORK_VERSION` | `fulu.BlobSidecar` | - -Request Content: - -``` -( - List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS_FULU] -) -``` - -Response Content: - -``` -( - List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS_FULU] -) -``` - -*Updated validation* - -No more than `MAX_REQUEST_BLOB_SIDECARS_FULU` may be requested at a time. - -##### BlobSidecarsByRange v3 - -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/3/` - -*[Modified in Fulu:EIP7594]* - -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|---------------------|--------------------| -| `FULU_FORK_VERSION` | `fulu.BlobSidecar` | - -Request Content: - -``` -( - start_slot: Slot - count: uint64 -) -``` - -Response Content: - -``` -( - List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS_FULU] -) -``` - -*Updated validation* - -Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS_FULU` sidecars. - ##### DataColumnSidecarsByRoot v1 **Protocol ID:** `/eth2/beacon_chain/req/data_column_sidecars_by_root/1/` From 7d511becf65946c26d654c17be452d595ce38522 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Mon, 6 Jan 2025 10:40:41 +0100 Subject: [PATCH 47/76] Emit correct block hash in random Electra tests New tests were added in #4032 with incorrect EL block hash, fix these. 
--- .../pyspec/eth2spec/test/utils/randomized_block_tests.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py index 0e4727b794..3dae15c694 100644 --- a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py +++ b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py @@ -8,7 +8,7 @@ from typing import Callable from eth2spec.test.helpers.execution_payload import ( - compute_el_block_hash, + compute_el_block_hash_for_block, build_randomized_execution_payload, ) from eth2spec.test.helpers.multi_operations import ( @@ -255,7 +255,7 @@ def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(34 opaque_tx, _, blob_kzg_commitments, _ = get_sample_blob_tx( spec, blob_count=rng.randint(0, spec.config.MAX_BLOBS_PER_BLOCK), rng=rng) block.body.execution_payload.transactions.append(opaque_tx) - block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state) + block.body.execution_payload.block_hash = compute_el_block_hash_for_block(spec, block) block.body.blob_kzg_commitments = blob_kzg_commitments return block @@ -264,6 +264,7 @@ def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(34 def random_block_electra(spec, state, signed_blocks, scenario_state, rng=Random(3456)): block = random_block_deneb(spec, state, signed_blocks, scenario_state, rng=rng) block.body.execution_requests = get_random_execution_requests(spec, state, rng=rng) + block.body.execution_payload.block_hash = compute_el_block_hash_for_block(spec, block) return block From 777c023babdce7dffad295bcb602ba26b014bb90 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Mon, 6 Jan 2025 08:46:48 -0600 Subject: [PATCH 48/76] Update unit test --- .../eth2spec/test/fulu/unittests/test_config_invariants.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py index fcf98c7e75..a0b8d30ac3 100644 --- a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py +++ b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py @@ -32,7 +32,3 @@ def test_polynomical_commitments_sampling(spec): @single_phase def test_networking(spec): assert spec.config.MAX_BLOBS_PER_BLOCK_FULU <= spec.MAX_BLOB_COMMITMENTS_PER_BLOCK - assert ( - spec.config.MAX_REQUEST_BLOB_SIDECARS_FULU == - spec.config.MAX_REQUEST_BLOCKS_DENEB * spec.config.MAX_BLOBS_PER_BLOCK_FULU - ) From d1d50cc539a11d04f78ea60ebae5b423353f17b9 Mon Sep 17 00:00:00 2001 From: NC <17676176+ensi321@users.noreply.github.com> Date: Mon, 6 Jan 2025 13:34:47 -0800 Subject: [PATCH 49/76] Remove electra.BlobSidecar --- specs/electra/p2p-interface.md | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md index d0663943f1..bd9020c0fa 100644 --- a/specs/electra/p2p-interface.md +++ b/specs/electra/p2p-interface.md @@ -116,15 +116,6 @@ The following validations are removed: *[Modified in Electra:EIP7691]* -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|------------------------|-----------------------| -| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` | -| `ELECTRA_FORK_VERSION` | 
`electra.BlobSidecar` | - Request Content: ``` @@ -151,15 +142,6 @@ No more than `MAX_REQUEST_BLOB_SIDECARS_ELECTRA` may be requested at a time. *[Modified in Electra:EIP7691]* -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|------------------------|-----------------------| -| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` | -| `ELECTRA_FORK_VERSION` | `electra.BlobSidecar` | - Request Content: ``` From 44cecd2caa0345bae46341641738608f1d8e58fe Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 7 Jan 2025 18:31:04 +0100 Subject: [PATCH 50/76] fix bellatrix constant too --- specs/bellatrix/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/bellatrix/p2p-interface.md b/specs/bellatrix/p2p-interface.md index 1f4c815660..5d8425e888 100644 --- a/specs/bellatrix/p2p-interface.md +++ b/specs/bellatrix/p2p-interface.md @@ -148,8 +148,8 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: #### Why was the max gossip message size increased at Bellatrix? With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic -field -- `transactions` -- which can validly exceed the `GOSSIP_MAX_SIZE` limit (1 MiB) put in -place at Phase 0, so GOSSIP_MAX_SIZE has increased to 10 Mib on the network. +field -- `transactions` -- which can validly exceed the `MAX_PAYLOAD_SIZE` limit (1 MiB) put in +place at Phase 0, so MAX_PAYLOAD_SIZE has increased to 10 Mib on the network. At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a single transaction filled entirely with data at a cost of 16 gas per byte can create a valid `ExecutionPayload` of ~2 MiB. Thus we need a size limit to at least account for From 3fced0903f211118b8c02e0467b925c41c4209da Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 7 Jan 2025 16:28:01 -0600 Subject: [PATCH 51/76] Add fork test with inactive, compounding validator with excess balance --- .../electra/fork/test_electra_fork_basic.py | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py index aade4a1605..4416063b39 100644 --- a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py @@ -151,6 +151,40 @@ def test_fork_has_compounding_withdrawal_credential(spec, phases, state): )] +@with_phases(phases=[DENEB], other_phases=[ELECTRA]) +@spec_test +@with_state +@with_meta_tags(ELECTRA_FORK_TEST_META_TAGS) +def test_fork_inactive_compounding_validator_with_excess_balance(spec, phases, state): + index = 0 + post_spec = phases[ELECTRA] + validator = state.validators[index] + + # set validator balance greater than min_activation_balance + state.balances[index] = post_spec.MIN_ACTIVATION_BALANCE + 1 + # set validator as not active yet + validator.activation_epoch = spec.FAR_FUTURE_EPOCH + # set validator activation eligibility epoch to the latest finalized epoch + validator.activation_eligibility_epoch = state.finalized_checkpoint.epoch + # give the validator compounding withdrawal credentials + validator.withdrawal_credentials = post_spec.COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:] + + post_state = yield from run_fork_test(post_spec, state) + + # the validator cannot be activated again + assert 
post_state.validators[index].activation_eligibility_epoch == spec.FAR_FUTURE_EPOCH + # the validator should now have a zero balance + assert post_state.balances[index] == 0 + # there should be a single pending deposit for this validator + assert post_state.pending_deposits == [post_spec.PendingDeposit( + pubkey=validator.pubkey, + withdrawal_credentials=validator.withdrawal_credentials, + amount=state.balances[index], + signature=spec.bls.G2_POINT_AT_INFINITY, + slot=spec.GENESIS_SLOT, + )] + + @with_phases(phases=[DENEB], other_phases=[ELECTRA]) @spec_test @with_state From 8e376dc03b8c9c16d041d3e4579d4e9ede51ba3c Mon Sep 17 00:00:00 2001 From: NC <17676176+ensi321@users.noreply.github.com> Date: Tue, 7 Jan 2025 15:53:27 -0800 Subject: [PATCH 52/76] Use SubnetID for sync committee --- specs/altair/validator.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 3602377acd..00dca30308 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -295,7 +295,7 @@ The `subnet_id` is derived from the position in the sync committee such that the *Note*: This function returns multiple deduplicated subnets if a given validator index is included multiple times in a given sync committee across multiple subcommittees. ```python -def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Set[uint64]: +def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Set[SubnetID]: next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1)) if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch): sync_committee = state.current_sync_committee @@ -305,7 +305,7 @@ def compute_subnets_for_sync_committee(state: BeaconState, validator_index: Vali target_pubkey = state.validators[validator_index].pubkey sync_committee_indices = [index for index, pubkey in enumerate(sync_committee.pubkeys) if pubkey == target_pubkey] return set([ - uint64(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT)) + SubnetID(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT)) for index in sync_committee_indices ]) ``` From f02275eb27332f80fb8d82652ee23216c063bd75 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Wed, 8 Jan 2025 12:16:07 -0600 Subject: [PATCH 53/76] Revert BlobSidecarsByRoot/Range version bump --- specs/electra/p2p-interface.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md index bd9020c0fa..5064676f18 100644 --- a/specs/electra/p2p-interface.md +++ b/specs/electra/p2p-interface.md @@ -19,8 +19,8 @@ - [`beacon_attestation_{subnet_id}`](#beacon_attestation_subnet_id) - [The Req/Resp domain](#the-reqresp-domain) - [Messages](#messages) - - [BlobSidecarsByRoot v2](#blobsidecarsbyroot-v2) - - [BlobSidecarsByRange v2](#blobsidecarsbyrange-v2) + - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) + - [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1) @@ -110,9 +110,9 @@ The following validations are removed: #### Messages -##### BlobSidecarsByRoot v2 +##### BlobSidecarsByRoot v1 -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/2/` +**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/` *[Modified in Electra:EIP7691]* @@ -136,9 +136,9 @@ Response Content: No more than `MAX_REQUEST_BLOB_SIDECARS_ELECTRA` may be requested at a time. 
-##### BlobSidecarsByRange v2 +##### BlobSidecarsByRange v1 -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/2/` +**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/` *[Modified in Electra:EIP7691]* From db52011909dee88236aa28669479d5fab9d7b006 Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Wed, 8 Jan 2025 14:11:08 -0600 Subject: [PATCH 54/76] Bump version to 1.5.0-beta.0 --- tests/core/pyspec/eth2spec/VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index e7fd637b5a..ba25d3754e 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.5.0-alpha.10 +1.5.0-beta.0 From 454bd57cd0fd0ead7012b1ab81460f2fd7a5f49f Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Wed, 8 Jan 2025 14:20:01 -0600 Subject: [PATCH 55/76] Update config files & fix some nits --- configs/mainnet.yaml | 4 +--- configs/minimal.yaml | 4 +--- specs/bellatrix/p2p-interface.md | 2 +- specs/phase0/p2p-interface.md | 2 +- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index deb3dcf5fe..e54db49661 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -115,15 +115,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 460474ebf7..a15314bb1f 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -116,15 +116,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890 # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # [customized] `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 272) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 272 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s diff --git a/specs/bellatrix/p2p-interface.md b/specs/bellatrix/p2p-interface.md index 5d8425e888..b2d28cf1f4 100644 --- a/specs/bellatrix/p2p-interface.md +++ b/specs/bellatrix/p2p-interface.md @@ -149,7 +149,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic field -- `transactions` -- which can validly exceed the `MAX_PAYLOAD_SIZE` limit (1 MiB) put in -place at Phase 0, so MAX_PAYLOAD_SIZE has increased to 10 Mib on the network. +place at Phase 0, so MAX_PAYLOAD_SIZE has increased to 10 MiB on the network. At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a single transaction filled entirely with data at a cost of 16 gas per byte can create a valid `ExecutionPayload` of ~2 MiB. 
Thus we need a size limit to at least account for diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index e400dff58c..f3d9038abd 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -199,7 +199,7 @@ This section outlines configurations that are used in this spec. | Name | Value | Description | |---|---|---| -| `MAX_PAYLOAD_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed payload in gossipsub messages / RPC chunks. | +| `MAX_PAYLOAD_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed payload in gossipsub messages / RPC chunks | | `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request | | `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | Number of epochs on a subnet subscription (~27 hours) | | `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks | From 5127929733ed14c5f06b0dc675f575daaac9a155 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Wed, 8 Jan 2025 14:43:54 -0600 Subject: [PATCH 56/76] Try to polish new paragraphs a bit --- specs/phase0/p2p-interface.md | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index f3d9038abd..ea51d96dfd 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -242,7 +242,7 @@ Maximum message sizes are derived from the maximum payload size that the network ```python def max_compressed_len(n: uint64) -> uint64: - # Worst-case compressed length for a given payload of size n when using snappy + # Worst-case compressed length for a given payload of size n when using snappy: # https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 return uint64(32 + n + n / 6) ``` @@ -534,9 +534,9 @@ Size limits are placed both on the [`RPCMsg`](https://github.com/libp2p/specs/bl Clients MUST reject and MUST NOT emit or propagate messages whose size exceed the following limits: -* the size of the encoded `RPCMsg`, including control messages, framing, topics etc, must not exceed `max_message_size()` -* the size of the compressed payload in the `Message.data` field must not exceed `max_compressed_len(MAX_PAYLOAD_SIZE)`. -* the size of the uncompressed payload must not exceed `MAX_PAYLOAD_SIZE` or the [type-specific SSZ bound](#what-are-ssz-type-size-bounds), whichever is lower. +* The size of the encoded `RPCMsg` (including control messages, framing, topics, etc) must not exceed `max_message_size()`. +* The size of the compressed payload in the `Message.data` field must not exceed `max_compressed_len(MAX_PAYLOAD_SIZE)`. +* The size of the uncompressed payload must not exceed `MAX_PAYLOAD_SIZE` or the [type-specific SSZ bound](#what-are-ssz-type-size-bounds), whichever is lower. ### The Req/Resp domain @@ -1715,19 +1715,17 @@ It is advisable to derive these lengths from the SSZ type definitions in use, to #### Why is the message size defined in terms of application payload? -When transmitting messages over gossipsub and / or req/resp, we want to ensure that the same payload sizes are supported no matter the underlying transport, decoupling the consensus layer from libp2p-induced overhead and the particular transmission strategy. 
+When transmitting messages over gossipsub and/or the req/resp domain, we want to ensure that the same payload sizes are supported regardless of the underlying transport, decoupling the consensus layer from libp2p-induced overhead and the particular transmission strategy. -To derive "encoded size limits" from desired application sizes we take into account snappy compression and framing overhead. +To derive "encoded size limits" from desired application sizes, we take into account snappy compression and framing overhead. -In the case of gossipsub, the protocol supports sending multiple application payloads as well as mixing application data with control messages in each gossipsub frame - the limit is set such that at least one max-sized application-level message together with a small amount (1kb) of gossipsub overhead is allowed - implementations are free to pack multiple smaller application messages into a single gossipsub frame, and / or combine it with control messages as they see fit. - -The limit is set on the uncompressed payload size in particular to protect against decompression bombs - although +In the case of gossipsub, the protocol supports sending multiple application payloads as well as mixing application data with control messages in each gossipsub frame. The limit is set such that at least one max-sized application-level message together with a small amount (1 KiB) of gossipsub overhead is allowed. Implementations are free to pack multiple smaller application messages into a single gossipsub frame, and/or combine it with control messages as they see fit. #### Why is there a limit on message sizes at all? -The message size limit protects against several forms of DoS and network-based amplification attacks and provide upper bounds for resource (network, memory) usage in the client based on protocol requirements to decode, buffer, cache, store and re-transmit messages which in turn translate into performance and protection tradeoffs, ensuring capacity to handle worst cases during recovery from network instability. +The message size limit protects against several forms of DoS and network-based amplification attacks and provides upper bounds for resource (network, memory) usage in the client based on protocol requirements to decode, buffer, cache, store and re-transmit messages which in turn translate into performance and protection tradeoffs, ensuring capacity to handle worst cases during recovery from network instability. -In particular, blocks which at the time of writing is the only message type without a practical SSZ-derived upper bound on size cannot be fully verified synchronously as part of gossipsub validity checks meaning that there exist cases where invalid messages signed by a validator may be amplified by the network. +In particular, blocks—-currently the only message type without a practical SSZ-derived upper bound on size—-cannot be fully verified synchronously as part of gossipsub validity checks. This means that there exist cases where invalid messages signed by a validator may be amplified by the network. 
## libp2p implementations matrix From e8eb367da26e908d0f0e7219bc9c5ad45e5b5e7e Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Wed, 8 Jan 2025 14:46:16 -0600 Subject: [PATCH 57/76] Fix two more small nits --- specs/phase0/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index ea51d96dfd..ab3306d235 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -199,7 +199,7 @@ This section outlines configurations that are used in this spec. | Name | Value | Description | |---|---|---| -| `MAX_PAYLOAD_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed payload in gossipsub messages / RPC chunks | +| `MAX_PAYLOAD_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed payload in gossipsub messages and RPC chunks | | `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request | | `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | Number of epochs on a subnet subscription (~27 hours) | | `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks | @@ -294,7 +294,7 @@ This defines both the type of data being sent on the topic and how the data fiel - `Encoding` - the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encodings) section for further details. -Clients MUST reject messages with unknown topic. +Clients MUST reject messages with an unknown topic. *Note*: `ForkDigestValue` is composed of values that are not known until the genesis block/state are available. Due to this, clients SHOULD NOT subscribe to gossipsub topics until these genesis values are known. From d867b84f093fe5270da7e7a49a9c9ea1be7c538c Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Wed, 8 Jan 2025 15:34:06 -0600 Subject: [PATCH 58/76] Add back remark about compression bombs --- specs/phase0/p2p-interface.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index ab3306d235..1196fca90d 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -1721,6 +1721,8 @@ To derive "encoded size limits" from desired application sizes, we take into acc In the case of gossipsub, the protocol supports sending multiple application payloads as well as mixing application data with control messages in each gossipsub frame. The limit is set such that at least one max-sized application-level message together with a small amount (1 KiB) of gossipsub overhead is allowed. Implementations are free to pack multiple smaller application messages into a single gossipsub frame, and/or combine it with control messages as they see fit. +The limit is set on the uncompressed payload size in particular to protect against decompression bombs. + #### Why is there a limit on message sizes at all? The message size limit protects against several forms of DoS and network-based amplification attacks and provides upper bounds for resource (network, memory) usage in the client based on protocol requirements to decode, buffer, cache, store and re-transmit messages which in turn translate into performance and protection tradeoffs, ensuring capacity to handle worst cases during recovery from network instability. 
From 902c74ee02933a6672c4b898df25b14a4360c131 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Mon, 13 Jan 2025 15:53:08 -0600 Subject: [PATCH 59/76] Simplify/optimize process_registry_updates --- specs/electra/beacon-chain.md | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/specs/electra/beacon-chain.md b/specs/electra/beacon-chain.md index c366b9c3f9..c332f2cdcd 100644 --- a/specs/electra/beacon-chain.md +++ b/specs/electra/beacon-chain.md @@ -816,27 +816,24 @@ def process_epoch(state: BeaconState) -> None: #### Modified `process_registry_updates` -*Note*: The function `process_registry_updates` is modified to -use the updated definitions of `initiate_validator_exit` and `is_eligible_for_activation_queue` -and changes how the activation epochs are computed for eligible validators. +*Note*: The function `process_registry_updates` is modified to use the updated definitions of +`initiate_validator_exit` and `is_eligible_for_activation_queue`, changes how the activation epochs +are computed for eligible validators, and processes activations in the same loop as activation +eligibility updates and ejections. ```python def process_registry_updates(state: BeaconState) -> None: - # Process activation eligibility and ejections + current_epoch = get_current_epoch(state) + activation_epoch = compute_activation_exit_epoch(current_epoch) + + # Process activation eligibility, ejections, and activations for index, validator in enumerate(state.validators): if is_eligible_for_activation_queue(validator): # [Modified in Electra:EIP7251] - validator.activation_eligibility_epoch = get_current_epoch(state) + 1 + validator.activation_eligibility_epoch = current_epoch + 1 - if ( - is_active_validator(validator, get_current_epoch(state)) - and validator.effective_balance <= EJECTION_BALANCE - ): + if is_active_validator(validator, current_epoch) and validator.effective_balance <= EJECTION_BALANCE: initiate_validator_exit(state, ValidatorIndex(index)) # [Modified in Electra:EIP7251] - # Activate all eligible validators - # [Modified in Electra:EIP7251] - activation_epoch = compute_activation_exit_epoch(get_current_epoch(state)) - for validator in state.validators: if is_eligible_for_activation(state, validator): validator.activation_epoch = activation_epoch ``` From eba62dbf00132dfdc97fbfab663a99cb23b9e8f1 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Wed, 15 Jan 2025 14:24:49 -0600 Subject: [PATCH 60/76] Fix various nits --- README.md | 1 - docker/README.md | 4 +- docs/docs/new-feature.md | 15 ++++++- docs/docs/templates/beacon-chain-template.md | 5 +-- fork_choice/safe-block.md | 1 + solidity_deposit_contract/README.md | 1 + specs/_features/custody_game/beacon-chain.md | 3 -- specs/_features/custody_game/validator.md | 7 ++-- specs/_features/das/das-core.md | 3 -- specs/_features/das/fork-choice.md | 1 - specs/_features/das/p2p-interface.md | 5 +-- specs/_features/das/sampling.md | 2 - specs/_features/eip6800/beacon-chain.md | 2 + specs/_features/eip6800/fork.md | 5 ++- specs/_features/eip6914/beacon-chain.md | 2 + specs/_features/eip6914/fork-choice.md | 2 + specs/_features/eip7732/beacon-chain.md | 2 + specs/_features/eip7732/builder.md | 11 ++++- specs/_features/eip7732/fork-choice.md | 14 ++++++- specs/_features/eip7732/fork.md | 2 + specs/_features/eip7732/p2p-interface.md | 16 +++++--- specs/_features/eip7732/validator.md | 15 ++++--- specs/_features/sharding/beacon-chain.md | 4 +- .../sharding/polynomial-commitments.md | 3 -- 
specs/_features/whisk/fork.md | 1 - specs/altair/fork.md | 2 + specs/altair/light-client/full-node.md | 2 - specs/altair/light-client/light-client.md | 2 - specs/altair/light-client/p2p-interface.md | 4 +- specs/altair/light-client/sync-protocol.md | 2 - specs/bellatrix/fork-choice.md | 1 + specs/bellatrix/fork.md | 2 + specs/bellatrix/validator.md | 1 - specs/capella/fork-choice.md | 1 + specs/capella/fork.md | 3 +- specs/capella/light-client/full-node.md | 2 - specs/capella/light-client/p2p-interface.md | 2 - specs/capella/light-client/sync-protocol.md | 2 - specs/deneb/fork-choice.md | 1 + specs/deneb/fork.md | 2 + specs/deneb/light-client/full-node.md | 2 - specs/deneb/light-client/p2p-interface.md | 2 - specs/deneb/light-client/sync-protocol.md | 2 - specs/deneb/p2p-interface.md | 2 + specs/deneb/polynomial-commitments.md | 2 - specs/deneb/validator.md | 1 + specs/electra/beacon-chain.md | 2 +- specs/electra/fork.md | 2 + specs/electra/light-client/fork.md | 2 + specs/electra/p2p-interface.md | 2 + specs/electra/validator.md | 2 + specs/fulu/fork-choice.md | 3 ++ specs/fulu/fork.md | 2 + specs/fulu/p2p-interface.md | 2 +- specs/fulu/polynomial-commitments-sampling.md | 2 + specs/phase0/beacon-chain.md | 5 +-- specs/phase0/deposit-contract.md | 1 + specs/phase0/fork-choice.md | 4 +- specs/phase0/p2p-interface.md | 41 ++++++++++--------- specs/phase0/weak-subjectivity.md | 6 +-- ssz/merkle-proofs.md | 3 +- ssz/simple-serialize.md | 3 +- sync/optimistic.md | 1 + tests/README.md | 11 ----- tests/core/pyspec/README.md | 4 ++ tests/formats/README.md | 6 +-- tests/formats/finality/README.md | 2 - tests/formats/fork_choice/README.md | 2 + tests/formats/genesis/initialization.md | 1 - tests/formats/genesis/validity.md | 3 -- tests/formats/operations/README.md | 1 - tests/formats/rewards/README.md | 1 + tests/formats/sanity/blocks.md | 3 -- tests/formats/sanity/slots.md | 3 -- tests/formats/ssz_generic/README.md | 4 -- tests/formats/ssz_static/core.md | 2 - tests/generators/README.md | 5 --- tests/generators/epoch_processing/README.md | 3 -- tests/generators/operations/README.md | 3 -- tests/generators/sanity/README.md | 3 -- 80 files changed, 155 insertions(+), 152 deletions(-) diff --git a/README.md b/README.md index e58927ac9e..5749a89506 100644 --- a/README.md +++ b/README.md @@ -81,7 +81,6 @@ Documentation on the different components used during spec writing can be found Conformance tests built from the executable python spec are available in the [Ethereum Proof-of-Stake Consensus Spec Tests](https://github.com/ethereum/consensus-spec-tests) repo. Compressed tarballs are available in [releases](https://github.com/ethereum/consensus-spec-tests/releases). - ## Installation and Usage The consensus-specs repo can be used by running the tests locally or inside a docker container. diff --git a/docker/README.md b/docker/README.md index 34bdd94c51..93a24b4e08 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,10 +1,11 @@ ## Docker related information This dockerfile sets up the dependencies required to run consensus-spec tests. 
The docker image can be locally built with: -- `docker build ./ -t $IMAGE_NAME -f ./docker/Dockerfile` +- `docker build ./ -t $IMAGE_NAME -f ./docker/Dockerfile` Handy commands: + - `docker run -it $IMAGE_NAME /bin/sh` will give you a shell inside the docker container to manually run any tests - `docker run $IMAGE_NAME make test` will run the make test command inside the docker container @@ -13,6 +14,7 @@ Ideally manual running of docker containers is for advanced users, we recommend The `scripts/build_run_docker_tests.sh` script will cover most use cases. The script allows the user to configure the fork(altair/bellatrix/capella..), `$IMAGE_NAME` (specifies the container to use), preset type (mainnet/minimal), and test all forks flags. Ideally, this is the main way that users interact with the spec tests instead of running it locally with varying versions of dependencies. E.g: + - `./build_run_docker_tests.sh --p mainnet` will run the mainnet preset tests - `./build_run_docker_tests.sh --a` will run all the tests across all the forks - `./build_run_docker_tests.sh --f deneb` will only run deneb tests diff --git a/docs/docs/new-feature.md b/docs/docs/new-feature.md index 78fd1357fc..4813c8d3fd 100644 --- a/docs/docs/new-feature.md +++ b/docs/docs/new-feature.md @@ -1,8 +1,9 @@ # How to add a new feature proposal in consensus-specs +## Table of contents + -## Table of Contents - [A. Make it executable for linter checks](#a-make-it-executable-for-linter-checks) - [1. Create a folder under `./specs/_features`](#1-create-a-folder-under-specs_features) @@ -23,7 +24,6 @@ - ## A. Make it executable for linter checks ### 1. Create a folder under `./specs/_features` @@ -35,6 +35,7 @@ For example, if it's an `EIP-9999` CL spec, you can create a `./specs/_features/ For example, if the latest fork is Capella, use `./specs/capella` content as your "previous fork". ### 3. Write down your proposed `beacon-chain.md` change + - You can either use [Beacon Chain Spec Template](./templates/beacon-chain-template.md), or make a copy of the latest fork content and then edit it. - Tips: - We use [`doctoc`](https://www.npmjs.com/package/doctoc) tool to generate the table of content. @@ -50,8 +51,11 @@ For example, if the latest fork is Capella, use `./specs/capella` content as you - Use simple Python rather than the fancy Python dark magic. ### 4. Add `fork.md` + You can refer to the previous fork's `fork.md` file. + ### 5. Make it executable + - Update Pyspec [`constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/constants.py) with the new feature name. - Update helpers for [`setup.py`](https://github.com/ethereum/consensus-specs/blob/dev/setup.py) for building the spec: - Update [`pysetup/constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/pysetup/constants.py) with the new feature name as Pyspec `constants.py` defined. @@ -63,17 +67,21 @@ You can refer to the previous fork's `fork.md` file. ## B: Make it executable for pytest and test generator ### 1. [Optional] Add `light-client/*` docs if you updated the content of `BeaconBlock` + - You can refer to the previous fork's `light-client/*` file. - Add the path of the new markdown files in [`pysetup/md_doc_paths.py`](https://github.com/ethereum/consensus-specs/blob/dev/pysetup/md_doc_paths.py)'s `get_md_doc_paths` function. ### 2. 
Add the mainnet and minimal presets and update the configs + - Add presets: `presets/mainnet/.yaml` and `presets/minimal/.yaml` - Update configs: `configs/mainnet.yaml` and `configs/minimal.yaml` ### 3. Update [`context.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py) + - [Optional] Add `with__and_later` decorator for writing pytest cases. e.g., `with_capella_and_later`. ### 4. Update [`constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/constants.py) + - Add `` to `ALL_PHASES` and `TESTGEN_FORKS` ### 5. Update [`genesis.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/genesis.py): @@ -94,6 +102,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold): - If the given feature changes `ExecutionPayload` fields, you have to set the initial values by updating `get_sample_genesis_execution_payload_header` helper. ### 6. Update CI configurations + - Update [GitHub Actions config](https://github.com/ethereum/consensus-specs/blob/dev/.github/workflows/run-tests.yml) - Update `pyspec-tests.strategy.matrix.version` list by adding new feature to it - Update [CircleCI config](https://github.com/ethereum/consensus-specs/blob/dev/.circleci/config.yml) @@ -102,7 +111,9 @@ def create_genesis_state(spec, validator_balances, activation_threshold): ## Others ### Bonus + - Add `validator.md` if honest validator behavior changes with the new feature. ### Need help? + You can tag spec elves for cleaning up your PR. 🧚 diff --git a/docs/docs/templates/beacon-chain-template.md b/docs/docs/templates/beacon-chain-template.md index 02e95d4c4f..e1c53a0511 100644 --- a/docs/docs/templates/beacon-chain-template.md +++ b/docs/docs/templates/beacon-chain-template.md @@ -3,6 +3,7 @@ # -- The Beacon Chain ## Table of contents + @@ -10,8 +11,6 @@ - - ## Introduction ## Notation @@ -28,7 +27,6 @@ ## Preset - ### [CATEGORY OF PRESETS] | Name | Value | @@ -64,5 +62,4 @@ class CONTAINER_NAME(Container): ### Epoch processing - ### Block processing diff --git a/fork_choice/safe-block.md b/fork_choice/safe-block.md index d4af9060d0..404c05f4cf 100644 --- a/fork_choice/safe-block.md +++ b/fork_choice/safe-block.md @@ -1,6 +1,7 @@ # Fork Choice -- Safe Block ## Table of contents + diff --git a/solidity_deposit_contract/README.md b/solidity_deposit_contract/README.md index 298ea92fef..bff544f268 100644 --- a/solidity_deposit_contract/README.md +++ b/solidity_deposit_contract/README.md @@ -14,6 +14,7 @@ In August 2020, version `r2` was released with metadata modifications and relice ## Compiling solidity deposit contract In this directory run: + ```sh make compile_deposit_contract ``` diff --git a/specs/_features/custody_game/beacon-chain.md b/specs/_features/custody_game/beacon-chain.md index 66aea773a7..9a1c00940d 100644 --- a/specs/_features/custody_game/beacon-chain.md +++ b/specs/_features/custody_game/beacon-chain.md @@ -55,7 +55,6 @@ - ## Introduction This document details the beacon chain additions and changes of to support the shard data custody game, @@ -101,7 +100,6 @@ building upon the [Sharding](../sharding/beacon-chain.md) specification. 
| `MAX_CUSTODY_CHUNK_CHALLENGE_RESPONSES` | `uint64(2**4)` (= 16) | | `MAX_CUSTODY_SLASHINGS` | `uint64(2**0)` (= 1) | - ### Size parameters | Name | Value | Unit | @@ -355,7 +353,6 @@ def get_custody_period_for_validator(validator_index: ValidatorIndex, epoch: Epo return (epoch + validator_index % EPOCHS_PER_CUSTODY_PERIOD) // EPOCHS_PER_CUSTODY_PERIOD ``` - ## Per-block processing ### Block processing diff --git a/specs/_features/custody_game/validator.md b/specs/_features/custody_game/validator.md index e711d92a1c..c4eb8c7e5b 100644 --- a/specs/_features/custody_game/validator.md +++ b/specs/_features/custody_game/validator.md @@ -1,8 +1,6 @@ # Custody Game -- Honest Validator **Notice**: This document is a work-in-progress for researchers and implementers. -This is an accompanying document to [Custody Game -- The Beacon Chain](./beacon-chain.md), which describes the expected actions of a "validator" -participating in the shard data Custody Game. ## Table of contents @@ -24,9 +22,11 @@ participating in the shard data Custody Game. - ## Introduction +This is an accompanying document to [Custody Game -- The Beacon Chain](./beacon-chain.md), which describes the expected actions of a "validator" +participating in the shard data Custody Game. + ## Prerequisites This document is an extension of the [Sharding -- Validator](../sharding/validator.md). All behaviors and definitions defined in the Sharding doc carry over unless explicitly noted or overridden. @@ -58,7 +58,6 @@ Up to `MAX_EARLY_DERIVED_SECRET_REVEALS`, [`EarlyDerivedSecretReveal`](./beacon- `attestation.data`, `attestation.aggregation_bits`, and `attestation.signature` are unchanged from Phase 0. But safety/validity in signing the message is premised upon calculation of the "custody bit" [TODO]. - ## How to avoid slashing Proposer and Attester slashings described in Phase 0 remain in place with the addition of the following. diff --git a/specs/_features/das/das-core.md b/specs/_features/das/das-core.md index f683cbbe13..b4173d6550 100644 --- a/specs/_features/das/das-core.md +++ b/specs/_features/das/das-core.md @@ -24,7 +24,6 @@ - ## Custom types We define the following Python custom types for type hinting and readability: @@ -33,7 +32,6 @@ We define the following Python custom types for type hinting and readability: | - | - | - | | `SampleIndex` | `uint64` | A sample index, corresponding to chunk of extended data | - ## Configuration ### Misc @@ -42,7 +40,6 @@ We define the following Python custom types for type hinting and readability: | - | - | - | | `MAX_RESAMPLE_TIME` | `TODO` (= TODO) | Time window to sample a shard blob and put it on vertical subnets | - ## New containers ### `DASSample` diff --git a/specs/_features/das/fork-choice.md b/specs/_features/das/fork-choice.md index f8ee68eabe..fbd28eb679 100644 --- a/specs/_features/das/fork-choice.md +++ b/specs/_features/das/fork-choice.md @@ -14,7 +14,6 @@ - ## Introduction This document is the beacon chain fork choice spec for Data Availability Sampling. 
The only change that we add from phase 0 is that we add a concept of "data dependencies"; diff --git a/specs/_features/das/p2p-interface.md b/specs/_features/das/p2p-interface.md index 2c165078d2..491e1fe8f6 100644 --- a/specs/_features/das/p2p-interface.md +++ b/specs/_features/das/p2p-interface.md @@ -95,7 +95,6 @@ Since the messages are content-addressed (instead of origin-stamped), multiple publishers of the same samples on a vertical subnet do not hurt performance, but actually improve it by shortcutting regular propagation on the vertical subnet, and thus lowering the latency to a sample. - ### Vertical subnets Vertical subnets propagate the samples to every peer that is interested. @@ -161,7 +160,6 @@ Take `blob = signed_blob.blob`: The [DAS participation spec](sampling.md#horizontal-subnets) outlines when and where to participate in DAS on horizontal subnets. - #### Vertical subnets: `das_sample_{subnet_index}` Shard blob samples can be verified with just a 48 byte KZG proof (commitment quotient polynomial), @@ -185,7 +183,6 @@ The following validations MUST pass before forwarding the `sample` on the vertic Upon receiving a valid sample, it SHOULD be retained for a buffer period if the local node is part of the backbone that covers this sample. This is to serve other peers that may have missed it. - ## DAS in the Req-Resp domain: Pull To pull samples from nodes, in case of network instability when samples are unavailable, a new query method is added to the Req-Resp domain. @@ -201,6 +198,7 @@ Note that DAS networking uses a different protocol prefix: `/eth2/das/req` **Protocol ID:** `/eth2/das/req/query/1/` Request Content: + ``` ( sample_index: SampleIndex @@ -208,6 +206,7 @@ Request Content: ``` Response Content: + ``` ( DASSample diff --git a/specs/_features/das/sampling.md b/specs/_features/das/sampling.md index 53685c6509..1f6eaf6378 100644 --- a/specs/_features/das/sampling.md +++ b/specs/_features/das/sampling.md @@ -22,7 +22,6 @@ - ## Data Availability Sampling TODO: Summary of Data Availability problem @@ -45,7 +44,6 @@ TODO TODO - ### DAS during network instability The GossipSub based retrieval of samples may not always work. diff --git a/specs/_features/eip6800/beacon-chain.md b/specs/_features/eip6800/beacon-chain.md index ab935cb870..a5134eda61 100644 --- a/specs/_features/eip6800/beacon-chain.md +++ b/specs/_features/eip6800/beacon-chain.md @@ -1,5 +1,7 @@ # EIP6800 -- The Beacon Chain +**Notice**: This document is a work-in-progress for researchers and implementers. + ## Table of contents diff --git a/specs/_features/eip6800/fork.md b/specs/_features/eip6800/fork.md index 14172c9f3e..ed8f9071e8 100644 --- a/specs/_features/eip6800/fork.md +++ b/specs/_features/eip6800/fork.md @@ -1,7 +1,10 @@ # EIP-6800 -- Fork Logic +**Notice**: This document is a work-in-progress for researchers and implementers. + ## Table of contents + @@ -15,6 +18,7 @@ - [Upgrading the state](#upgrading-the-state) + ## Introduction @@ -29,7 +33,6 @@ Warning: this configuration is not definitive. | `EIP6800_FORK_VERSION` | `Version('0x05000000')` | | `EIP6800_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** | - ## Helper functions ### Misc diff --git a/specs/_features/eip6914/beacon-chain.md b/specs/_features/eip6914/beacon-chain.md index fcb7716f7e..c95d70fb33 100644 --- a/specs/_features/eip6914/beacon-chain.md +++ b/specs/_features/eip6914/beacon-chain.md @@ -1,5 +1,7 @@ # EIP-6914 -- The Beacon Chain +**Notice**: This document is a work-in-progress for researchers and implementers. 
+ ## Table of contents diff --git a/specs/_features/eip6914/fork-choice.md b/specs/_features/eip6914/fork-choice.md index 25adc82d61..254d0c398c 100644 --- a/specs/_features/eip6914/fork-choice.md +++ b/specs/_features/eip6914/fork-choice.md @@ -1,5 +1,7 @@ # EIP-6914 -- Fork Choice +**Notice**: This document is a work-in-progress for researchers and implementers. + ## Table of contents diff --git a/specs/_features/eip7732/beacon-chain.md b/specs/_features/eip7732/beacon-chain.md index 2303e33d40..461a1f01a0 100644 --- a/specs/_features/eip7732/beacon-chain.md +++ b/specs/_features/eip7732/beacon-chain.md @@ -1,5 +1,7 @@ # EIP-7732 -- The Beacon Chain +**Notice**: This document is a work-in-progress for researchers and implementers. + ## Table of contents diff --git a/specs/_features/eip7732/builder.md b/specs/_features/eip7732/builder.md index 7c793b07e6..be60332ec8 100644 --- a/specs/_features/eip7732/builder.md +++ b/specs/_features/eip7732/builder.md @@ -1,7 +1,10 @@ # EIP-7732 -- Honest Builder -This is an accompanying document which describes the expected actions of a "builder" participating in the Ethereum proof-of-stake protocol. +**Notice**: This document is a work-in-progress for researchers and implementers. + +## Table of contents + @@ -13,9 +16,12 @@ This is an accompanying document which describes the expected actions of a "buil - [Honest payload withheld messages](#honest-payload-withheld-messages) + ## Introduction +This is an accompanying document which describes the expected actions of a "builder" participating in the Ethereum proof-of-stake protocol. + With the EIP-7732 Fork, the protocol includes new staked participants of the protocol called *Builders*. While Builders are a subset of the validator set, they have extra attributions that are optional. Validators may opt to not be builders and as such we collect the set of guidelines for those validators that want to act as builders in this document. ## Builders attributions @@ -113,7 +119,9 @@ To construct the `execution_payload_envelope` the builder must perform the follo After setting these parameters, the builder should run `process_execution_payload(state, signed_envelope, verify=False)` and this function should not trigger an exception. 6. Set `state_root` to `hash_tree_root(state)`. + After preparing the `envelope` the builder should sign the envelope using: + ```python def get_execution_payload_envelope_signature( state: BeaconState, envelope: ExecutionPayloadEnvelope, privkey: int) -> BLSSignature: @@ -121,6 +129,7 @@ def get_execution_payload_envelope_signature( signing_root = compute_signing_root(envelope, domain) return bls.Sign(privkey, signing_root) ``` + The builder assembles then `signed_execution_payload_envelope = SignedExecutionPayloadEnvelope(message=envelope, signature=signature)` and broadcasts it on the `execution_payload` global gossip topic. ### Honest payload withheld messages diff --git a/specs/_features/eip7732/fork-choice.md b/specs/_features/eip7732/fork-choice.md index a52e959cba..779b6e7009 100644 --- a/specs/_features/eip7732/fork-choice.md +++ b/specs/_features/eip7732/fork-choice.md @@ -1,6 +1,9 @@ # EIP-7732 -- Fork Choice +**Notice**: This document is a work-in-progress for researchers and implementers. + ## Table of contents + @@ -53,6 +56,7 @@ This is the modification of the fork choice accompanying the EIP-7732 upgrade. 
## Containers ### New `ChildNode` + Auxiliary class to consider `(block, slot, bool)` LMD voting ```python @@ -65,6 +69,7 @@ class ChildNode(Container): ## Helpers ### Modified `LatestMessage` + **Note:** The class is modified to keep track of the slot instead of the epoch. ```python @@ -75,6 +80,7 @@ class LatestMessage(object): ``` ### Modified `update_latest_messages` + **Note:** the function `update_latest_messages` is updated to use the attestation slot instead of target. Notice that this function is only called on validated attestations and validators cannot attest twice in the same epoch without equivocating. Notice also that target epoch number and slot number are validated on `validate_on_attestation`. ```python @@ -88,6 +94,7 @@ def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIn ``` ### Modified `Store` + **Note:** `Store` is modified to track the intermediate states of "empty" consensus blocks, that is, those consensus blocks for which the corresponding execution payload has not been revealed or has not been included on chain. ```python @@ -193,6 +200,7 @@ def is_parent_node_full(store: Store, block: BeaconBlock) -> bool: ``` ### Modified `get_ancestor` + **Note:** `get_ancestor` is modified to return whether the chain is based on an *empty* or *full* block. ```python @@ -213,6 +221,7 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> ChildNode: ``` ### Modified `get_checkpoint_block` + **Note:** `get_checkpoint_block` is modified to use the new `get_ancestor` ```python @@ -224,7 +233,6 @@ def get_checkpoint_block(store: Store, root: Root, epoch: Epoch) -> Root: return get_ancestor(store, root, epoch_first_slot).root ``` - ### `is_supporting_vote` ```python @@ -245,7 +253,9 @@ def is_supporting_vote(store: Store, node: ChildNode, message: LatestMessage) -> ``` ### New `compute_proposer_boost` + This is a helper to compute the proposer boost. It applies the proposer boost to any ancestor of the proposer boost root taking into account the payload presence. There is one exception: if the requested node has the same root and slot as the block with the proposer boost root, then the proposer boost is applied to both empty and full versions of the node. + ```python def compute_proposer_boost(store: Store, state: BeaconState, node: ChildNode) -> Gwei: if store.proposer_boost_root == Root(): @@ -264,6 +274,7 @@ def compute_proposer_boost(store: Store, state: BeaconState, node: ChildNode) -> ``` ### New `compute_withhold_boost` + This is a similar helper that applies for the withhold boost. In this case this always takes into account the reveal status. ```python @@ -283,6 +294,7 @@ def compute_withhold_boost(store: Store, state: BeaconState, node: ChildNode) -> ``` ### New `compute_reveal_boost` + This is a similar helper to the last two, the only difference is that the reveal boost is only applied to the full version of the node when querying for the same slot as the revealed payload. 
```python diff --git a/specs/_features/eip7732/fork.md b/specs/_features/eip7732/fork.md index fa03eb6886..dc61b22160 100644 --- a/specs/_features/eip7732/fork.md +++ b/specs/_features/eip7732/fork.md @@ -4,6 +4,7 @@ ## Table of contents + @@ -17,6 +18,7 @@ - [Upgrading the state](#upgrading-the-state) + ## Introduction diff --git a/specs/_features/eip7732/p2p-interface.md b/specs/_features/eip7732/p2p-interface.md index a2716933cd..d29c4d153c 100644 --- a/specs/_features/eip7732/p2p-interface.md +++ b/specs/_features/eip7732/p2p-interface.md @@ -1,5 +1,10 @@ # EIP-7732 -- Networking +**Notice**: This document is a work-in-progress for researchers and implementers. + +## Table of contents + + @@ -26,6 +31,7 @@ - [ExecutionPayloadEnvelopesByRoot v1](#executionpayloadenvelopesbyroot-v1) + ## Introduction @@ -39,9 +45,9 @@ The specification of these changes continues in the same format as the network s *[Modified in EIP-7732]* -| Name | Value | Description | -|------------------------------------------|-----------------------------------|---------------------------------------------------------------------| -| `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH_EIP7732` | `13` # TODO: Compute it when the spec stabilizes | Merkle proof depth for the `blob_kzg_commitments` list item | +| Name | Value | Description | +|------------------------------------------------|--------------|-------------------------------------------------------------| +| `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH_EIP7732` | `13` **TBD** | Merkle proof depth for the `blob_kzg_commitments` list item | ### Configuration @@ -51,7 +57,6 @@ The specification of these changes continues in the same format as the network s |------------------------|----------------|-------------------------------------------------------------------| | `MAX_REQUEST_PAYLOADS` | `2**7` (= 128) | Maximum number of execution payload envelopes in a single request | - ### Containers #### `BlobSidecar` @@ -227,7 +232,6 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: | `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` | | `EIP7732_FORK_VERSION` | `eip7732.SignedBeaconBlock` | - ##### BlobSidecarsByRoot v2 **Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/2/` @@ -239,7 +243,6 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: | `DENEB_FORK_VERSION` | `deneb.BlobSidecar` | | `EIP7732_FORK_VERSION` | `eip7732.BlobSidecar` | - ##### ExecutionPayloadEnvelopesByRoot v1 **Protocol ID:** `/eth2/beacon_chain/req/execution_payload_envelopes_by_root/1/` @@ -267,6 +270,7 @@ Response Content: List[SignedExecutionPayloadEnvelope, MAX_REQUEST_PAYLOADS] ) ``` + Requests execution payload envelopes by `signed_execution_payload_envelope.message.block_root`. The response is a list of `SignedExecutionPayloadEnvelope` whose length is less than or equal to the number of requested execution payload envelopes. It may be less in the case that the responding peer is missing payload envelopes. No more than `MAX_REQUEST_PAYLOADS` may be requested at a time. diff --git a/specs/_features/eip7732/validator.md b/specs/_features/eip7732/validator.md index 3a4d4e93ea..b4ebe0e510 100644 --- a/specs/_features/eip7732/validator.md +++ b/specs/_features/eip7732/validator.md @@ -1,11 +1,14 @@ # EIP-7732 -- Honest Validator -This document represents the changes and additions to the Honest validator guide included in the EIP-7732 fork. +**Notice**: This document is a work-in-progress for researchers and implementers. 
+ +## Table of contents + -**Table of Contents** +- [Introduction](#introduction) - [Validator assignment](#validator-assignment) - [Lookahead](#lookahead) - [Beacon chain responsibilities](#beacon-chain-responsibilities) @@ -19,6 +22,11 @@ This document represents the changes and additions to the Honest validator guide - [Constructing a payload attestation](#constructing-a-payload-attestation) + + +## Introduction + +This document represents the changes and additions to the Honest validator guide included in the EIP-7732 fork. ## Validator assignment @@ -66,7 +74,6 @@ Attestation duties are not changed for validators, however the attestation deadl Sync committee duties are not changed for validators, however the submission deadline is implicitly changed by the change in `INTERVALS_PER_SLOT`. - ### Block proposal Validators are still expected to propose `SignedBeaconBlock` at the beginning of any slot during which `is_proposer(state, validator_index)` returns `true`. The mechanism to prepare this beacon block and related sidecars differs from previous forks as follows @@ -130,5 +137,3 @@ def get_payload_attestation_message_signature( ``` **Remark** Validators do not need to check the full validity of the `ExecutionPayload` contained in within the envelope, but the checks in the [P2P guide](./p2p-interface.md) should pass for the `SignedExecutionPayloadEnvelope`. - - diff --git a/specs/_features/sharding/beacon-chain.md b/specs/_features/sharding/beacon-chain.md index 35b6776028..c7a8fb5fe4 100644 --- a/specs/_features/sharding/beacon-chain.md +++ b/specs/_features/sharding/beacon-chain.md @@ -45,7 +45,6 @@ - ## Introduction This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding, @@ -57,7 +56,6 @@ using KZG10 commitments to commit to data to remove any need for fraud proofs (a - **Data**: A list of KZG points, to translate a byte string into - **Blob**: Data with commitments and meta-data, like a flattened bundle of L2 transactions. - ## Constants The following values are (non-configurable) constants used throughout the specification. @@ -72,7 +70,7 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | -| `DOMAIN_SHARD_SAMPLE` | `DomainType('0x10000000')` | +| `DOMAIN_SHARD_SAMPLE` | `DomainType('0x10000000')` | ## Preset diff --git a/specs/_features/sharding/polynomial-commitments.md b/specs/_features/sharding/polynomial-commitments.md index 865328597e..7dc756acc0 100644 --- a/specs/_features/sharding/polynomial-commitments.md +++ b/specs/_features/sharding/polynomial-commitments.md @@ -44,7 +44,6 @@ - ## Introduction This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the sharding specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations, and hints what the best known algorithms for these implementations are included below. 
@@ -58,7 +57,6 @@ This document specifies basic polynomial operations and KZG polynomial commitmen | `BLS_MODULUS` | `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` (curve order of BLS12_381) | | `PRIMITIVE_ROOT_OF_UNITY` | `7` | Primitive root of unity of the BLS12_381 (inner) BLS_MODULUS | - ### KZG Trusted setup | Name | Value | @@ -259,7 +257,6 @@ def multiply_polynomials(a: BLSPolynomialByCoefficients, b: BLSPolynomialByCoeff return r ``` - #### `interpolate_polynomial` ```python diff --git a/specs/_features/whisk/fork.md b/specs/_features/whisk/fork.md index 300d191296..46bb5ca07f 100644 --- a/specs/_features/whisk/fork.md +++ b/specs/_features/whisk/fork.md @@ -19,7 +19,6 @@ This document describes the process of Whisk upgrade. - ``` """ WHISK_FORK_EPOCH diff --git a/specs/altair/fork.md b/specs/altair/fork.md index a25050a17a..17b0b79c80 100644 --- a/specs/altair/fork.md +++ b/specs/altair/fork.md @@ -2,6 +2,7 @@ ## Table of contents + @@ -15,6 +16,7 @@ - [Upgrading the state](#upgrading-the-state) + ## Introduction diff --git a/specs/altair/light-client/full-node.md b/specs/altair/light-client/full-node.md index b3b65b83fa..2cda350878 100644 --- a/specs/altair/light-client/full-node.md +++ b/specs/altair/light-client/full-node.md @@ -1,7 +1,5 @@ # Altair Light Client -- Full Node -**Notice**: This document is a work-in-progress for researchers and implementers. - ## Table of contents diff --git a/specs/altair/light-client/light-client.md b/specs/altair/light-client/light-client.md index 545c36a755..bd240af3da 100644 --- a/specs/altair/light-client/light-client.md +++ b/specs/altair/light-client/light-client.md @@ -1,7 +1,5 @@ # Altair Light Client -- Light Client -**Notice**: This document is a work-in-progress for researchers and implementers. - ## Table of contents diff --git a/specs/altair/light-client/p2p-interface.md b/specs/altair/light-client/p2p-interface.md index e1fe7487db..c8734bb71c 100644 --- a/specs/altair/light-client/p2p-interface.md +++ b/specs/altair/light-client/p2p-interface.md @@ -1,7 +1,5 @@ # Altair Light Client -- Networking -**Notice**: This document is a work-in-progress for researchers and implementers. - ## Table of contents @@ -158,6 +156,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: **Protocol ID:** `/eth2/beacon_chain/req/light_client_updates_by_range/1/` Request Content: + ``` ( start_period: uint64 @@ -166,6 +165,7 @@ Request Content: ``` Response Content: + ``` ( List[LightClientUpdate, MAX_REQUEST_LIGHT_CLIENT_UPDATES] diff --git a/specs/altair/light-client/sync-protocol.md b/specs/altair/light-client/sync-protocol.md index 8cb5543bf3..f85c55258d 100644 --- a/specs/altair/light-client/sync-protocol.md +++ b/specs/altair/light-client/sync-protocol.md @@ -1,7 +1,5 @@ # Altair Light Client -- Sync Protocol -**Notice**: This document is a work-in-progress for researchers and implementers. 
- ## Table of contents diff --git a/specs/bellatrix/fork-choice.md b/specs/bellatrix/fork-choice.md index 17fb8e024d..b64b214a8c 100644 --- a/specs/bellatrix/fork-choice.md +++ b/specs/bellatrix/fork-choice.md @@ -1,6 +1,7 @@ # Bellatrix -- Fork Choice ## Table of contents + diff --git a/specs/bellatrix/fork.md b/specs/bellatrix/fork.md index 569dccdc66..a00d8b32cc 100644 --- a/specs/bellatrix/fork.md +++ b/specs/bellatrix/fork.md @@ -2,6 +2,7 @@ ## Table of contents + @@ -15,6 +16,7 @@ - [Upgrading the state](#upgrading-the-state) + ## Introduction diff --git a/specs/bellatrix/validator.md b/specs/bellatrix/validator.md index cb9dda05d6..3de1e807f0 100644 --- a/specs/bellatrix/validator.md +++ b/specs/bellatrix/validator.md @@ -124,7 +124,6 @@ To obtain an execution payload, a block proposer building a block on top of a `s * `finalized_block_hash` is the block hash of the latest finalized execution payload (`Hash32()` if none yet finalized) * `suggested_fee_recipient` is the value suggested to be used for the `fee_recipient` field of the execution payload - ```python def prepare_execution_payload(state: BeaconState, safe_block_hash: Hash32, diff --git a/specs/capella/fork-choice.md b/specs/capella/fork-choice.md index a66410cf0b..3300a5c950 100644 --- a/specs/capella/fork-choice.md +++ b/specs/capella/fork-choice.md @@ -1,6 +1,7 @@ # Capella -- Fork Choice ## Table of contents + diff --git a/specs/capella/fork.md b/specs/capella/fork.md index 73d4ba2b71..867f26dfba 100644 --- a/specs/capella/fork.md +++ b/specs/capella/fork.md @@ -2,6 +2,7 @@ ## Table of contents + @@ -15,6 +16,7 @@ - [Upgrading the state](#upgrading-the-state) + ## Introduction @@ -27,7 +29,6 @@ This document describes the process of the Capella upgrade. | `CAPELLA_FORK_VERSION` | `Version('0x03000000')` | | `CAPELLA_FORK_EPOCH` | `Epoch(194048)` (April 12, 2023, 10:27:35pm UTC) | - ## Helper functions ### Misc diff --git a/specs/capella/light-client/full-node.md b/specs/capella/light-client/full-node.md index 319fb1c944..61e03e8ae6 100644 --- a/specs/capella/light-client/full-node.md +++ b/specs/capella/light-client/full-node.md @@ -1,7 +1,5 @@ # Capella Light Client -- Full Node -**Notice**: This document is a work-in-progress for researchers and implementers. - ## Table of contents diff --git a/specs/capella/light-client/p2p-interface.md b/specs/capella/light-client/p2p-interface.md index b6c1ec0808..98a5e5fe0f 100644 --- a/specs/capella/light-client/p2p-interface.md +++ b/specs/capella/light-client/p2p-interface.md @@ -1,7 +1,5 @@ # Capella Light Client -- Networking -**Notice**: This document is a work-in-progress for researchers and implementers. - ## Table of contents diff --git a/specs/capella/light-client/sync-protocol.md b/specs/capella/light-client/sync-protocol.md index b241b21378..abf44051c4 100644 --- a/specs/capella/light-client/sync-protocol.md +++ b/specs/capella/light-client/sync-protocol.md @@ -1,7 +1,5 @@ # Capella Light Client -- Sync Protocol -**Notice**: This document is a work-in-progress for researchers and implementers. 
- ## Table of contents diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 836fe61743..3fce780efd 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -1,6 +1,7 @@ # Deneb -- Fork Choice ## Table of contents + diff --git a/specs/deneb/fork.md b/specs/deneb/fork.md index 94168dbc7b..ff28c94186 100644 --- a/specs/deneb/fork.md +++ b/specs/deneb/fork.md @@ -2,6 +2,7 @@ ## Table of contents + @@ -15,6 +16,7 @@ - [Upgrading the state](#upgrading-the-state) + ## Introduction diff --git a/specs/deneb/light-client/full-node.md b/specs/deneb/light-client/full-node.md index 424723667c..17f4c50071 100644 --- a/specs/deneb/light-client/full-node.md +++ b/specs/deneb/light-client/full-node.md @@ -1,7 +1,5 @@ # Deneb Light Client -- Full Node -**Notice**: This document is a work-in-progress for researchers and implementers. - ## Table of contents diff --git a/specs/deneb/light-client/p2p-interface.md b/specs/deneb/light-client/p2p-interface.md index 0ca53056a9..ee49981231 100644 --- a/specs/deneb/light-client/p2p-interface.md +++ b/specs/deneb/light-client/p2p-interface.md @@ -1,7 +1,5 @@ # Deneb Light Client -- Networking -**Notice**: This document is a work-in-progress for researchers and implementers. - ## Table of contents diff --git a/specs/deneb/light-client/sync-protocol.md b/specs/deneb/light-client/sync-protocol.md index 38aa3897b3..53fb6e975f 100644 --- a/specs/deneb/light-client/sync-protocol.md +++ b/specs/deneb/light-client/sync-protocol.md @@ -1,7 +1,5 @@ # Deneb Light Client -- Sync Protocol -**Notice**: This document is a work-in-progress for researchers and implementers. - ## Table of contents diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index e38a50ba2e..2ea35ad500 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -341,6 +341,7 @@ When clients use the local execution layer to retrieve blobs, they MUST behave a *[New in Deneb:EIP4844]* Request Content: + ``` ( start_slot: Slot @@ -349,6 +350,7 @@ Request Content: ``` Response Content: + ``` ( List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS] diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index f73b17309b..24f4e471c8 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -150,7 +150,6 @@ def bit_reversal_permutation(sequence: Sequence[T]) -> Sequence[T]: ### BLS12-381 helpers - #### `multi_exp` This function performs a multi-scalar multiplication between `points` and `integers`. `points` can either be in G1 or G2. @@ -381,7 +380,6 @@ def verify_kzg_proof(commitment_bytes: Bytes48, bytes_to_kzg_proof(proof_bytes)) ``` - #### `verify_kzg_proof_impl` ```python diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index fa2bfb5d1a..948ba1b664 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -153,6 +153,7 @@ To construct a `BlobSidecar`, a `blob_sidecar` is defined with the necessary con Blobs associated with a block are packaged into sidecar objects for distribution to the associated sidecar topic, the `blob_sidecar_{subnet_id}` pubsub topic. 
Each `sidecar` is obtained from: + ```python def get_blob_sidecars(signed_block: SignedBeaconBlock, blobs: Sequence[Blob], diff --git a/specs/electra/beacon-chain.md b/specs/electra/beacon-chain.md index c332f2cdcd..d6854e5e7a 100644 --- a/specs/electra/beacon-chain.md +++ b/specs/electra/beacon-chain.md @@ -168,7 +168,7 @@ The following values are (non-configurable) constants used throughout the specif ### State list lengths | Name | Value | Unit | -| - | - | :-: | +| - | - | - | | `PENDING_DEPOSITS_LIMIT` | `uint64(2**27)` (= 134,217,728) | pending deposits | | `PENDING_PARTIAL_WITHDRAWALS_LIMIT` | `uint64(2**27)` (= 134,217,728) | pending partial withdrawals | | `PENDING_CONSOLIDATIONS_LIMIT` | `uint64(2**18)` (= 262,144) | pending consolidations | diff --git a/specs/electra/fork.md b/specs/electra/fork.md index b908c542ea..879e548853 100644 --- a/specs/electra/fork.md +++ b/specs/electra/fork.md @@ -4,6 +4,7 @@ ## Table of contents + @@ -17,6 +18,7 @@ - [Upgrading the state](#upgrading-the-state) + ## Introduction diff --git a/specs/electra/light-client/fork.md b/specs/electra/light-client/fork.md index 902c1d6bf3..da44ee4e40 100644 --- a/specs/electra/light-client/fork.md +++ b/specs/electra/light-client/fork.md @@ -1,5 +1,7 @@ # Electra Light Client -- Fork Logic +**Notice**: This document is a work-in-progress for researchers and implementers. + ## Table of contents diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md index 5064676f18..46e90250e0 100644 --- a/specs/electra/p2p-interface.md +++ b/specs/electra/p2p-interface.md @@ -1,5 +1,7 @@ # Electra -- Networking +**Notice**: This document is a work-in-progress for researchers and implementers. + ## Table of contents diff --git a/specs/electra/validator.md b/specs/electra/validator.md index 3620c30790..ee59a278cd 100644 --- a/specs/electra/validator.md +++ b/specs/electra/validator.md @@ -1,5 +1,7 @@ # Electra -- Honest Validator +**Notice**: This document is a work-in-progress for researchers and implementers. + ## Table of contents diff --git a/specs/fulu/fork-choice.md b/specs/fulu/fork-choice.md index 4a20906804..ff2014eba9 100644 --- a/specs/fulu/fork-choice.md +++ b/specs/fulu/fork-choice.md @@ -1,6 +1,9 @@ # Fulu -- Fork Choice +**Notice**: This document is a work-in-progress for researchers and implementers. + ## Table of contents + diff --git a/specs/fulu/fork.md b/specs/fulu/fork.md index e496467212..009bb6c943 100644 --- a/specs/fulu/fork.md +++ b/specs/fulu/fork.md @@ -4,6 +4,7 @@ ## Table of contents + @@ -17,6 +18,7 @@ - [Upgrading the state](#upgrading-the-state) + ## Introduction diff --git a/specs/fulu/p2p-interface.md b/specs/fulu/p2p-interface.md index 73d96192ff..bd4e2fb89c 100644 --- a/specs/fulu/p2p-interface.md +++ b/specs/fulu/p2p-interface.md @@ -234,7 +234,7 @@ Response Content: ``` ( - List[DataColumnSidecar, MAX_REQUEST_DATA_COLUMN_SIDECARS] + List[DataColumnSidecar, MAX_REQUEST_DATA_COLUMN_SIDECARS] ) ``` diff --git a/specs/fulu/polynomial-commitments-sampling.md b/specs/fulu/polynomial-commitments-sampling.md index c85e870896..4ef9f40cdd 100644 --- a/specs/fulu/polynomial-commitments-sampling.md +++ b/specs/fulu/polynomial-commitments-sampling.md @@ -1,5 +1,7 @@ # Fulu -- Polynomial Commitments Sampling +**Notice**: This document is a work-in-progress for researchers and implementers. 
+ ## Table of contents diff --git a/specs/phase0/beacon-chain.md b/specs/phase0/beacon-chain.md index 3d860d4a3e..926e0fdd7e 100644 --- a/specs/phase0/beacon-chain.md +++ b/specs/phase0/beacon-chain.md @@ -1,6 +1,7 @@ # Phase 0 -- The Beacon Chain ## Table of contents + @@ -1420,25 +1421,21 @@ def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: return Gwei(effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH) ``` - ```python def get_proposer_reward(state: BeaconState, attesting_index: ValidatorIndex) -> Gwei: return Gwei(get_base_reward(state, attesting_index) // PROPOSER_REWARD_QUOTIENT) ``` - ```python def get_finality_delay(state: BeaconState) -> uint64: return get_previous_epoch(state) - state.finalized_checkpoint.epoch ``` - ```python def is_in_inactivity_leak(state: BeaconState) -> bool: return get_finality_delay(state) > MIN_EPOCHS_TO_INACTIVITY_PENALTY ``` - ```python def get_eligible_validator_indices(state: BeaconState) -> Sequence[ValidatorIndex]: previous_epoch = get_previous_epoch(state) diff --git a/specs/phase0/deposit-contract.md b/specs/phase0/deposit-contract.md index 039b5998b9..29346034a9 100644 --- a/specs/phase0/deposit-contract.md +++ b/specs/phase0/deposit-contract.md @@ -1,6 +1,7 @@ # Phase 0 -- Deposit Contract ## Table of contents + diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index e7b4d1c28c..8b3066a65d 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -1,6 +1,7 @@ # Phase 0 -- Beacon Chain Fork Choice ## Table of contents + @@ -79,7 +80,6 @@ Any of the above handlers that trigger an unhandled exception (e.g. a failed ass 4) **Manual forks**: Manual forks may arbitrarily change the fork choice rule but are expected to be enacted at epoch transitions, with the fork details reflected in `state.fork`. 5) **Implementation**: The implementation found in this specification is constructed for ease of understanding rather than for optimization in computation, space, or any other resource. A number of optimized alternatives can be found [here](https://github.com/protolambda/lmd-ghost). - ### Constant | Name | Value | @@ -559,7 +559,6 @@ def on_tick_per_slot(store: Store, time: uint64) -> None: #### `on_attestation` helpers - ##### `validate_target_epoch_against_current_time` ```python @@ -627,7 +626,6 @@ def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIn store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=beacon_block_root) ``` - ### Handlers #### `on_tick` diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 240b3ad2cf..41a7b40bb7 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -1,6 +1,7 @@ # Phase 0 -- Networking ## Table of contents + @@ -184,8 +185,8 @@ We define the following Python custom types for type hinting and readability: | Name | SSZ equivalent | Description | | - | - | - | -| `NodeID` | `uint256` | node identifier | -| `SubnetID` | `uint64` | subnet identifier | +| `NodeID` | `uint256` | node identifier | +| `SubnetID` | `uint64` | subnet identifier | ### Constants @@ -198,16 +199,16 @@ We define the following Python custom types for type hinting and readability: This section outlines configurations that are used in this spec. 
| Name | Value | Description | -|---|---|---| +| - | - | - | | `MAX_PAYLOAD_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed payload in gossipsub messages and RPC chunks | | `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request | | `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | Number of epochs on a subnet subscription (~27 hours) | | `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks | -| `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated. | -| `MAXIMUM_GOSSIP_CLOCK_DISPARITY` | `500` | The maximum **milliseconds** of clock disparity assumed between honest nodes. | +| `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated | +| `MAXIMUM_GOSSIP_CLOCK_DISPARITY` | `500` | The maximum **milliseconds** of clock disparity assumed between honest nodes | | `MESSAGE_DOMAIN_INVALID_SNAPPY` | `DomainType('0x00000000')` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages | | `MESSAGE_DOMAIN_VALID_SNAPPY` | `DomainType('0x01000000')` | 4-byte domain for gossip message-id isolation of *valid* snappy messages | -| `SUBNETS_PER_NODE` | `2` | The number of long-lived subnets a beacon node should be subscribed to. | +| `SUBNETS_PER_NODE` | `2` | The number of long-lived subnets a beacon node should be subscribed to | | `ATTESTATION_SUBNET_COUNT` | `2**6` (= 64) | The number of attestation subnets used in the gossipsub protocol. | | `ATTESTATION_SUBNET_EXTRA_BITS` | `0` | The number of extra bits of a NodeId to use when mapping to a subscribed subnet | | `ATTESTATION_SUBNET_PREFIX_BITS` | `int(ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS)` | | @@ -422,7 +423,6 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_ `get_checkpoint_block(store, aggregate.data.beacon_block_root, finalized_checkpoint.epoch) == store.finalized_checkpoint.root` - ###### `voluntary_exit` The `voluntary_exit` topic is used solely for propagating signed voluntary validator exits to proposers on the network. @@ -497,8 +497,6 @@ The following validations MUST pass before forwarding the `attestation` on the s `get_checkpoint_block(store, attestation.data.beacon_block_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root` - - ##### Attestations and Aggregation Attestation broadcasting is grouped into subnets defined by a topic. @@ -729,6 +727,7 @@ Each _successful_ `response_chunk` contains a single `SignedBeaconBlock` payload **Protocol ID:** ``/eth2/beacon_chain/req/status/1/`` Request, Response Content: + ``` ( fork_digest: ForkDigest @@ -738,6 +737,7 @@ Request, Response Content: head_slot: Slot ) ``` + The fields are, as seen by the client at the time of sending the message: - `fork_digest`: The node's `ForkDigest` (`compute_fork_digest(current_fork_version, genesis_validators_root)`) where @@ -775,11 +775,13 @@ Implementers are free to implement such behavior in their own way. **Protocol ID:** ``/eth2/beacon_chain/req/goodbye/1/`` Request, Response Content: + ``` ( uint64 ) ``` + Client MAY send goodbye messages upon disconnection. The reason field MAY be one of the following values: - 1: Client shut down. @@ -799,6 +801,7 @@ The response MUST consist of a single `response_chunk`. 
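The derived values quoted in the configuration table earlier in this patch can be sanity-checked numerically. A small sketch, assuming the mainnet values `MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256` and `CHURN_LIMIT_QUOTIENT = 65536` together with 32-slot epochs and 12-second slots (these inputs are assumptions, not restated by the table itself):

```python
SECONDS_PER_SLOT = 12
SLOTS_PER_EPOCH = 32
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8  # 256 epochs (mainnet, assumed)
CHURN_LIMIT_QUOTIENT = 2**16                # 65536 (mainnet, assumed)

# MAX_PAYLOAD_SIZE
assert 10 * 2**20 == 10485760  # 10 MiB

# MIN_EPOCHS_FOR_BLOCK_REQUESTS
min_epochs_for_block_requests = MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2
assert min_epochs_for_block_requests == 33024
print(min_epochs_for_block_requests * SLOTS_PER_EPOCH * SECONDS_PER_SLOT / 86400)  # ~146.8 days, roughly 5 months

# EPOCHS_PER_SUBNET_SUBSCRIPTION
print(2**8 * SLOTS_PER_EPOCH * SECONDS_PER_SLOT / 3600)  # ~27.3 hours
```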
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/1/` Request Content: + ``` ( start_slot: Slot @@ -808,6 +811,7 @@ Request Content: ``` Response Content: + ``` ( List[SignedBeaconBlock, MAX_REQUEST_BLOCKS] @@ -1008,9 +1012,9 @@ Specifications of these parameters can be found in the [ENR Specification](http: The ENR `attnets` entry signifies the attestation subnet bitfield with the following form to more easily discover peers participating in particular attestation gossip subnets. -| Key | Value | -|:-------------|:-------------------------------------------------| -| `attnets` | SSZ `Bitvector[ATTESTATION_SUBNET_COUNT]` | +| Key | Value | +|:----------|:------------------------------------------| +| `attnets` | SSZ `Bitvector[ATTESTATION_SUBNET_COUNT]` | If a node's `MetaData.attnets` has any non-zero bit, the ENR MUST include the `attnets` entry with the same value as `MetaData.attnets`. @@ -1021,17 +1025,17 @@ If a node's `MetaData.attnets` is composed of all zeros, the ENR MAY optionally ENRs MUST carry a generic `eth2` key with an 16-byte value of the node's current fork digest, next fork version, and next fork epoch to ensure connections are made with peers on the intended Ethereum network. -| Key | Value | -|:-------------|:--------------------| -| `eth2` | SSZ `ENRForkID` | +| Key | Value | +|:-------|:----------------| +| `eth2` | SSZ `ENRForkID` | Specifically, the value of the `eth2` key MUST be the following SSZ encoded object (`ENRForkID`) ``` ( - fork_digest: ForkDigest - next_fork_version: Version - next_fork_epoch: Epoch + fork_digest: ForkDigest + next_fork_version: Version + next_fork_epoch: Epoch ) ``` @@ -1340,7 +1344,6 @@ Some examples of where messages could be duplicated: - `seen_ttl`: `SLOTS_PER_EPOCH * SECONDS_PER_SLOT / heartbeat_interval = approx. 550`. Attestation gossip validity is bounded by an epoch, so this is the safe max bound. - #### Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets? For some gossip channels (e.g. those for Attestations and BeaconBlocks), diff --git a/specs/phase0/weak-subjectivity.md b/specs/phase0/weak-subjectivity.md index 00ab559b32..9d20b6e41e 100644 --- a/specs/phase0/weak-subjectivity.md +++ b/specs/phase0/weak-subjectivity.md @@ -41,19 +41,19 @@ This document uses data structures, constants, functions, and terminology from ## Custom Types | Name | SSZ Equivalent | Description | -|---|---|---| +| - | - | - | | `Ether` | `uint64` | an amount in Ether | ## Constants | Name | Value | -|---|---| +| - | - | | `ETH_TO_GWEI` | `uint64(10**9)` | ## Configuration | Name | Value | -|---|---| +| - | - | | `SAFETY_DECAY` | `uint64(10)` | ## Weak Subjectivity Checkpoint diff --git a/ssz/merkle-proofs.md b/ssz/merkle-proofs.md index 36d0b83ac1..eb1fcaf343 100644 --- a/ssz/merkle-proofs.md +++ b/ssz/merkle-proofs.md @@ -1,8 +1,7 @@ # Merkle proof formats -**Notice**: This document is a work-in-progress for researchers and implementers. - ## Table of contents + diff --git a/ssz/simple-serialize.md b/ssz/simple-serialize.md index b4dedfd0e2..1275904685 100644 --- a/ssz/simple-serialize.md +++ b/ssz/simple-serialize.md @@ -1,6 +1,7 @@ # SimpleSerialize (SSZ) ## Table of contents + @@ -34,7 +35,7 @@ ## Constants | Name | Value | Description | -|-|-|-| +| - | - | - | | `BYTES_PER_CHUNK` | `32` | Number of bytes per chunk. | | `BYTES_PER_LENGTH_OFFSET` | `4` | Number of bytes per serialized length offset. | | `BITS_PER_BYTE` | `8` | Number of bits per byte. 
| diff --git a/sync/optimistic.md b/sync/optimistic.md index fd83f9a691..f29b29b3d7 100644 --- a/sync/optimistic.md +++ b/sync/optimistic.md @@ -1,6 +1,7 @@ # Optimistic Sync ## Table of contents + diff --git a/tests/README.md b/tests/README.md index dc2e02439d..261dcff177 100644 --- a/tests/README.md +++ b/tests/README.md @@ -123,8 +123,6 @@ More `yield` statements. The output of a consensus test is: 5. `'post'` 6. The state after the test - - ```python # One vote for the eth1 assert len(state.eth1_data_votes) == pre_eth1_votes + 1 @@ -142,7 +140,6 @@ Finally we assertions that test the transition was legitimate. In this case we h 2. The new block's `parent_root` is the same as the block in the previous location 3. The random data that every block includes was changed. - ## New Tests The easiest way to write a new test is to copy and modify an existing one. For example, @@ -173,7 +170,6 @@ def next_slot(spec, state): This looks like exactly what we need. So we add this call before we create the empty block: - ```python . . @@ -191,8 +187,6 @@ This looks like exactly what we need. So we add this call before we create the e That's it. Our new test works (copy `test_empty_block_transition`, rename it, add the `next_slot` call, and then run it to verify this). - - ## Tests Designed to Fail It is important to make sure that the system rejects invalid input, so our next step is to deal with cases where the protocol @@ -260,7 +254,6 @@ for the current state. This is the way we specify that a test is designed to fail - failed tests have no post state, because the processing mechanism errors out before creating it. - ## Attestation Tests The consensus layer doesn't provide any direct functionality to end users. It does @@ -280,7 +273,6 @@ which is how they get on chain to form consensus, reward honest validators, etc. [You can see a simple successful attestation test here](https://github.com/ethereum/consensus-specs/blob/926e5a3d722df973b9a12f12c015783de35cafa9/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py#L26-L30): Lets go over it line by line. - ```python @with_all_phases @spec_state_test @@ -292,7 +284,6 @@ def test_success(spec, state): creates a valid attestation (which can then be modified to make it invalid if needed). To see an attestion "from the inside" we need to follow it. - > ```python > def get_valid_attestation(spec, > state, @@ -372,7 +363,6 @@ Currently a single block is sufficient, but that may change in the future. [This function](https://github.com/ethereum/consensus-specs/blob/30fe7ba1107d976100eb0c3252ca7637b791e43a/tests/core/pyspec/eth2spec/test/helpers/attestations.py#L13-L50) processes the attestation and returns the result. - ### Adding an Attestation Test Attestations can't happen in the same block as the one about which they are attesting, or in a block that is @@ -380,7 +370,6 @@ after the block is finalized. This is specified as part of the specs, in the `pr (which is created from the spec by the `make pyspec` command you ran earlier). 
Here is the relevant code fragment: - ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: data = attestation.data diff --git a/tests/core/pyspec/README.md b/tests/core/pyspec/README.md index 2fdd107fb1..1b20176a42 100644 --- a/tests/core/pyspec/README.md +++ b/tests/core/pyspec/README.md @@ -16,21 +16,25 @@ However, most of the tests can be run in generator-mode, to output test vectors ### How to run tests To run all tests: + ```shell make test ``` To run all tests under the minimal preset: + ```shell make test preset=minimal ``` Or, to run a specific test function specify `k=`: + ```shell make test k=test_verify_kzg_proof ``` Or, to run a specific test function under a single fork specify `k=`: + ```shell make test fork=phase0 ``` diff --git a/tests/formats/README.md b/tests/formats/README.md index 4811595ba8..b19a6ecdca 100644 --- a/tests/formats/README.md +++ b/tests/formats/README.md @@ -3,6 +3,7 @@ This document defines the YAML format and structure used for consensus spec testing. ## Table of contents + * [About](#about) @@ -46,7 +47,6 @@ Test formats: - [`ssz_static`](./ssz_static/README.md) - More formats are planned, see tracking issues for CI/testing - ## Glossary - `generator`: a program that outputs one or more test-cases, each organized into a `config > runner > handler > suite` hierarchy. @@ -93,7 +93,6 @@ The aim is to provide clients with a well-defined scope of work to run a particu - Clients that are not complete in functionality can choose to ignore suites that use certain test-runners, or specific handlers of these test-runners. - Clients that are on older versions can test their work based on older releases of the generated tests, and catch up with newer releases when possible. - ## Test structure ``` @@ -152,7 +151,6 @@ Between all types of tests, a few formats are common: - **`.ssz_snappy`**: Like `.ssz`, but compressed with Snappy block compression. Snappy block compression is already applied to SSZ in consensus-layer gossip, available in client implementations, and thus chosen as compression method. - #### Special output parts ##### `meta.yaml` @@ -181,7 +179,6 @@ The format matches that of the `mainnet_config.yaml` and `minimal_config.yaml`, see the [`/configs`](../../configs/README.md#format) documentation. Config values that are introduced at a later fork may be omitted from tests of previous forks. - ## Config sourcing The constants configurations are located in: @@ -198,7 +195,6 @@ And copied by CI for testing purposes to: The first `` is a directory, which contains exactly all tests that make use of the given config. - ## Note for implementers The basic pattern for test-suite loading and running is: diff --git a/tests/formats/finality/README.md b/tests/formats/finality/README.md index af39f5c8ca..70d27dbe18 100644 --- a/tests/formats/finality/README.md +++ b/tests/formats/finality/README.md @@ -20,7 +20,6 @@ An SSZ-snappy encoded `BeaconState`, the state before running the block transiti Also available as `pre.ssz_snappy`. - ### `blocks_.yaml` A series of files, with `` in range `[0, blocks_count)`. Blocks need to be processed in order, @@ -34,7 +33,6 @@ Each block is also available as `blocks_.ssz_snappy` An SSZ-snappy encoded `BeaconState`, the state after applying the block transitions. 
- ## Condition The resulting state should match the expected `post` state, or if the `post` state is left blank, diff --git a/tests/formats/fork_choice/README.md b/tests/formats/fork_choice/README.md index 37d09f4787..9915df90db 100644 --- a/tests/formats/fork_choice/README.md +++ b/tests/formats/fork_choice/README.md @@ -3,6 +3,7 @@ The aim of the fork choice tests is to provide test coverage of the various components of the fork choice. ## Table of contents + @@ -190,6 +191,7 @@ should_override_forkchoice_update: { -- [New in Bellatrix] ``` For example: + ```yaml - checks: time: 192 diff --git a/tests/formats/genesis/initialization.md b/tests/formats/genesis/initialization.md index 9848e157d9..008b30ef07 100644 --- a/tests/formats/genesis/initialization.md +++ b/tests/formats/genesis/initialization.md @@ -11,7 +11,6 @@ eth1_block_hash: Bytes32 -- A `Bytes32` hex encoded, with prefix 0x. The root o eth1_timestamp: int -- An integer. The timestamp of the block, in seconds. ``` - ### `meta.yaml` A yaml file to help read the deposit count: diff --git a/tests/formats/genesis/validity.md b/tests/formats/genesis/validity.md index 15236c3ba3..a1bde295db 100644 --- a/tests/formats/genesis/validity.md +++ b/tests/formats/genesis/validity.md @@ -16,17 +16,14 @@ description: string -- Optional. Description of test case, purely for debuggi An SSZ-snappy encoded `BeaconState`, the state to validate as genesis candidate. - ### `is_valid.yaml` A boolean, true if the genesis state is deemed valid as to launch with, false otherwise. - ## Processing To process the data, call `is_valid_genesis_state(genesis)`. - ## Condition The result of calling `is_valid_genesis_state(genesis)` should match the expected `is_valid` boolean. diff --git a/tests/formats/operations/README.md b/tests/formats/operations/README.md index 7c3281e2c6..4734b55de1 100644 --- a/tests/formats/operations/README.md +++ b/tests/formats/operations/README.md @@ -24,7 +24,6 @@ An SSZ-snappy encoded operation object, e.g. a `ProposerSlashing`, or `Deposit`. An SSZ-snappy encoded `BeaconState`, the state after applying the operation. No value if operation processing is aborted. - ## Condition A handler of the `operations` test-runner should process these cases, diff --git a/tests/formats/rewards/README.md b/tests/formats/rewards/README.md index c7f3a9581b..47dc7e2b8d 100644 --- a/tests/formats/rewards/README.md +++ b/tests/formats/rewards/README.md @@ -5,6 +5,7 @@ There is no "change" factor, the rewards/penalties outputs are pure functions wi (See test condition documentation on how to run the tests.) `Deltas` is defined as: + ```python class Deltas(Container): rewards: List[Gwei, VALIDATOR_REGISTRY_LIMIT] diff --git a/tests/formats/sanity/blocks.md b/tests/formats/sanity/blocks.md index 7ea646b9e0..480adb1b3c 100644 --- a/tests/formats/sanity/blocks.md +++ b/tests/formats/sanity/blocks.md @@ -13,12 +13,10 @@ reveal_deadlines_setting: int -- see general test-format spec. blocks_count: int -- the number of blocks processed in this test. ``` - ### `pre.ssz_snappy` An SSZ-snappy encoded `BeaconState`, the state before running the block transitions. - ### `blocks_.ssz_snappy` A series of files, with `` in range `[0, blocks_count)`. Blocks need to be processed in order, @@ -30,7 +28,6 @@ Each file is a SSZ-snappy encoded `SignedBeaconBlock`. An SSZ-snappy encoded `BeaconState`, the state after applying the block transitions. 
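As a purely illustrative aid (not part of the test format definition), the sketch below shows how a consumer might wire the `meta.yaml`, `pre`, `blocks_<index>` and `post` files together before applying the condition described next. The fork/preset module, the YAML loader, and the snappy/`decode_bytes` helpers shown here are assumptions; actual client test runners will differ.

```python
# Illustrative sketch only -- not part of the test format definition.
# Assumes: python-snappy (block decompression), a YAML loader, and a pyspec
# module exposing BeaconState, SignedBeaconBlock and state_transition.
import os
import snappy
import yaml

from eth2spec.phase0 import mainnet as spec  # hypothetical fork/preset choice


def run_blocks_case(case_dir):
    with open(os.path.join(case_dir, "meta.yaml")) as f:
        meta = yaml.safe_load(f)

    def load_ssz(name, typ):
        # Test vector parts are SSZ-encoded and snappy block-compressed.
        with open(os.path.join(case_dir, f"{name}.ssz_snappy"), "rb") as f:
            return typ.decode_bytes(snappy.decompress(f.read()))

    state = load_ssz("pre", spec.BeaconState)

    try:
        for i in range(meta["blocks_count"]):
            signed_block = load_ssz(f"blocks_{i}", spec.SignedBeaconBlock)
            spec.state_transition(state, signed_block)
    except Exception:
        # No post state means the blocks are expected to be rejected
        # (see the Condition section below).
        assert not os.path.exists(os.path.join(case_dir, "post.ssz_snappy"))
        return

    post = load_ssz("post", spec.BeaconState)
    assert state.hash_tree_root() == post.hash_tree_root()
```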
- ## Condition The resulting state should match the expected `post` state, or if the `post` state is left blank, diff --git a/tests/formats/sanity/slots.md b/tests/formats/sanity/slots.md index f1b8a13219..953d2566b6 100644 --- a/tests/formats/sanity/slots.md +++ b/tests/formats/sanity/slots.md @@ -11,14 +11,12 @@ description: string -- Optional. Description of test case, purely for debuggi bls_setting: int -- see general test-format spec. ``` - ### `pre.ssz_snappy` An SSZ-snappy `BeaconState`, the state before running the transitions. Also available as `pre.ssz_snappy`. - ### `slots.yaml` An integer. The amount of slots to process (i.e. the difference in slots between pre and post), always a positive number. @@ -29,7 +27,6 @@ An SSZ-snappy `BeaconState`, the state after applying the transitions. Also available as `post.ssz_snappy`. - ### Processing The transition with pure time, no blocks, is known as `process_slots(state, slot)` in the spec. diff --git a/tests/formats/ssz_generic/README.md b/tests/formats/ssz_generic/README.md index 3545ab28c9..9e624f1ae1 100644 --- a/tests/formats/ssz_generic/README.md +++ b/tests/formats/ssz_generic/README.md @@ -23,7 +23,6 @@ The `ssz_generic` tests are split up into different handler, each specialized in - Containers - `containers` - ## Format For each type, a `valid` and an `invalid` suite is implemented. @@ -74,7 +73,6 @@ The `serialized` data should simply not be decoded without raising an error. Note that for some type declarations in the invalid suite, the type itself may technically be invalid. This is a valid way of detecting `invalid` data too. E.g. a 0-length basic vector. - ## Type declarations Most types are not as static, and can reasonably be constructed during test runtime from the test case name. @@ -97,7 +95,6 @@ Data: {length}: an unsigned integer ``` - ### `bitlist` ``` @@ -110,7 +107,6 @@ Data: {limit}: the list limit, in bits, of the bitlist. Does not include the length-delimiting bit in the serialized form. ``` - ### `bitvector` ``` diff --git a/tests/formats/ssz_static/core.md b/tests/formats/ssz_static/core.md index a198bbcaff..dd969c3f54 100644 --- a/tests/formats/ssz_static/core.md +++ b/tests/formats/ssz_static/core.md @@ -34,7 +34,6 @@ The SSZ-snappy encoded bytes. The same value as `serialized.ssz_snappy`, represented as YAML. - ## Condition A test-runner can implement the following assertions: @@ -46,7 +45,6 @@ A test-runner can implement the following assertions: and verify if the bytes match the original `serialized`. - Hash-tree-root: After parsing the `value` (or deserializing `serialized`), Hash-tree-root it: the output should match `root` - ## References **`serialized`**—[SSZ serialization](../../../ssz/simple-serialize.md#serialization) diff --git a/tests/generators/README.md b/tests/generators/README.md index 270d107ea5..f595a31043 100644 --- a/tests/generators/README.md +++ b/tests/generators/README.md @@ -14,7 +14,6 @@ An automated nightly tests release system, with a config filter applied, is bein - - [How to run generators](#how-to-run-generators) - [Cleaning](#cleaning) - [Running all test generators](#running-all-test-generators) @@ -25,8 +24,6 @@ An automated nightly tests release system, with a config filter applied, is bein - - ## How to run generators Prerequisites: @@ -52,7 +49,6 @@ make -j 4 gen_all The `-j N` flag makes the generators run in parallel, with `N` being the amount of cores. 
- ### Running a single generator The makefile auto-detects generators in the `tests/generators` directory and provides a tests-gen target (gen_) for each generator. See example: @@ -194,7 +190,6 @@ To add a new test generator that builds `New Tests`: However, if necessary (e.g. not using Python, or mixing in other languages), submit an issue, and it can be a special case. Do note that generators should be easy to maintain, lean, and based on the spec. - ## How to remove a test generator If a test generator is not needed anymore, undo the steps described above and make a new release: diff --git a/tests/generators/epoch_processing/README.md b/tests/generators/epoch_processing/README.md index c572993d23..203f93ec10 100644 --- a/tests/generators/epoch_processing/README.md +++ b/tests/generators/epoch_processing/README.md @@ -6,6 +6,3 @@ An epoch-processing test-runner can consume these sub-transition test-suites, and handle different kinds of epoch sub-transitions by processing the cases using the specified test handler. Information on the format of the tests can be found in the [epoch-processing test formats documentation](../../formats/epoch_processing/README.md). - - - diff --git a/tests/generators/operations/README.md b/tests/generators/operations/README.md index 234bb92a82..29f64295e0 100644 --- a/tests/generators/operations/README.md +++ b/tests/generators/operations/README.md @@ -7,6 +7,3 @@ An operation test-runner can consume these operation test-suites, and handle different kinds of operations by processing the cases using the specified test handler. Information on the format of the tests can be found in the [operations test formats documentation](../../formats/operations/README.md). - - - diff --git a/tests/generators/sanity/README.md b/tests/generators/sanity/README.md index 61979976db..9a5f5b25d6 100644 --- a/tests/generators/sanity/README.md +++ b/tests/generators/sanity/README.md @@ -3,6 +3,3 @@ Sanity tests cover regular state-transitions in a common block-list format, to ensure the basics work. Information on the format of the tests can be found in the [sanity test formats documentation](../../formats/sanity/README.md). - - - From 8d35951e942c8d18ddb97853f95137b90cc2e6d0 Mon Sep 17 00:00:00 2001 From: Nico Flaig Date: Thu, 16 Jan 2025 16:49:06 +0000 Subject: [PATCH 61/76] Document Electra fork-digest for BeaconBlocksByRange/Root v2 --- specs/electra/p2p-interface.md | 38 ++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md index 46e90250e0..a34c70478e 100644 --- a/specs/electra/p2p-interface.md +++ b/specs/electra/p2p-interface.md @@ -21,6 +21,8 @@ - [`beacon_attestation_{subnet_id}`](#beacon_attestation_subnet_id) - [The Req/Resp domain](#the-reqresp-domain) - [Messages](#messages) + - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2) + - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2) - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) - [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1) @@ -112,6 +114,42 @@ The following validations are removed: #### Messages +##### BeaconBlocksByRange v2 + +**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/` + +The Electra fork-digest is introduced to the `context` enum to specify Electra beacon block type. 
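As a rough, non-normative illustration (assuming the pyspec helpers `compute_epoch_at_slot`, `compute_fork_version`, and `compute_fork_digest` are available as in earlier fork specs), a responder derives the per-chunk `context` from the fork active at the block's slot; the table below then fixes the matching chunk SSZ type.

```python
# Hedged sketch: deriving the per-chunk `context` for a response block.
# Assumes an Electra pyspec module; helper names follow earlier fork specs.
from eth2spec.electra import mainnet as spec  # hypothetical preset choice


def response_chunk_context(signed_block, genesis_validators_root):
    epoch = spec.compute_epoch_at_slot(signed_block.message.slot)
    fork_version = spec.compute_fork_version(epoch)
    # The 4-byte fork digest is sent as the chunk's `context` field.
    return spec.compute_fork_digest(fork_version, genesis_validators_root)
```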
+ +Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: + +[0]: # (eth2spec: skip) + +| `fork_version` | Chunk SSZ type | +|--------------------------|-------------------------------| +| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` | +| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` | +| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` | +| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` | +| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` | +| `ELECTRA_FORK_VERSION` | `electra.SignedBeaconBlock` | + +##### BeaconBlocksByRoot v2 + +**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/` + +Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: + +[0]: # (eth2spec: skip) + +| `fork_version` | Chunk SSZ type | +|--------------------------|-------------------------------| +| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` | +| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` | +| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` | +| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` | +| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` | +| `ELECTRA_FORK_VERSION` | `electra.SignedBeaconBlock` | + ##### BlobSidecarsByRoot v1 **Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/` From a9c5952658c22b3ec4f70bc0bef72f5937cde5c5 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Thu, 16 Jan 2025 13:27:43 -0600 Subject: [PATCH 62/76] Add v1 to req/resp messages in phase0 --- specs/phase0/p2p-interface.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 41a7b40bb7..71829525c6 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -41,12 +41,12 @@ - [Encoding strategies](#encoding-strategies) - [SSZ-snappy encoding strategy](#ssz-snappy-encoding-strategy) - [Messages](#messages) - - [Status](#status) - - [Goodbye](#goodbye) - - [BeaconBlocksByRange](#beaconblocksbyrange) - - [BeaconBlocksByRoot](#beaconblocksbyroot) - - [Ping](#ping) - - [GetMetaData](#getmetadata) + - [Status v1](#status-v1) + - [Goodbye v1](#goodbye-v1) + - [BeaconBlocksByRange v1](#beaconblocksbyrange-v1) + - [BeaconBlocksByRoot v1](#beaconblocksbyroot-v1) + - [Ping v1](#ping-v1) + - [GetMetaData v1](#getmetadata-v1) - [The discovery domain: discv5](#the-discovery-domain-discv5) - [Integration into libp2p stacks](#integration-into-libp2p-stacks) - [ENR structure](#enr-structure) @@ -722,7 +722,7 @@ Each _successful_ `response_chunk` contains a single `SignedBeaconBlock` payload #### Messages -##### Status +##### Status v1 **Protocol ID:** ``/eth2/beacon_chain/req/status/1/`` @@ -770,7 +770,7 @@ SHOULD request beacon blocks from its counterparty via the `BeaconBlocksByRange` the client might need to send `Status` request again to learn if the peer has a higher head. Implementers are free to implement such behavior in their own way. -##### Goodbye +##### Goodbye v1 **Protocol ID:** ``/eth2/beacon_chain/req/goodbye/1/`` @@ -796,7 +796,7 @@ The request/response MUST be encoded as a single SSZ-field. The response MUST consist of a single `response_chunk`. 
-##### BeaconBlocksByRange +##### BeaconBlocksByRange v1 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/1/` @@ -875,7 +875,7 @@ In particular when `step == 1`, each `parent_root` MUST match the `hash_tree_roo After the initial block, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request. -##### BeaconBlocksByRoot +##### BeaconBlocksByRoot v1 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/1/` @@ -918,7 +918,7 @@ Clients SHOULD NOT respond with blocks that fail the beacon chain state transiti `/eth2/beacon_chain/req/beacon_blocks_by_root/1/` is deprecated. Clients MAY respond with an empty list during the deprecation transition period. -##### Ping +##### Ping v1 **Protocol ID:** `/eth2/beacon_chain/req/ping/1/` @@ -950,7 +950,7 @@ The request MUST be encoded as an SSZ-field. The response MUST consist of a single `response_chunk`. -##### GetMetaData +##### GetMetaData v1 **Protocol ID:** `/eth2/beacon_chain/req/metadata/1/` From 7d54cd061315af9e3174a1acf73e0ab8d577ede1 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Thu, 16 Jan 2025 13:30:02 -0600 Subject: [PATCH 63/76] For BlobSidecars, put ByRange before ByRoot --- specs/deneb/p2p-interface.md | 128 ++++++++++++++++----------------- specs/electra/p2p-interface.md | 20 +++--- 2 files changed, 74 insertions(+), 74 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 2ea35ad500..14a457e203 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -30,9 +30,9 @@ - [Messages](#messages) - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2) - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2) + - [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1) - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) - [Blob retrieval via local execution layer client](#blob-retrieval-via-local-execution-layer-client) - - [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1) - [Design decision rationale](#design-decision-rationale) - [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks) @@ -271,69 +271,6 @@ No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time. Clients SHOULD include a block in the response as soon as it passes the gossip validation rules. Clients SHOULD NOT respond with blocks that fail the beacon chain state transition. -##### BlobSidecarsByRoot v1 - -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/` - -*[New in Deneb:EIP4844]* - -Request Content: - -``` -( - List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS] -) -``` - -Response Content: - -``` -( - List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS] -) -``` - -Requests sidecars by block root and index. -The response is a list of `BlobSidecar` whose length is less than or equal to the number of requests. -It may be less in the case that the responding peer is missing blocks or sidecars. - -Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted, has valid inclusion proof, and is correct w.r.t. the expected KZG commitments through `verify_blob_kzg_proof`. - -No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time. - -`BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing). - -The response MUST consist of zero or more `response_chunk`. 
-Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload. - -Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob sidecar in the response. - -Clients MUST respond with at least one sidecar, if they have it. -Clients MAY limit the number of blocks and sidecars in the response. - -Clients SHOULD include a sidecar in the response as soon as it passes the gossip validation rules. -Clients SHOULD NOT respond with sidecars related to blocks that fail gossip validation rules. -Clients SHOULD NOT respond with sidecars related to blocks that fail the beacon chain state transition - -For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(blob_sidecar.signed_block_header.message.slot))` is used to select the fork namespace of the Response type. - -Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[0]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|--------------------------------|---------------------| -| `DENEB_FORK_VERSION` and later | `deneb.BlobSidecar` | - -###### Blob retrieval via local execution layer client - -In addition to `BlobSidecarsByRoot` requests, recent blobs MAY be retrieved by querying the Execution Layer (i.e. via `engine_getBlobsV1`). -Implementers are encouraged to leverage this method to increase the likelihood of incorporating and attesting to the last block when its proposer is not able to publish blobs on time. - -When clients use the local execution layer to retrieve blobs, they MUST behave as if the corresponding `blob_sidecar` had been received via gossip. In particular they MUST: -* publish the corresponding `blob_sidecar` on the `blob_sidecar_{subnet_id}` subnet. -* update gossip rule related data structures (i.e. update the anti-equivocation cache). - ##### BlobSidecarsByRange v1 **Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/` @@ -418,6 +355,69 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: |--------------------------------|---------------------| | `DENEB_FORK_VERSION` and later | `deneb.BlobSidecar` | +##### BlobSidecarsByRoot v1 + +**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/` + +*[New in Deneb:EIP4844]* + +Request Content: + +``` +( + List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS] +) +``` + +Response Content: + +``` +( + List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS] +) +``` + +Requests sidecars by block root and index. +The response is a list of `BlobSidecar` whose length is less than or equal to the number of requests. +It may be less in the case that the responding peer is missing blocks or sidecars. + +Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted, has valid inclusion proof, and is correct w.r.t. the expected KZG commitments through `verify_blob_kzg_proof`. + +No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time. + +`BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing). + +The response MUST consist of zero or more `response_chunk`. 
+Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload. + +Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob sidecar in the response. + +Clients MUST respond with at least one sidecar, if they have it. +Clients MAY limit the number of blocks and sidecars in the response. + +Clients SHOULD include a sidecar in the response as soon as it passes the gossip validation rules. +Clients SHOULD NOT respond with sidecars related to blocks that fail gossip validation rules. +Clients SHOULD NOT respond with sidecars related to blocks that fail the beacon chain state transition + +For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(blob_sidecar.signed_block_header.message.slot))` is used to select the fork namespace of the Response type. + +Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: + +[0]: # (eth2spec: skip) + +| `fork_version` | Chunk SSZ type | +|--------------------------------|---------------------| +| `DENEB_FORK_VERSION` and later | `deneb.BlobSidecar` | + +###### Blob retrieval via local execution layer client + +In addition to `BlobSidecarsByRoot` requests, recent blobs MAY be retrieved by querying the Execution Layer (i.e. via `engine_getBlobsV1`). +Implementers are encouraged to leverage this method to increase the likelihood of incorporating and attesting to the last block when its proposer is not able to publish blobs on time. + +When clients use the local execution layer to retrieve blobs, they MUST behave as if the corresponding `blob_sidecar` had been received via gossip. In particular they MUST: +* publish the corresponding `blob_sidecar` on the `blob_sidecar_{subnet_id}` subnet. +* update gossip rule related data structures (i.e. update the anti-equivocation cache). + ## Design decision rationale ### Why are blobs relayed as a sidecar, separate from beacon blocks? diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md index a34c70478e..a0b0b28422 100644 --- a/specs/electra/p2p-interface.md +++ b/specs/electra/p2p-interface.md @@ -23,8 +23,8 @@ - [Messages](#messages) - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2) - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2) - - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) - [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1) + - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) @@ -150,9 +150,9 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: | `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` | | `ELECTRA_FORK_VERSION` | `electra.SignedBeaconBlock` | -##### BlobSidecarsByRoot v1 +##### BlobSidecarsByRange v1 -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/` +**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/` *[Modified in Electra:EIP7691]* @@ -160,7 +160,8 @@ Request Content: ``` ( - List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS_ELECTRA] + start_slot: Slot + count: uint64 ) ``` @@ -174,11 +175,11 @@ Response Content: *Updated validation* -No more than `MAX_REQUEST_BLOB_SIDECARS_ELECTRA` may be requested at a time. 
+Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS_ELECTRA` sidecars. -##### BlobSidecarsByRange v1 +##### BlobSidecarsByRoot v1 -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/` +**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/` *[Modified in Electra:EIP7691]* @@ -186,8 +187,7 @@ Request Content: ``` ( - start_slot: Slot - count: uint64 + List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS_ELECTRA] ) ``` @@ -201,4 +201,4 @@ Response Content: *Updated validation* -Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS_ELECTRA` sidecars. +No more than `MAX_REQUEST_BLOB_SIDECARS_ELECTRA` may be requested at a time. From 155d15ebc301c8e4731131b69adaeaa18a056a5e Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Thu, 16 Jan 2025 13:33:22 -0600 Subject: [PATCH 64/76] For DataColumnSidecars, put ByRange before ByRoot --- specs/fulu/p2p-interface.md | 102 ++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 51 deletions(-) diff --git a/specs/fulu/p2p-interface.md b/specs/fulu/p2p-interface.md index bd4e2fb89c..b721847b02 100644 --- a/specs/fulu/p2p-interface.md +++ b/specs/fulu/p2p-interface.md @@ -29,8 +29,8 @@ - [`data_column_sidecar_{subnet_id}`](#data_column_sidecar_subnet_id) - [The Req/Resp domain](#the-reqresp-domain) - [Messages](#messages) - - [DataColumnSidecarsByRoot v1](#datacolumnsidecarsbyroot-v1) - [DataColumnSidecarsByRange v1](#datacolumnsidecarsbyrange-v1) + - [DataColumnSidecarsByRoot v1](#datacolumnsidecarsbyroot-v1) - [GetMetaData v3](#getmetadata-v3) - [The discovery domain: discv5](#the-discovery-domain-discv5) - [ENR structure](#enr-structure) @@ -208,56 +208,6 @@ The following validations MUST pass before forwarding the `sidecar: DataColumnSi #### Messages -##### DataColumnSidecarsByRoot v1 - -**Protocol ID:** `/eth2/beacon_chain/req/data_column_sidecars_by_root/1/` - -*[New in Fulu:EIP7594]* - -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|---------------------|--------------------------| -| `FULU_FORK_VERSION` | `fulu.DataColumnSidecar` | - -Request Content: - -``` -( - List[DataColumnIdentifier, MAX_REQUEST_DATA_COLUMN_SIDECARS] -) -``` - -Response Content: - -``` -( - List[DataColumnSidecar, MAX_REQUEST_DATA_COLUMN_SIDECARS] -) -``` - -Requests sidecars by block root and index. -The response is a list of `DataColumnIdentifier` whose length is less than or equal to the number of requests. -It may be less in the case that the responding peer is missing blocks or sidecars. - -Before consuming the next response chunk, the response reader SHOULD verify the data column sidecar is well-formatted through `verify_data_column_sidecar`, has valid inclusion proof through `verify_data_column_sidecar_inclusion_proof`, and is correct w.r.t. the expected KZG commitments through `verify_data_column_sidecar_kzg_proofs`. - -No more than `MAX_REQUEST_DATA_COLUMN_SIDECARS` may be requested at a time. - -The response MUST consist of zero or more `response_chunk`. -Each _successful_ `response_chunk` MUST contain a single `DataColumnSidecar` payload. 
- -Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS, FULU_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the data column sidecar in the response. - -Clients MUST respond with at least one sidecar, if they have it. -Clients MAY limit the number of blocks and sidecars in the response. - -Clients SHOULD include a sidecar in the response as soon as it passes the gossip validation rules. -Clients SHOULD NOT respond with sidecars related to blocks that fail gossip validation rules. -Clients SHOULD NOT respond with sidecars related to blocks that fail the beacon chain state transition - ##### DataColumnSidecarsByRange v1 **Protocol ID:** `/eth2/beacon_chain/req/data_column_sidecars_by_range/1/` @@ -339,6 +289,56 @@ Clients MUST respond with data column sidecars that are consistent from a single After the initial data column sidecar, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request. +##### DataColumnSidecarsByRoot v1 + +**Protocol ID:** `/eth2/beacon_chain/req/data_column_sidecars_by_root/1/` + +*[New in Fulu:EIP7594]* + +The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: + +[1]: # (eth2spec: skip) + +| `fork_version` | Chunk SSZ type | +|---------------------|--------------------------| +| `FULU_FORK_VERSION` | `fulu.DataColumnSidecar` | + +Request Content: + +``` +( + List[DataColumnIdentifier, MAX_REQUEST_DATA_COLUMN_SIDECARS] +) +``` + +Response Content: + +``` +( + List[DataColumnSidecar, MAX_REQUEST_DATA_COLUMN_SIDECARS] +) +``` + +Requests sidecars by block root and index. +The response is a list of `DataColumnIdentifier` whose length is less than or equal to the number of requests. +It may be less in the case that the responding peer is missing blocks or sidecars. + +Before consuming the next response chunk, the response reader SHOULD verify the data column sidecar is well-formatted through `verify_data_column_sidecar`, has valid inclusion proof through `verify_data_column_sidecar_inclusion_proof`, and is correct w.r.t. the expected KZG commitments through `verify_data_column_sidecar_kzg_proofs`. + +No more than `MAX_REQUEST_DATA_COLUMN_SIDECARS` may be requested at a time. + +The response MUST consist of zero or more `response_chunk`. +Each _successful_ `response_chunk` MUST contain a single `DataColumnSidecar` payload. + +Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS, FULU_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the data column sidecar in the response. + +Clients MUST respond with at least one sidecar, if they have it. +Clients MAY limit the number of blocks and sidecars in the response. + +Clients SHOULD include a sidecar in the response as soon as it passes the gossip validation rules. +Clients SHOULD NOT respond with sidecars related to blocks that fail gossip validation rules. 
+Clients SHOULD NOT respond with sidecars related to blocks that fail the beacon chain state transition + ##### GetMetaData v3 **Protocol ID:** `/eth2/beacon_chain/req/metadata/3/` From c85f58a91b33bb1ba8a759cae8f2b6c28b1b4925 Mon Sep 17 00:00:00 2001 From: debjit Date: Sat, 18 Jan 2025 19:13:31 +0530 Subject: [PATCH 65/76] Add test for pending deposits with same pubkey and different withdrawal credentials --- .../electra/fork/test_electra_fork_basic.py | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py index 4416063b39..24ae4ae1f9 100644 --- a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py @@ -1,3 +1,4 @@ +import random from eth2spec.test.context import ( with_phases, with_custom_state, @@ -151,6 +152,38 @@ def test_fork_has_compounding_withdrawal_credential(spec, phases, state): )] +@with_phases(phases=[DENEB], other_phases=[ELECTRA]) +@spec_test +@with_state +@with_meta_tags(ELECTRA_FORK_TEST_META_TAGS) +def test_fork_pending_deposits_with_same_pubkey_different_withdrawal_credentials(spec, phases, state): + post_spec = phases[ELECTRA] + + num_validators = len(state.validators) + indexes_with_same_pubkey = random.sample(range(num_validators), min(10, num_validators)) + constant_pubkey = b'\x11' * 48 + + for index in range(num_validators): + state.validators[index].activation_epoch = spec.FAR_FUTURE_EPOCH + + withdrawal_credentials = [] + for index in indexes_with_same_pubkey: + state.validators[index].pubkey = constant_pubkey + withdrawal_credentials.append(state.validators[index].withdrawal_credentials) + + # ensure that the withdrawal credentials are unique + assert len(set(withdrawal_credentials)) == len(withdrawal_credentials) + + post_state = yield from run_fork_test(post_spec, state) + + assert len(post_state.pending_deposits) == num_validators + + for index in indexes_with_same_pubkey: + assert post_state.pending_deposits[index].pubkey == constant_pubkey + expected_withdrawal_credentials = state.validators[index].withdrawal_credentials + assert post_state.pending_deposits[index].withdrawal_credentials == expected_withdrawal_credentials + + @with_phases(phases=[DENEB], other_phases=[ELECTRA]) @spec_test @with_state From 6db823856de272a29406cdd1825a7d8bd9637be0 Mon Sep 17 00:00:00 2001 From: debjit Date: Sat, 18 Jan 2025 22:37:01 +0530 Subject: [PATCH 66/76] Add tests for deposit transitions with same pubkey and different withdrawal credentials --- .../electra/fork/test_electra_fork_basic.py | 9 ++---- .../sanity/blocks/test_deposit_transition.py | 29 +++++++++++++++++++ 2 files changed, 31 insertions(+), 7 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py index 24ae4ae1f9..5da86b1c3f 100644 --- a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py @@ -166,13 +166,8 @@ def test_fork_pending_deposits_with_same_pubkey_different_withdrawal_credentials for index in range(num_validators): state.validators[index].activation_epoch = spec.FAR_FUTURE_EPOCH - withdrawal_credentials = [] for index in indexes_with_same_pubkey: state.validators[index].pubkey = constant_pubkey - 
withdrawal_credentials.append(state.validators[index].withdrawal_credentials) - - # ensure that the withdrawal credentials are unique - assert len(set(withdrawal_credentials)) == len(withdrawal_credentials) post_state = yield from run_fork_test(post_spec, state) @@ -180,8 +175,8 @@ def test_fork_pending_deposits_with_same_pubkey_different_withdrawal_credentials for index in indexes_with_same_pubkey: assert post_state.pending_deposits[index].pubkey == constant_pubkey - expected_withdrawal_credentials = state.validators[index].withdrawal_credentials - assert post_state.pending_deposits[index].withdrawal_credentials == expected_withdrawal_credentials + assert (post_state.pending_deposits[index].withdrawal_credentials + == state.validators[index].withdrawal_credentials) @with_phases(phases=[DENEB], other_phases=[ELECTRA]) diff --git a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py index a9c2c62814..c350d201e7 100644 --- a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py +++ b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py @@ -1,3 +1,4 @@ +import random from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) @@ -262,3 +263,31 @@ def test_deposit_transition__deposit_and_top_up_same_block(spec, state): assert state.pending_deposits[pre_pending_deposits].amount == block.body.deposits[0].data.amount amount_from_deposit = block.body.execution_requests.deposits[0].amount assert state.pending_deposits[pre_pending_deposits + 1].amount == amount_from_deposit + + +@with_phases([ELECTRA]) +@spec_state_test +def test_deposit_transition__deposit_with_same_pubkey_different_withdrawal_credentials(spec, state): + deposit_count = 1 + deposit_request_count = 4 + + state, block = prepare_state_and_block(spec, state, + deposit_cnt=deposit_count, + deposit_request_cnt=deposit_request_count) + + # pick 2 indices among deposit requests to have the same pubkey as the deposit + indexes_with_same_pubkey = random.sample(range(deposit_count, deposit_request_count), 2) + for index in indexes_with_same_pubkey: + block.body.execution_requests.deposits[index].pubkey = block.body.deposits[0].data.pubkey + + block.body.execution_payload.block_hash = compute_el_block_hash_for_block(spec, block) + + deposit_requests = block.body.execution_requests.deposits.copy() + + yield from run_deposit_transition_block(spec, state, block) + + assert len(state.pending_deposits) == deposit_request_count + deposit_count + for index in indexes_with_same_pubkey: + assert state.pending_deposits[deposit_count + index].pubkey == deposit_requests[index].pubkey + assert (state.pending_deposits[deposit_count + index].withdrawal_credentials + == deposit_requests[index].withdrawal_credentials) \ No newline at end of file From 5c2d5511b8b28ad2832d1597e9908e31ded77e2a Mon Sep 17 00:00:00 2001 From: debjit Date: Sat, 18 Jan 2025 22:41:32 +0530 Subject: [PATCH 67/76] fmt and revert test for pending deposits with same pubkey and different withdrawal credentials --- .../electra/fork/test_electra_fork_basic.py | 28 ------------------- .../sanity/blocks/test_deposit_transition.py | 4 +-- 2 files changed, 2 insertions(+), 30 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py index 5da86b1c3f..4416063b39 100644 --- 
a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py @@ -1,4 +1,3 @@ -import random from eth2spec.test.context import ( with_phases, with_custom_state, @@ -152,33 +151,6 @@ def test_fork_has_compounding_withdrawal_credential(spec, phases, state): )] -@with_phases(phases=[DENEB], other_phases=[ELECTRA]) -@spec_test -@with_state -@with_meta_tags(ELECTRA_FORK_TEST_META_TAGS) -def test_fork_pending_deposits_with_same_pubkey_different_withdrawal_credentials(spec, phases, state): - post_spec = phases[ELECTRA] - - num_validators = len(state.validators) - indexes_with_same_pubkey = random.sample(range(num_validators), min(10, num_validators)) - constant_pubkey = b'\x11' * 48 - - for index in range(num_validators): - state.validators[index].activation_epoch = spec.FAR_FUTURE_EPOCH - - for index in indexes_with_same_pubkey: - state.validators[index].pubkey = constant_pubkey - - post_state = yield from run_fork_test(post_spec, state) - - assert len(post_state.pending_deposits) == num_validators - - for index in indexes_with_same_pubkey: - assert post_state.pending_deposits[index].pubkey == constant_pubkey - assert (post_state.pending_deposits[index].withdrawal_credentials - == state.validators[index].withdrawal_credentials) - - @with_phases(phases=[DENEB], other_phases=[ELECTRA]) @spec_test @with_state diff --git a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py index c350d201e7..40058dbe20 100644 --- a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py +++ b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py @@ -274,7 +274,7 @@ def test_deposit_transition__deposit_with_same_pubkey_different_withdrawal_crede state, block = prepare_state_and_block(spec, state, deposit_cnt=deposit_count, deposit_request_cnt=deposit_request_count) - + # pick 2 indices among deposit requests to have the same pubkey as the deposit indexes_with_same_pubkey = random.sample(range(deposit_count, deposit_request_count), 2) for index in indexes_with_same_pubkey: @@ -290,4 +290,4 @@ def test_deposit_transition__deposit_with_same_pubkey_different_withdrawal_crede for index in indexes_with_same_pubkey: assert state.pending_deposits[deposit_count + index].pubkey == deposit_requests[index].pubkey assert (state.pending_deposits[deposit_count + index].withdrawal_credentials - == deposit_requests[index].withdrawal_credentials) \ No newline at end of file + == deposit_requests[index].withdrawal_credentials) From 70be586422ddada26c69e0fa01a69d7d7ed5e6e1 Mon Sep 17 00:00:00 2001 From: debjit Date: Mon, 20 Jan 2025 09:57:03 +0530 Subject: [PATCH 68/76] predefined indices which have the same pubkey --- .../test/electra/sanity/blocks/test_deposit_transition.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py index 40058dbe20..375871685a 100644 --- a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py +++ b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py @@ -1,4 +1,3 @@ -import random from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, ) @@ -276,7 +275,7 @@ def 
test_deposit_transition__deposit_with_same_pubkey_different_withdrawal_crede deposit_request_cnt=deposit_request_count) # pick 2 indices among deposit requests to have the same pubkey as the deposit - indexes_with_same_pubkey = random.sample(range(deposit_count, deposit_request_count), 2) + indexes_with_same_pubkey = [1, 3] for index in indexes_with_same_pubkey: block.body.execution_requests.deposits[index].pubkey = block.body.deposits[0].data.pubkey From a079dbfd4c4d257be2a34a1a7920884ffc0145ce Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Mon, 20 Jan 2025 09:28:10 -0600 Subject: [PATCH 69/76] Rename indexes to indices --- .../test/electra/sanity/blocks/test_deposit_transition.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py index 375871685a..a71c9e6c4f 100644 --- a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py +++ b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py @@ -275,8 +275,8 @@ def test_deposit_transition__deposit_with_same_pubkey_different_withdrawal_crede deposit_request_cnt=deposit_request_count) # pick 2 indices among deposit requests to have the same pubkey as the deposit - indexes_with_same_pubkey = [1, 3] - for index in indexes_with_same_pubkey: + indices_with_same_pubkey = [1, 3] + for index in indices_with_same_pubkey: block.body.execution_requests.deposits[index].pubkey = block.body.deposits[0].data.pubkey block.body.execution_payload.block_hash = compute_el_block_hash_for_block(spec, block) @@ -286,7 +286,7 @@ def test_deposit_transition__deposit_with_same_pubkey_different_withdrawal_crede yield from run_deposit_transition_block(spec, state, block) assert len(state.pending_deposits) == deposit_request_count + deposit_count - for index in indexes_with_same_pubkey: + for index in indices_with_same_pubkey: assert state.pending_deposits[deposit_count + index].pubkey == deposit_requests[index].pubkey assert (state.pending_deposits[deposit_count + index].withdrawal_credentials == deposit_requests[index].withdrawal_credentials) From 13d9aa1374ed4e34e4377064e1bda8ebe6ce7d1c Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Mon, 20 Jan 2025 09:48:23 -0600 Subject: [PATCH 70/76] Add extra assert to ensure withdrawal creds are different --- .../test/electra/sanity/blocks/test_deposit_transition.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py index a71c9e6c4f..f2d9a6f11a 100644 --- a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py +++ b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_deposit_transition.py @@ -278,6 +278,9 @@ def test_deposit_transition__deposit_with_same_pubkey_different_withdrawal_crede indices_with_same_pubkey = [1, 3] for index in indices_with_same_pubkey: block.body.execution_requests.deposits[index].pubkey = block.body.deposits[0].data.pubkey + # ensure top-up deposit request withdrawal credentials are different than the deposit + assert (block.body.execution_requests.deposits[index].withdrawal_credentials + != block.body.deposits[0].data.withdrawal_credentials) block.body.execution_payload.block_hash = compute_el_block_hash_for_block(spec, block) @@ -288,5 +291,6 @@ def 
test_deposit_transition__deposit_with_same_pubkey_different_withdrawal_crede assert len(state.pending_deposits) == deposit_request_count + deposit_count for index in indices_with_same_pubkey: assert state.pending_deposits[deposit_count + index].pubkey == deposit_requests[index].pubkey + # ensure withdrawal credentials are retained, rather than being made the same assert (state.pending_deposits[deposit_count + index].withdrawal_credentials == deposit_requests[index].withdrawal_credentials) From 19583b7d51a851d7c5fa23a7656d15e3b47029dc Mon Sep 17 00:00:00 2001 From: NC <17676176+ensi321@users.noreply.github.com> Date: Mon, 20 Jan 2025 21:56:05 -0800 Subject: [PATCH 71/76] Add blocks with several withdrawal requests --- .../test/electra/sanity/blocks/test_blocks.py | 77 +++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py index 5a4b98c3c8..d85dbf5cee 100644 --- a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py @@ -19,6 +19,7 @@ ) from eth2spec.test.helpers.withdrawals import ( set_eth1_withdrawal_credential_with_balance, + set_compounding_withdrawal_credential_with_balance, ) @@ -172,3 +173,79 @@ def test_cl_exit_and_el_withdrawal_request_in_same_block(spec, state): yield 'post', state assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH + +@with_electra_and_later +@spec_state_test +def test_multiple_el_partial_withdrawal_requests_same_validator(spec, state): + # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit + state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + + validator_index = 0 + address = b'\x22' * 20 + balance = spec.MIN_ACTIVATION_BALANCE + 2000000000 + set_compounding_withdrawal_credential_with_balance(spec, state, validator_index, balance, balance, address) + + assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + + yield 'pre', state + + validator_pubkey = state.validators[validator_index].pubkey + withdrawal_request_1 = spec.WithdrawalRequest( + source_address=address, + validator_pubkey=validator_pubkey, + amount=spec.Gwei(1), + ) + withdrawal_request_2 = spec.WithdrawalRequest( + source_address=address, + validator_pubkey=validator_pubkey, + amount=spec.Gwei(2), + ) + block = build_empty_block_for_next_slot(spec, state) + block.body.execution_requests.withdrawals = [withdrawal_request_1, withdrawal_request_2] + block.body.execution_payload.block_hash = compute_el_block_hash_for_block(spec, block) + signed_block = state_transition_and_sign_block(spec, state, block) + + yield 'blocks', [signed_block] + yield 'post', state + + assert len(state.pending_partial_withdrawals) == 2 + assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + +@with_electra_and_later +@spec_state_test +def test_multiple_el_partial_withdrawal_requests_different_validator(spec, state): + # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit + state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH + + validator_indices = [1, 2] + addresses = [bytes([v * 0x11]) * 20 for v in validator_indices] + balances = [spec.MIN_ACTIVATION_BALANCE + v * 2000000000 for v in validator_indices] + + for validator_index, address, balance in zip(validator_indices, addresses, balances): + 
set_compounding_withdrawal_credential_with_balance(spec, state, validator_index, balance, balance, address) + assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + + yield 'pre', state + + withdrawal_requests = [] + + for validator_index, address in zip(validator_indices, addresses): + validator_pubkey = state.validators[validator_index].pubkey + withdrawal_request = spec.WithdrawalRequest( + source_address=address, + validator_pubkey=validator_pubkey, + amount=spec.Gwei(validator_index), + ) + withdrawal_requests.append(withdrawal_request) + + block = build_empty_block_for_next_slot(spec, state) + block.body.execution_requests.withdrawals = withdrawal_requests + block.body.execution_payload.block_hash = compute_el_block_hash_for_block(spec, block) + signed_block = state_transition_and_sign_block(spec, state, block) + + yield 'blocks', [signed_block] + yield 'post', state + + assert len(state.pending_partial_withdrawals) == 2 + for validator_index in validator_indices: + assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH \ No newline at end of file From ad63ae7e38be6c98d4fe2b3ace273818901082bc Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 21 Jan 2025 13:47:42 +0600 Subject: [PATCH 72/76] Fix lint --- .../pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py index d85dbf5cee..e5b07cd3dd 100644 --- a/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/electra/sanity/blocks/test_blocks.py @@ -174,6 +174,7 @@ def test_cl_exit_and_el_withdrawal_request_in_same_block(spec, state): assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH + @with_electra_and_later @spec_state_test def test_multiple_el_partial_withdrawal_requests_same_validator(spec, state): @@ -211,6 +212,7 @@ def test_multiple_el_partial_withdrawal_requests_same_validator(spec, state): assert len(state.pending_partial_withdrawals) == 2 assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + @with_electra_and_later @spec_state_test def test_multiple_el_partial_withdrawal_requests_different_validator(spec, state): @@ -248,4 +250,4 @@ def test_multiple_el_partial_withdrawal_requests_different_validator(spec, state assert len(state.pending_partial_withdrawals) == 2 for validator_index in validator_indices: - assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH \ No newline at end of file + assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH From bd212e76925e6b49344decbfaabea8637253b239 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 21 Jan 2025 17:02:07 -0600 Subject: [PATCH 73/76] Pin codespell version --- setup.py | 2 +- tests/formats/sync/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 55f1d0e344..93c7739a52 100644 --- a/setup.py +++ b/setup.py @@ -578,7 +578,7 @@ def run(self): python_requires=">=3.9, <4", extras_require={ "test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"], - "lint": ["flake8==5.0.4", "mypy==0.981", "pylint==3.3.1", "codespell<3.0.0,>=2.0.0"], + "lint": ["flake8==5.0.4", "mypy==0.981", "pylint==3.3.1", "codespell==2.4.0"], "generator": ["setuptools>=72.0.0", "pytest>4.4", "python-snappy==0.7.3", "filelock", "pathos==0.3.0"], 
"docs": ["mkdocs==1.4.2", "mkdocs-material==9.1.5", "mdx-truly-sane-lists==1.3", "mkdocs-awesome-pages-plugin==2.8.0"] }, diff --git a/tests/formats/sync/README.md b/tests/formats/sync/README.md index ff9f8168cb..511900e815 100644 --- a/tests/formats/sync/README.md +++ b/tests/formats/sync/README.md @@ -1,3 +1,3 @@ # Sync tests -It re-uses the [fork choice test format](../fork_choice/README.md) to apply the test script. +It reuses the [fork choice test format](../fork_choice/README.md) to apply the test script. From 5525cc98ccfd5598c74764691c3b728435631db6 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 22 Jan 2025 22:36:46 +1100 Subject: [PATCH 74/76] Update Fulu configs --- configs/mainnet.yaml | 3 +-- configs/minimal.yaml | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index e54db49661..7f96d087c9 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -171,8 +171,7 @@ MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 MAX_BLOBS_PER_BLOCK_FULU: 12 -# `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_FULU` -MAX_REQUEST_BLOB_SIDECARS_FULU: 1536 +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 # Whisk # `Epoch(2**8)` diff --git a/configs/minimal.yaml b/configs/minimal.yaml index a15314bb1f..559e04d70e 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -172,8 +172,7 @@ MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 SAMPLES_PER_SLOT: 8 CUSTODY_REQUIREMENT: 4 MAX_BLOBS_PER_BLOCK_FULU: 12 -# `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_FULU` -MAX_REQUEST_BLOB_SIDECARS_FULU: 1536 +MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 # Whisk WHISK_EPOCHS_PER_SHUFFLING_PHASE: 4 From b1de7ef7d0382dd1680d469f0cb603c0c3fc2182 Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Wed, 22 Jan 2025 12:35:10 -0600 Subject: [PATCH 75/76] Move dependencies to pyproject.toml (#4093) --- .circleci/config.yml | 4 +- Makefile | 3 +- pyproject.toml | 55 ++++++++++ setup.py | 102 ++++++------------ solidity_deposit_contract/Makefile | 2 +- .../requirements_preinstallation.txt | 0 6 files changed, 92 insertions(+), 74 deletions(-) create mode 100644 pyproject.toml rename requirements_preinstallation.txt => solidity_deposit_contract/web3_tester/requirements_preinstallation.txt (100%) diff --git a/.circleci/config.yml b/.circleci/config.yml index 38bd6f422d..c1120b719f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -36,13 +36,13 @@ commands: steps: - restore_cached_venv: venv_name: v32-pyspec - reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} + reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "pyproject.toml" }} save_pyspec_cached_venv: description: Save a venv into a cache with pyspec keys" steps: - save_cached_venv: venv_name: v32-pyspec - reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} + reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "pyproject.toml" }} venv_path: ./venv jobs: checkout_specs: diff --git a/Makefile b/Makefile index 09e914c3ca..a845c69685 100644 --- a/Makefile +++ b/Makefile @@ -61,10 +61,9 @@ PIP_VENV = $(VENV)/bin/pip3 CODESPELL_VENV = $(VENV)/bin/codespell # Make a virtual environment will all of the necessary dependencies. 
-$(VENV): requirements_preinstallation.txt +$(VENV): pyproject.toml @echo "Creating virtual environment" @python3 -m venv $(VENV) - @$(PIP_VENV) install -r requirements_preinstallation.txt ############################################################################### # Specification diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..d9485f2e8c --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,55 @@ +[build-system] +requires = [ + "marko==1.0.2", + "ruamel.yaml==0.17.21", + "setuptools==75.8.0", + "wheel==0.45.1", +] + +[project] +name = "eth2spec" +dynamic = ["version"] +authors = [{ name = "ethereum" }] +description = "Ethereum consensus layer specifications package" +readme = { file = "README.md", content-type = "text/markdown" } +requires-python = ">=3.9,<4.0" +dependencies = [ + "curdleproofs==0.1.2", + "eth-typing==3.5.2", + "eth-utils==2.3.2", + "lru-dict==1.2.0", + "marko==1.0.2", + "milagro_bls_binding==1.9.0", + "py_arkworks_bls12381==0.3.8", + "py_ecc==6.0.0", + "pycryptodome==3.21.0", + "remerkleable==0.1.28", + "ruamel.yaml==0.17.21", + "setuptools==75.8.0", + "trie==3.0.1", +] + +[project.optional-dependencies] +test = [ + "pytest-cov==6.0.0", + "pytest-xdist==3.6.1", + "pytest==8.3.4", +] +lint = [ + "codespell==2.4.0", + "flake8==5.0.4", + "mypy==0.981", + "pylint==3.3.1", +] +generator = [ + "filelock==3.17.0", + "pathos==0.3.0", + "pytest==8.3.4", + "python-snappy==0.7.3", +] +docs = [ + "mdx-truly-sane-lists==1.3", + "mkdocs-awesome-pages-plugin==2.8.0", + "mkdocs-material==9.1.5", + "mkdocs==1.4.2", +] diff --git a/setup.py b/setup.py index 93c7739a52..8bfcbed222 100644 --- a/setup.py +++ b/setup.py @@ -1,74 +1,62 @@ -from setuptools import setup, find_packages, Command -from setuptools.command.build_py import build_py -from distutils import dir_util -from distutils.util import convert_path -from pathlib import Path +import ast +import copy +import json +import logging import os import string -from typing import Dict, List, Sequence, Optional, Tuple -import ast -import subprocess import sys -import copy +import warnings + from collections import OrderedDict -import json +from distutils import dir_util +from distutils.util import convert_path from functools import lru_cache +from marko.block import Heading, FencedCode, LinkRefDef, BlankLine +from marko.ext.gfm import gfm +from marko.ext.gfm.elements import Table +from marko.inline import CodeSpan +from pathlib import Path +from ruamel.yaml import YAML +from setuptools import setup, find_packages, Command +from setuptools.command.build_py import build_py +from typing import Dict, List, Sequence, Optional, Tuple + +pysetup_path = os.path.abspath(os.path.dirname(__file__)) +sys.path.insert(0, pysetup_path) from pysetup.constants import ( - # code names PHASE0, - # misc ETH2_SPEC_COMMENT_PREFIX, ) -from pysetup.spec_builders import spec_builders -from pysetup.typing import ( - BuildTarget, - ProtocolDefinition, - SpecObject, - VariableDefinition, -) from pysetup.helpers import ( combine_spec_objects, dependency_order_class_objects, objects_to_spec, parse_config_vars, ) -from pysetup.md_doc_paths import get_md_doc_paths +from pysetup.md_doc_paths import ( + get_md_doc_paths +) +from pysetup.spec_builders import ( + spec_builders +) +from pysetup.typing import ( + BuildTarget, + ProtocolDefinition, + SpecObject, + VariableDefinition, +) + # Ignore '1.5.0-alpha.*' to '1.5.0a*' messages. 
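With the dependency pins now declared in `pyproject.toml` above, tooling can read them directly instead of importing `setup.py`. A small sketch using the standard-library `tomllib` (requires Python 3.11+ and is assumed to run from the repository root; illustrative only):

```python
# Illustrative only: list the pinned dependency groups from the new pyproject.toml.
import tomllib

with open("pyproject.toml", "rb") as f:
    project = tomllib.load(f)["project"]

print(project["requires-python"])        # ">=3.9,<4.0"
print(project["dependencies"][:3])       # first few runtime pins, e.g. curdleproofs==0.1.2
for group, deps in project["optional-dependencies"].items():
    print(f"{group}: {len(deps)} pinned packages")   # test, lint, generator, docs
```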
-import warnings warnings.filterwarnings('ignore', message='Normalizing .* to .*') # Ignore 'running' and 'creating' messages -import logging class PyspecFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith(('running ', 'creating ')) logging.getLogger().addFilter(PyspecFilter()) -# NOTE: have to programmatically include third-party dependencies in `setup.py`. -def installPackage(package: str): - subprocess.check_call([sys.executable, '-m', 'pip', 'install', package]) - -RUAMEL_YAML_VERSION = "ruamel.yaml==0.17.21" -try: - import ruamel.yaml -except ImportError: - installPackage(RUAMEL_YAML_VERSION) - -from ruamel.yaml import YAML - -MARKO_VERSION = "marko==1.0.2" -try: - import marko -except ImportError: - installPackage(MARKO_VERSION) - -from marko.block import Heading, FencedCode, LinkRefDef, BlankLine -from marko.inline import CodeSpan -from marko.ext.gfm import gfm -from marko.ext.gfm.elements import Table - @lru_cache(maxsize=None) def _get_name_from_heading(heading: Heading) -> Optional[str]: @@ -550,12 +538,9 @@ def run(self): spec_version = f.read().strip() setup( - name='eth2spec', version=spec_version, - description="Eth2 spec, provided as Python package for tooling and testing", long_description=readme, long_description_content_type="text/markdown", - author="ethereum", url="https://github.com/ethereum/consensus-specs", include_package_data=False, package_data={ @@ -575,25 +560,4 @@ def run(self): packages=find_packages(where='tests/core/pyspec') + ['configs', 'presets', 'specs', 'presets', 'sync'], py_modules=["eth2spec"], cmdclass=commands, - python_requires=">=3.9, <4", - extras_require={ - "test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"], - "lint": ["flake8==5.0.4", "mypy==0.981", "pylint==3.3.1", "codespell==2.4.0"], - "generator": ["setuptools>=72.0.0", "pytest>4.4", "python-snappy==0.7.3", "filelock", "pathos==0.3.0"], - "docs": ["mkdocs==1.4.2", "mkdocs-material==9.1.5", "mdx-truly-sane-lists==1.3", "mkdocs-awesome-pages-plugin==2.8.0"] - }, - install_requires=[ - "eth-utils>=2.0.0,<3", - "eth-typing>=3.2.0,<4.0.0", - "pycryptodome>=3.19.1", - "py_ecc==6.0.0", - "milagro_bls_binding==1.9.0", - "remerkleable==0.1.28", - "trie>=3,<4", - RUAMEL_YAML_VERSION, - "lru-dict==1.2.0", - MARKO_VERSION, - "py_arkworks_bls12381==0.3.8", - "curdleproofs==0.1.2", - ] ) diff --git a/solidity_deposit_contract/Makefile b/solidity_deposit_contract/Makefile index a353d931bb..60d45ede49 100644 --- a/solidity_deposit_contract/Makefile +++ b/solidity_deposit_contract/Makefile @@ -30,7 +30,7 @@ install_deposit_contract_web3_tester: @cd $(DEPOSIT_CONTRACT_TESTER_DIR); \ python3 -m venv venv; \ source venv/bin/activate; \ - python3 -m pip install -r ../../requirements_preinstallation.txt; \ + python3 -m pip install -r requirements_preinstallation.txt; \ python3 -m pip install -r requirements.txt test_deposit_contract_web3_tests: diff --git a/requirements_preinstallation.txt b/solidity_deposit_contract/web3_tester/requirements_preinstallation.txt similarity index 100% rename from requirements_preinstallation.txt rename to solidity_deposit_contract/web3_tester/requirements_preinstallation.txt From 1d9577aa5188c1245973218ade3d9712486867db Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Wed, 22 Jan 2025 12:53:09 -0600 Subject: [PATCH 76/76] Bump version to 1.5.0-beta.1 --- tests/core/pyspec/eth2spec/VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
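The deleted `installPackage` bootstrap used to pip-install `ruamel.yaml` and `marko` from inside `setup.py` at import time. Under the new layout they sit in `[build-system].requires`, so a PEP 517 front end (for example `pip` or `python -m build`) provisions them in the isolated build environment before `setup.py` runs, and the file can import them unconditionally, as the diff above shows. A minimal sketch of the pattern this enables (the config path and YAML loader mode are illustrative assumptions):

```python
# Illustrative sketch: use the parsers setup.py now imports directly, with no runtime pip install.
from marko.ext.gfm import gfm
from ruamel.yaml import YAML

def load_config(path="configs/minimal.yaml"):
    """Parse a consensus config file; the path is an example from this patch series."""
    yaml = YAML(typ="safe")
    with open(path) as f:
        return yaml.load(f)

doc = gfm.parse("# Heading\n\nSome spec text.")  # GFM markdown AST, same parser setup.py imports
```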
a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index ba25d3754e..591650acef 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.5.0-beta.0 +1.5.0-beta.1
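The version bump above is picked up through the `dynamic = ["version"]` declaration in `pyproject.toml`: `setup.py` still reads `VERSION.txt` and passes the result to `setup(version=...)`. A small illustrative helper (not part of the patch) showing that resolution, assuming it runs from the repository root:

```python
# Illustrative only: resolve the eth2spec package version the way setup.py does.
from pathlib import Path

def read_spec_version(repo_root: str = ".") -> str:
    version_file = Path(repo_root) / "tests/core/pyspec/eth2spec/VERSION.txt"
    return version_file.read_text().strip()   # "1.5.0-beta.1" after this patch

print(read_spec_version())
```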