
Commit b3da74b

Merge remote-tracking branch 'origin/unstable' into jimmy/lh-2271-activate-peerdas-at-fulu-fork-and-remove-eip7594_fork_epoch

# Conflicts:
#	beacon_node/beacon_chain/src/fetch_blobs.rs
#	beacon_node/store/src/lib.rs
#	beacon_node/store/src/memory_store.rs
jimmygchen committed Jan 23, 2025
2 parents e813532 + a1b7d61 commit b3da74b
Showing 50 changed files with 2,207 additions and 1,113 deletions.
27 changes: 14 additions & 13 deletions Cargo.lock


2 changes: 1 addition & 1 deletion Makefile
@@ -14,7 +14,7 @@ BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release"
PINNED_NIGHTLY ?= nightly

# List of features to use when cross-compiling. Can be overridden via the environment.
-CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,slasher-redb,jemalloc
+CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx,slasher-redb,jemalloc,beacon-node-leveldb,beacon-node-redb

# Cargo profile for Cross builds. Default is for local builds, CI uses an override.
CROSS_PROFILE ?= release
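
As the comment above says, CROSS_FEATURES is a `?=` assignment, so the feature list can be overridden per invocation from the environment. A hypothetical example enabling only the redb database backend (the cross-build target name is an assumption; check this Makefile for the actual targets):

    CROSS_FEATURES=gnosis,slasher-lmdb,beacon-node-redb make build-x86_64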
@@ -317,7 +317,6 @@ impl<E: EthSpec> PendingComponents<E> {
None,
)
};
-
let executed_block = recover(diet_executed_block)?;

let AvailabilityPendingExecutedBlock {
@@ -732,7 +731,7 @@ mod test {
use slog::{info, Logger};
use state_processing::ConsensusContext;
use std::collections::VecDeque;
-    use store::{HotColdDB, ItemStore, LevelDB, StoreConfig};
+    use store::{database::interface::BeaconNodeBackend, HotColdDB, ItemStore, StoreConfig};
use tempfile::{tempdir, TempDir};
use types::non_zero_usize::new_non_zero_usize;
use types::{ExecPayload, MinimalEthSpec};
@@ -744,7 +743,7 @@ mod test {
db_path: &TempDir,
spec: Arc<ChainSpec>,
log: Logger,
-    ) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> {
+    ) -> Arc<HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>>> {
let hot_path = db_path.path().join("hot_db");
let cold_path = db_path.path().join("cold_db");
let blobs_path = db_path.path().join("blobs_db");
@@ -920,7 +919,11 @@ mod test {
)
where
E: EthSpec,
-        T: BeaconChainTypes<HotStore = LevelDB<E>, ColdStore = LevelDB<E>, EthSpec = E>,
+        T: BeaconChainTypes<
+            HotStore = BeaconNodeBackend<E>,
+            ColdStore = BeaconNodeBackend<E>,
+            EthSpec = E,
+        >,
{
let log = test_logger();
let chain_db_path = tempdir().expect("should get temp dir");
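
The substitution running through this whole commit is LevelDB<E> → BeaconNodeBackend<E>. Paired with the new beacon-node-leveldb/beacon-node-redb Makefile features, this points at a wrapper type that dispatches to whichever database backends were compiled in. A minimal, self-contained sketch of that dispatch pattern (the names, variants, and method below are assumptions; the real type lives in store::database::interface and its definition is not part of this diff):

    // Stand-ins for the two feature-gated database implementations.
    struct LevelDb;
    struct Redb;

    #[allow(dead_code)]
    enum Backend {
        LevelDb(LevelDb),
        Redb(Redb),
    }

    impl Backend {
        // Each key-value method matches on the active variant and delegates.
        fn get_bytes(&self, _column: &str, _key: &[u8]) -> Option<Vec<u8>> {
            match self {
                Backend::LevelDb(_db) => None, // would call into LevelDB here
                Backend::Redb(_db) => None,    // would call into redb here
            }
        }
    }

    fn main() {
        let backend = Backend::LevelDb(LevelDb);
        assert!(backend.get_bytes("beacon_block_roots", &[0u8; 8]).is_none());
    }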
31 changes: 21 additions & 10 deletions beacon_node/beacon_chain/src/fetch_blobs.rs
@@ -14,7 +14,7 @@ use crate::{metrics, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes
use execution_layer::json_structures::BlobAndProofV1;
use execution_layer::Error as ExecutionLayerError;
use metrics::{inc_counter, inc_counter_by, TryExt};
-use slog::{debug, error, o, warn, Logger};
+use slog::{debug, error, o, Logger};
use ssz_types::FixedVector;
use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash;
use std::sync::Arc;
@@ -163,6 +163,20 @@ pub async fn fetch_and_process_engine_blobs<T: BeaconChainTypes>(
return Ok(None);
}

+    if chain
+        .canonical_head
+        .fork_choice_read_lock()
+        .contains_block(&block_root)
+    {
+        // Avoid computing columns if block has already been imported.
+        debug!(
+            log,
+            "Ignoring EL blobs response";
+            "info" => "block has already been imported",
+        );
+        return Ok(None);
+    }
+
let data_columns_receiver = spawn_compute_and_publish_data_columns_task(
&chain,
block.clone(),
@@ -249,23 +263,20 @@ fn spawn_compute_and_publish_data_columns_task<T: BeaconChainTypes>(
};

if data_columns_sender.send(all_data_columns.clone()).is_err() {
-            // Data column receiver have been dropped - this may not be an issue if the block is
-            // already fully imported. This should not happen after the race condition
-            // described in #6816 is fixed.
-            warn!(
+            // Data column receiver have been dropped - block may have already been imported.
+            // This race condition exists because gossip columns may arrive and trigger block
+            // import during the computation. Here we just drop the computed columns.
+            debug!(
log,
"Failed to send computed data columns";
);
return;
};

-        // Check indices from cache before sending the columns, to make sure we don't
-        // publish components already seen on gossip.
-        let is_supernode = chain_cloned.data_availability_checker.is_supernode();
-
-        // At the moment non supernodes are not required to publish any columns.
-        // TODO(das): we could experiment with having full nodes publish their custodied
-        // columns here.
-        if !is_supernode {
+        if !chain_cloned.data_availability_checker.is_supernode() {
return;
}
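
Both hunks in this file deal with the same race: a block can be imported via gossip while the engine-blobs path is still computing data columns. The new fork-choice check skips the work entirely when the block is already known, and the send(...).is_err() branch handles losing the race mid-computation, now logged at debug rather than warn. The send-fails-after-receiver-drop semantics are ordinary channel behaviour; a self-contained illustration (std::sync::mpsc is used here for brevity and is not necessarily the channel type used in this code):

    use std::sync::mpsc;

    fn main() {
        let (tx, rx) = mpsc::channel::<Vec<u8>>();
        // The receiving side goes away, as when a gossip-triggered import wins the race.
        drop(rx);
        // send() now fails; the computed value is simply dropped, so a debug log
        // (rather than a warning) is the appropriate response.
        assert!(tx.send(vec![1, 2, 3]).is_err());
    }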

11 changes: 5 additions & 6 deletions beacon_node/beacon_chain/src/historical_blocks.rs
@@ -10,10 +10,7 @@ use std::borrow::Cow;
use std::iter;
use std::time::Duration;
use store::metadata::DataColumnInfo;
-use store::{
-    get_key_for_col, AnchorInfo, BlobInfo, DBColumn, Error as StoreError, KeyValueStore,
-    KeyValueStoreOp,
-};
+use store::{AnchorInfo, BlobInfo, DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp};
use strum::IntoStaticStr;
use types::{FixedBytesExtended, Hash256, Slot};

@@ -153,7 +150,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Store block roots, including at all skip slots in the freezer DB.
for slot in (block.slot().as_u64()..prev_block_slot.as_u64()).rev() {
cold_batch.push(KeyValueStoreOp::PutKeyValue(
-                get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()),
+                DBColumn::BeaconBlockRoots,
+                slot.to_be_bytes().to_vec(),
block_root.as_slice().to_vec(),
));
}
@@ -169,7 +167,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let genesis_slot = self.spec.genesis_slot;
for slot in genesis_slot.as_u64()..prev_block_slot.as_u64() {
cold_batch.push(KeyValueStoreOp::PutKeyValue(
-                get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()),
+                DBColumn::BeaconBlockRoots,
+                slot.to_be_bytes().to_vec(),
self.genesis_block_root.as_slice().to_vec(),
));
}
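
This hunk shows the other change that recurs across the diff: KeyValueStoreOp no longer takes a key that was pre-prefixed with its column via get_key_for_col; the DBColumn and the raw key now travel separately. A sketch of the before/after shape, inferred only from the call sites visible in this diff (the real definitions live in beacon_node/store and may differ):

    #[allow(dead_code)]
    enum DBColumn {
        BeaconBlockRoots,
        // ...
    }

    // Old: the column prefix was baked into the key via get_key_for_col(column, key).
    #[allow(dead_code)]
    enum OldKeyValueStoreOp {
        PutKeyValue(Vec<u8>, Vec<u8>), // (prefixed key, value)
        DeleteKey(Vec<u8>),
    }

    // New: keeping the column separate lets each backend encode it natively,
    // e.g. one table per column instead of a shared prefixed keyspace (an assumption).
    #[allow(dead_code)]
    enum NewKeyValueStoreOp {
        PutKeyValue(DBColumn, Vec<u8>, Vec<u8>), // (column, key, value)
        DeleteKey(DBColumn, Vec<u8>),
    }

    fn main() {
        let _op = NewKeyValueStoreOp::PutKeyValue(
            DBColumn::BeaconBlockRoots,
            0u64.to_be_bytes().to_vec(),
            vec![0u8; 32],
        );
    }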
@@ -3,9 +3,7 @@ use crate::validator_pubkey_cache::DatabasePubkey;
use slog::{info, Logger};
use ssz::{Decode, Encode};
use std::sync::Arc;
-use store::{
-    get_key_for_col, DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem,
-};
+use store::{DBColumn, Error, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem};
use types::{Hash256, PublicKey};

const LOG_EVERY: usize = 200_000;
@@ -62,9 +60,9 @@ pub fn downgrade_from_v21<T: BeaconChainTypes>(
message: format!("{e:?}"),
})?;

-        let db_key = get_key_for_col(DBColumn::PubkeyCache.into(), key.as_slice());
         ops.push(KeyValueStoreOp::PutKeyValue(
-            db_key,
+            DBColumn::PubkeyCache,
+            key.as_slice().to_vec(),
pubkey_bytes.as_ssz_bytes(),
));

17 changes: 6 additions & 11 deletions beacon_node/beacon_chain/src/schema_change/migration_schema_v22.rs
@@ -4,7 +4,6 @@ use std::sync::Arc;
use store::chunked_iter::ChunkedVectorIter;
use store::{
chunked_vector::BlockRootsChunked,
-    get_key_for_col,
metadata::{
SchemaVersion, ANCHOR_FOR_ARCHIVE_NODE, ANCHOR_UNINITIALIZED, STATE_UPPER_LIMIT_NO_RETAIN,
},
@@ -21,7 +20,7 @@ fn load_old_schema_frozen_state<T: BeaconChainTypes>(
) -> Result<Option<BeaconState<T::EthSpec>>, Error> {
let Some(partial_state_bytes) = db
.cold_db
-        .get_bytes(DBColumn::BeaconState.into(), state_root.as_slice())?
+        .get_bytes(DBColumn::BeaconState, state_root.as_slice())?
else {
return Ok(None);
};
@@ -136,10 +135,7 @@ pub fn delete_old_schema_freezer_data<T: BeaconChainTypes>(
for column in columns {
for res in db.cold_db.iter_column_keys::<Vec<u8>>(column) {
let key = res?;
-            cold_ops.push(KeyValueStoreOp::DeleteKey(get_key_for_col(
-                column.as_str(),
-                &key,
-            )));
+            cold_ops.push(KeyValueStoreOp::DeleteKey(column, key));
}
}
let delete_ops = cold_ops.len();
@@ -175,7 +171,8 @@ pub fn write_new_schema_block_roots<T: BeaconChainTypes>(
// Store the genesis block root if it would otherwise not be stored.
if oldest_block_slot != 0 {
cold_ops.push(KeyValueStoreOp::PutKeyValue(
-            get_key_for_col(DBColumn::BeaconBlockRoots.into(), &0u64.to_be_bytes()),
+            DBColumn::BeaconBlockRoots,
+            0u64.to_be_bytes().to_vec(),
genesis_block_root.as_slice().to_vec(),
));
}
@@ -192,10 +189,8 @@ pub fn write_new_schema_block_roots<T: BeaconChainTypes>(
// OK to hold these in memory (10M slots * 43 bytes per KV ~= 430 MB).
for (i, (slot, block_root)) in block_root_iter.enumerate() {
cold_ops.push(KeyValueStoreOp::PutKeyValue(
-            get_key_for_col(
-                DBColumn::BeaconBlockRoots.into(),
-                &(slot as u64).to_be_bytes(),
-            ),
+            DBColumn::BeaconBlockRoots,
+            slot.to_be_bytes().to_vec(),
block_root.as_slice().to_vec(),
));
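
A note on why these keys are written with to_be_bytes(): big-endian encoding makes byte-wise key comparison agree with numeric slot order, so a store that iterates a column lexicographically yields slots in ascending order. A quick self-contained check:

    fn main() {
        let a = 255u64.to_be_bytes();
        let b = 256u64.to_be_bytes();
        assert!(a < b); // byte-wise order matches numeric order

        let c = 255u64.to_le_bytes();
        let d = 256u64.to_le_bytes();
        assert!(c > d); // little-endian keys would iterate out of numeric order
    }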

15 changes: 11 additions & 4 deletions beacon_node/beacon_chain/src/test_utils.rs
@@ -58,7 +58,8 @@ use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, LazyLock};
use std::time::Duration;
-use store::{config::StoreConfig, HotColdDB, ItemStore, LevelDB, MemoryStore};
+use store::database::interface::BeaconNodeBackend;
+use store::{config::StoreConfig, HotColdDB, ItemStore, MemoryStore};
use task_executor::TaskExecutor;
use task_executor::{test_utils::TestRuntime, ShutdownReason};
use tree_hash::TreeHash;
@@ -118,7 +119,7 @@ pub fn get_kzg(spec: &ChainSpec) -> Arc<Kzg> {
pub type BaseHarnessType<E, THotStore, TColdStore> =
Witness<TestingSlotClock, CachingEth1Backend<E>, E, THotStore, TColdStore>;

-pub type DiskHarnessType<E> = BaseHarnessType<E, LevelDB<E>, LevelDB<E>>;
+pub type DiskHarnessType<E> = BaseHarnessType<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>>;
pub type EphemeralHarnessType<E> = BaseHarnessType<E, MemoryStore<E>, MemoryStore<E>>;

pub type BoxedMutator<E, Hot, Cold> = Box<
@@ -301,7 +302,10 @@ impl<E: EthSpec> Builder<EphemeralHarnessType<E>> {

impl<E: EthSpec> Builder<DiskHarnessType<E>> {
/// Disk store, start from genesis.
-    pub fn fresh_disk_store(mut self, store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>) -> Self {
+    pub fn fresh_disk_store(
+        mut self,
+        store: Arc<HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>>>,
+    ) -> Self {
let validator_keypairs = self
.validator_keypairs
.clone()
@@ -326,7 +330,10 @@ impl<E: EthSpec> Builder<DiskHarnessType<E>> {
}

/// Disk store, resume.
-    pub fn resumed_disk_store(mut self, store: Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>>) -> Self {
+    pub fn resumed_disk_store(
+        mut self,
+        store: Arc<HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>>>,
+    ) -> Self {
let mutator = move |builder: BeaconChainBuilder<_>| {
builder
.resume_from_db()
5 changes: 3 additions & 2 deletions beacon_node/beacon_chain/tests/op_verification.rs
@@ -14,7 +14,8 @@ use state_processing::per_block_processing::errors::{
AttesterSlashingInvalid, BlockOperationError, ExitInvalid, ProposerSlashingInvalid,
};
use std::sync::{Arc, LazyLock};
-use store::{LevelDB, StoreConfig};
+use store::database::interface::BeaconNodeBackend;
+use store::StoreConfig;
use tempfile::{tempdir, TempDir};
use types::*;

@@ -26,7 +27,7 @@ static KEYPAIRS: LazyLock<Vec<Keypair>> =

type E = MinimalEthSpec;
type TestHarness = BeaconChainHarness<DiskHarnessType<E>>;
-type HotColdDB = store::HotColdDB<E, LevelDB<E>, LevelDB<E>>;
+type HotColdDB = store::HotColdDB<E, BeaconNodeBackend<E>, BeaconNodeBackend<E>>;

fn get_store(db_path: &TempDir) -> Arc<HotColdDB> {
let spec = Arc::new(test_spec::<E>());