Skip to content

Commit

Permalink
Implement fairer pruning proof comparison
Browse files Browse the repository at this point in the history
  • Loading branch information
coderofstuff committed Nov 7, 2024
1 parent b56b341 commit e7efe41
Show file tree
Hide file tree
Showing 5 changed files with 63 additions and 15 deletions.
4 changes: 2 additions & 2 deletions consensus/core/src/api/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ use crate::{
tx::TxResult,
},
header::Header,
pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList},
pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata},
trusted::{ExternalGhostdagData, TrustedBlock},
tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry},
BlockHashSet, BlueWorkType, ChainPath,
Expand Down Expand Up @@ -203,7 +203,7 @@ pub trait ConsensusApi: Send + Sync {
unimplemented!()
}

fn validate_pruning_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> {
fn validate_pruning_proof(&self, proof: &PruningPointProof, proof_metadata: &PruningProofMetadata) -> PruningImportResult<()> {
unimplemented!()
}

Expand Down
21 changes: 21 additions & 0 deletions consensus/core/src/pruning.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
use crate::{
header::Header,
trusted::{TrustedGhostdagData, TrustedHeader},
BlueWorkType,
};
use kaspa_hashes::Hash;
use std::sync::Arc;
Expand All @@ -19,3 +20,23 @@ pub struct PruningPointTrustedData {
/// Union of GHOSTDAG data required to verify blocks in the future of the pruning point
pub ghostdag_blocks: Vec<TrustedGhostdagData>,
}

/// Metadata accompanying a pruning point proof during IBD, carrying context
/// from the relay block that triggered the sync (see `claimed_prover_relay_work`).
#[derive(Clone, Copy)]
pub struct PruningProofMetadata {
    // Blue work of the relay block announced by the syncer peer
    relay_block_blue_work: BlueWorkType,
}

impl PruningProofMetadata {
    /// Builds proof metadata from the blue work of the syncer's relay block.
    pub fn new(relay_block_blue_work: BlueWorkType) -> Self {
        Self { relay_block_blue_work }
    }

    /// The amount of blue work since the syncer's pruning point
    pub fn claimed_prover_relay_work(&self, pruning_point_work: BlueWorkType) -> BlueWorkType {
        // Saturating subtraction: clamp to zero rather than underflow when the
        // relay block's blue work does not exceed the pruning point's blue work.
        if self.relay_block_blue_work > pruning_point_work {
            self.relay_block_blue_work - pruning_point_work
        } else {
            0.into()
        }
    }
}
10 changes: 7 additions & 3 deletions consensus/src/consensus/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ use kaspa_consensus_core::{
merkle::calc_hash_merkle_root,
muhash::MuHashExtensions,
network::NetworkType,
pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList},
pruning::{PruningPointProof, PruningPointTrustedData, PruningPointsList, PruningProofMetadata},
trusted::{ExternalGhostdagData, TrustedBlock},
tx::{MutableTransaction, Transaction, TransactionOutpoint, UtxoEntry},
BlockHashSet, BlueWorkType, ChainPath, HashMapCustomHasher,
Expand Down Expand Up @@ -757,8 +757,12 @@ impl ConsensusApi for Consensus {
calc_hash_merkle_root(txs.iter(), storage_mass_activated)
}

fn validate_pruning_proof(&self, proof: &PruningPointProof) -> Result<(), PruningImportError> {
self.services.pruning_proof_manager.validate_pruning_point_proof(proof)
fn validate_pruning_proof(
&self,
proof: &PruningPointProof,
proof_metadata: &PruningProofMetadata,
) -> Result<(), PruningImportError> {
self.services.pruning_proof_manager.validate_pruning_point_proof(proof, proof_metadata)
}

fn apply_pruning_proof(&self, proof: PruningPointProof, trusted_set: &[TrustedBlock]) -> PruningImportResult<()> {
Expand Down
30 changes: 25 additions & 5 deletions consensus/src/processes/pruning_proof/validate.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@ use kaspa_consensus_core::{
blockhash::{BlockHashExtensions, BlockHashes, ORIGIN},
errors::pruning::{PruningImportError, PruningImportResult},
header::Header,
pruning::PruningPointProof,
BlockLevel,
pruning::{PruningPointProof, PruningProofMetadata},
BlockLevel, BlueWorkType,
};
use kaspa_core::info;
use kaspa_database::prelude::{CachePolicy, ConnBuilder, StoreResultEmptyTuple, StoreResultExtensions};
Expand All @@ -26,6 +26,7 @@ use crate::{
stores::{
ghostdag::{CompactGhostdagData, DbGhostdagStore, GhostdagStore, GhostdagStoreReader},
headers::{DbHeadersStore, HeaderStore, HeaderStoreReader},
headers_selected_tip::HeadersSelectedTipStoreReader,
pruning::PruningStoreReader,
reachability::{DbReachabilityStore, ReachabilityStoreReader},
relations::{DbRelationsStore, RelationsStoreReader},
Expand All @@ -40,7 +41,11 @@ use crate::{
use super::{PruningProofManager, TempProofContext};

impl PruningProofManager {
pub fn validate_pruning_point_proof(&self, proof: &PruningPointProof) -> PruningImportResult<()> {
pub fn validate_pruning_point_proof(
&self,
proof: &PruningPointProof,
proof_metadata: &PruningProofMetadata,
) -> PruningImportResult<()> {
if proof.len() != self.max_block_level as usize + 1 {
return Err(PruningImportError::ProofNotEnoughLevels(self.max_block_level as usize + 1));
}
Expand Down Expand Up @@ -77,6 +82,10 @@ impl PruningProofManager {
let current_pp = pruning_read.get().unwrap().pruning_point;
let current_pp_header = self.headers_store.get_header(current_pp).unwrap();

let current_consensus_tip_work_diff =
SignedInteger::from(self.get_current_consensus_selected_tip_work(current_pp_header.blue_work));
let relay_header_work_diff = SignedInteger::from(proof_metadata.claimed_prover_relay_work(proof_pp_header.blue_work));

for (level_idx, selected_tip) in proof_selected_tip_by_level.iter().copied().enumerate() {
let level = level_idx as BlockLevel;
self.validate_proof_selected_tip(selected_tip, level, proof_pp_level, proof_pp, proof_pp_header)?;
Expand All @@ -93,7 +102,6 @@ impl PruningProofManager {
// we can determine if the proof is better. The proof is better if the blue work* difference between the
// old current consensus's tips and the common ancestor is less than the blue work difference between the
// proof's tip and the common ancestor.
// *Note: blue work is the same as blue score on levels higher than 0
if let Some((proof_common_ancestor_gd, common_ancestor_gd)) = self.find_proof_and_consensus_common_ancestor_ghostdag_data(
&proof_ghostdag_stores,
&current_consensus_ghostdag_stores,
Expand All @@ -107,7 +115,9 @@ impl PruningProofManager {
let parent_blue_work = current_consensus_ghostdag_stores[level_idx].get_blue_work(parent).unwrap();
let parent_blue_work_diff =
SignedInteger::from(parent_blue_work) - SignedInteger::from(common_ancestor_gd.blue_work);
if parent_blue_work_diff >= selected_tip_blue_work_diff {
// TODO: Do something about not being able to use add
if parent_blue_work_diff - relay_header_work_diff >= selected_tip_blue_work_diff - current_consensus_tip_work_diff
{
return Err(PruningImportError::PruningProofInsufficientBlueWork);
}
}
Expand Down Expand Up @@ -158,6 +168,16 @@ impl PruningProofManager {
Err(PruningImportError::PruningProofNotEnoughHeaders)
}

/// Blue work accumulated by the current consensus from its pruning point up to
/// its headers selected tip, saturating at zero (the counterpart of the
/// prover's claimed relay work used in the proof comparison).
fn get_current_consensus_selected_tip_work(&self, current_consensus_pp_blue_work: BlueWorkType) -> BlueWorkType {
    let selected_tip_work = self.headers_selected_tip_store.read().get().unwrap().blue_work;

    // Saturating subtraction: avoid underflow when the selected tip's blue work
    // does not exceed the current pruning point's blue work.
    match selected_tip_work > current_consensus_pp_blue_work {
        true => selected_tip_work - current_consensus_pp_blue_work,
        false => 0.into(),
    }
}

fn init_validate_pruning_point_proof_stores_and_processes(
&self,
proof: &PruningPointProof,
Expand Down
13 changes: 8 additions & 5 deletions protocol/flows/src/v5/ibd/flow.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ use kaspa_consensus_core::{
api::BlockValidationFuture,
block::Block,
header::Header,
pruning::{PruningPointProof, PruningPointsList},
pruning::{PruningPointProof, PruningPointsList, PruningProofMetadata},
BlockHashSet,
};
use kaspa_consensusmanager::{spawn_blocking, ConsensusProxy, StagingConsensus};
Expand Down Expand Up @@ -218,27 +218,30 @@ impl IbdFlow {

let staging_session = staging.session().await;

let pruning_point = self.sync_and_validate_pruning_proof(&staging_session).await?;
let pruning_point = self.sync_and_validate_pruning_proof(&staging_session, relay_block).await?;
self.sync_headers(&staging_session, syncer_virtual_selected_parent, pruning_point, relay_block).await?;
staging_session.async_validate_pruning_points().await?;
self.validate_staging_timestamps(&self.ctx.consensus().session().await, &staging_session).await?;
self.sync_pruning_point_utxoset(&staging_session, pruning_point).await?;
Ok(())
}

async fn sync_and_validate_pruning_proof(&mut self, staging: &ConsensusProxy) -> Result<Hash, ProtocolError> {
async fn sync_and_validate_pruning_proof(&mut self, staging: &ConsensusProxy, relay_block: &Block) -> Result<Hash, ProtocolError> {
self.router.enqueue(make_message!(Payload::RequestPruningPointProof, RequestPruningPointProofMessage {})).await?;

// Pruning proof generation and communication might take several minutes, so we allow a long 10 minute timeout
let msg = dequeue_with_timeout!(self.incoming_route, Payload::PruningPointProof, Duration::from_secs(600))?;
let proof: PruningPointProof = msg.try_into()?;
debug!("received proof with overall {} headers", proof.iter().map(|l| l.len()).sum::<usize>());

let proof_metadata = PruningProofMetadata::new(relay_block.header.blue_work);

// Get a new session for current consensus (non staging)
let consensus = self.ctx.consensus().session().await;

// The proof is validated in the context of current consensus
let proof = consensus.clone().spawn_blocking(move |c| c.validate_pruning_proof(&proof).map(|()| proof)).await?;
let proof =
consensus.clone().spawn_blocking(move |c| c.validate_pruning_proof(&proof, &proof_metadata).map(|()| proof)).await?;

let proof_pruning_point = proof[0].last().expect("was just ensured by validation").hash;

Expand Down Expand Up @@ -316,7 +319,7 @@ impl IbdFlow {
if mismatch_detected {
info!("Validating the locally built proof (sanity test fallback #2)");
// Note: the proof is validated in the context of *current* consensus
if let Err(err) = con.validate_pruning_proof(&built_proof) {
if let Err(err) = con.validate_pruning_proof(&built_proof, &proof_metadata) {
panic!("Locally built proof failed validation: {}", err);
}
info!("Locally built proof was validated successfully");
Expand Down

0 comments on commit e7efe41

Please sign in to comment.