Skip to content

Commit

Permalink
fix: VID ADVZ check consistency of multiplicity in verify_share (#653)
Browse files Browse the repository at this point in the history
* move tests from advz.rs to new file advz/test.rs

* new failing test

* do not panic for index out of bounds

* add check for consistency of multiplicity

* refactor test
  • Loading branch information
ggutoski authored Aug 15, 2024
1 parent e18f3bc commit f3ef0b9
Show file tree
Hide file tree
Showing 4 changed files with 365 additions and 299 deletions.
325 changes: 28 additions & 297 deletions vid/src/advz.rs
Original file line number Diff line number Diff line change
Expand Up @@ -507,30 +507,32 @@ where
commit: &Self::Commit,
) -> VidResult<Result<(), ()>> {
// check arguments
let multiplicity: usize = common.multiplicity.try_into().map_err(vid)?;
if share.evals.len() / multiplicity != common.poly_commits.len() {
if common.num_storage_nodes != self.num_storage_nodes {
return Err(VidError::Argument(format!(
"(share eval, common poly commit) lengths differ ({},{})",
share.evals.len() / multiplicity,
common.poly_commits.len()
"common num_storage_nodes {} differs from self {}",
common.num_storage_nodes, self.num_storage_nodes
)));
}

if common.num_storage_nodes != self.num_storage_nodes {
if common.multiplicity != self.multiplicity {
return Err(VidError::Argument(format!(
"common num_storage_nodes differs from self ({},{})",
common.num_storage_nodes, self.num_storage_nodes
"common multiplicity {} differs from self {}",
common.multiplicity, self.multiplicity
)));
}

let polys_len = common.poly_commits.len();
let multiplicity: usize = common.multiplicity.try_into().map_err(vid)?;
if share.evals.len() / multiplicity != common.poly_commits.len() {
return Err(VidError::Argument(format!(
"number of share evals / multiplicity {}/{} differs from number of common polynomial commitments {}",
share.evals.len(), multiplicity,
common.poly_commits.len()
)));
}
Self::is_consistent(commit, common)?;

if share.index >= self.num_storage_nodes {
return Ok(Err(())); // not an arg error
}

Self::is_consistent(commit, common)?;

// verify eval proof
// TODO: check all indices that represents the shares
if KzgEvalsMerkleTree::<E, H>::verify(
Expand Down Expand Up @@ -567,9 +569,20 @@ where
// some boilerplate needed to accommodate builds without `parallel`
// feature.
let multiplicities = Vec::from_iter((0..self.multiplicity as usize));
let polys_len = common.poly_commits.len();
let verification_iter = parallelizable_slice_iter(&multiplicities).map(|i| {
let range = i * polys_len..(i + 1) * polys_len;
let aggregate_eval = polynomial_eval(
share.evals[i * polys_len..(i + 1) * polys_len]
share
.evals
.get(range.clone())
.ok_or_else(|| {
VidError::Internal(anyhow::anyhow!(
"share evals range {:?} out of bounds for length {}",
range,
share.evals.len()
))
})?
.iter()
.map(FieldMultiplier),
pseudorandom_scalar,
Expand Down Expand Up @@ -985,286 +998,4 @@ where
}

#[cfg(test)]
// NOTE(review): in the commit shown here this module is being removed from
// `advz.rs` and relocated to the new file `advz/test.rs` (see `mod test;`
// declared after this module) — presumably unchanged in content; confirm
// against the new file.
mod tests {
use super::{VidError::Argument, *};
use ark_bls12_381::Bls12_381;
use ark_bn254::Bn254;
use ark_std::{
rand::{CryptoRng, RngCore},
vec,
};
use jf_pcs::{checked_fft_size, prelude::UnivariateUniversalParams};
use sha2::Sha256;

/// Timing smoke test for `disperse` over a large (1 << 25 byte) random
/// payload; `#[ignore]`d so it only runs on demand.
#[ignore]
#[test]
fn disperse_timer() {
// run with 'print-trace' feature to see timer output
let (recovery_threshold, num_storage_nodes) = (256, 512);
let mut rng = jf_utils::test_rng();
let srs = init_srs(recovery_threshold as usize, &mut rng);
// GPU-backed variant is exercised only when the `gpu-vid` feature is enabled.
#[cfg(feature = "gpu-vid")]
let mut advz_gpu =
AdvzGPU::<'_, Bn254, Sha256>::new(num_storage_nodes, recovery_threshold, &srs).unwrap();
let mut advz =
Advz::<Bn254, Sha256>::new(num_storage_nodes, recovery_threshold, srs).unwrap();

let payload_random = init_random_payload(1 << 25, &mut rng);

#[cfg(feature = "gpu-vid")]
let _ = advz_gpu.disperse(payload_random.clone());
// Result deliberately discarded: this test only measures elapsed time.
let _ = advz.disperse(payload_random);
}

/// Timing smoke test for `commit_only` (commitment without dispersal);
/// same setup as `disperse_timer`, `#[ignore]`d so it only runs on demand.
#[ignore]
#[test]
fn commit_only_timer() {
// run with 'print-trace' feature to see timer output
let (recovery_threshold, num_storage_nodes) = (256, 512);
let mut rng = jf_utils::test_rng();
let srs = init_srs(recovery_threshold as usize, &mut rng);
#[cfg(feature = "gpu-vid")]
let mut advz_gpu =
AdvzGPU::<'_, Bn254, Sha256>::new(num_storage_nodes, recovery_threshold, &srs).unwrap();
let mut advz =
Advz::<Bn254, Sha256>::new(num_storage_nodes, recovery_threshold, srs).unwrap();

let payload_random = init_random_payload(1 << 25, &mut rng);

#[cfg(feature = "gpu-vid")]
let _ = advz_gpu.commit_only(payload_random.clone());

let _ = advz.commit_only(payload_random);
}

/// `verify_share` must reject a share whose evals, index, or eval proof
/// have been tampered with. Malformed-argument cases surface as
/// `Err(Argument(_))`; well-formed-but-wrong shares surface as
/// `Ok(Err(()))` (verification failure, not an argument error).
#[test]
fn sad_path_verify_share_corrupt_share() {
let (mut advz, bytes_random) = advz_init();
let disperse = advz.disperse(bytes_random).unwrap();
let (shares, common, commit) = (disperse.shares, disperse.common, disperse.commit);

for (i, share) in shares.iter().enumerate() {
// missing share eval
{
// Dropping one eval makes share/common lengths disagree -> arg error.
let share_missing_eval = Share {
evals: share.evals[1..].to_vec(),
..share.clone()
};
assert_arg_err(
advz.verify_share(&share_missing_eval, &common, &commit),
"1 missing share should be arg error",
);
}

// corrupted share eval
{
let mut share_bad_eval = share.clone();
share_bad_eval.evals[0].double_in_place();
advz.verify_share(&share_bad_eval, &common, &commit)
.unwrap()
.expect_err("bad share value should fail verification");
}

// corrupted index, in bounds
{
let share_bad_index = Share {
index: (share.index + 1) % advz.num_storage_nodes,
..share.clone()
};
advz.verify_share(&share_bad_index, &common, &commit)
.unwrap()
.expect_err("bad share index should fail verification");
}

// corrupted index, out of bounds
{
// Out-of-bounds index must yield a clean verification failure,
// not a panic (see this commit's "do not panic for index out of
// bounds" change in `verify_share`).
let share_bad_index = Share {
index: share.index + advz.num_storage_nodes,
..share.clone()
};
advz.verify_share(&share_bad_index, &common, &commit)
.unwrap()
.expect_err("bad share index should fail verification");
}

// corrupt eval proof
{
// We have no way to corrupt a proof
// (without also causing a deserialization failure).
// So we use another share's proof instead.
let share_bad_evals_proof = Share {
evals_proof: shares[(i + 1) % shares.len()].evals_proof.clone(),
..share.clone()
};
advz.verify_share(&share_bad_evals_proof, &common, &commit)
.unwrap()
.expect_err("bad share evals proof should fail verification");
}
}
}

/// `verify_share` must reject `Common` data that is inconsistent with the
/// commitment: a missing or corrupted polynomial commitment is an argument
/// error; a corrupted `all_evals_digest` is a verification failure.
#[test]
fn sad_path_verify_share_corrupt_commit() {
let (mut advz, bytes_random) = advz_init();
let disperse = advz.disperse(bytes_random).unwrap();
let (shares, common, commit) = (disperse.shares, disperse.common, disperse.commit);

// missing commit
let common_missing_item = Common {
poly_commits: common.poly_commits[1..].to_vec(),
..common.clone()
};
assert_arg_err(
advz.verify_share(&shares[0], &common_missing_item, &commit),
"1 missing commit should be arg error",
);

// 1 corrupt commit, poly_commit
let common_1_poly_corruption = {
let mut corrupted = common.clone();
// Overwrite the first commitment with the group identity point.
corrupted.poly_commits[0] = <Bls12_381 as Pairing>::G1Affine::zero().into();
corrupted
};
assert_arg_err(
advz.verify_share(&shares[0], &common_1_poly_corruption, &commit),
"corrupted commit should be arg error",
);

// 1 corrupt commit, all_evals_digest
let common_1_digest_corruption = {
let mut corrupted = common;
// Round-trip the digest through its serialized form so we can flip
// a byte without constructing a `HasherNode` by hand.
let mut digest_bytes = vec![0u8; corrupted.all_evals_digest.uncompressed_size()];
corrupted
.all_evals_digest
.serialize_uncompressed(&mut digest_bytes)
.expect("digest serialization should succeed");
digest_bytes[0] += 1;
corrupted.all_evals_digest =
HasherNode::deserialize_uncompressed(digest_bytes.as_slice())
.expect("digest deserialization should succeed");
corrupted
};
advz.verify_share(&shares[0], &common_1_digest_corruption, &commit)
.unwrap()
.expect_err("1 corrupt all_evals_digest should fail verification");
}

/// Even when share and common are mutually consistent (both shortened by
/// one item, or both emptied), `verify_share` must still reject them as an
/// argument error because they no longer match the commitment.
#[test]
fn sad_path_verify_share_corrupt_share_and_commit() {
let (mut advz, bytes_random) = advz_init();
let disperse = advz.disperse(bytes_random).unwrap();
let (mut shares, mut common, commit) = (disperse.shares, disperse.common, disperse.commit);

common.poly_commits.pop();
shares[0].evals.pop();

// equal nonzero lengths for common, share
assert_arg_err(
advz.verify_share(&shares[0], &common, &commit),
"common inconsistent with commit should be arg error",
);

common.poly_commits.clear();
shares[0].evals.clear();

// zero length for common, share
assert_arg_err(
advz.verify_share(&shares[0], &common, &commit),
"expect arg error for common inconsistent with commit",
);
}

/// `recover_payload` must reject shares with inconsistent eval lengths,
/// succeed (with a wrong result) on in-bounds-but-permuted indices, and
/// fail cleanly on out-of-bounds indices.
#[test]
fn sad_path_recover_payload_corrupt_shares() {
let (mut advz, bytes_random) = advz_init();
let disperse = advz.disperse(&bytes_random).unwrap();
let (shares, common) = (disperse.shares, disperse.common);

{
// unequal share eval lengths
let mut shares_missing_evals = shares.clone();
// Remove one eval from progressively more shares; lengths disagree
// across shares until the final pop below equalizes them.
for i in 0..shares_missing_evals.len() - 1 {
shares_missing_evals[i].evals.pop();
assert_arg_err(
advz.recover_payload(&shares_missing_evals, &common),
format!("{} shares missing 1 eval should be arg error", i + 1).as_str(),
);
}

// 1 eval missing from all shares
shares_missing_evals.last_mut().unwrap().evals.pop();
assert_arg_err(
advz.recover_payload(&shares_missing_evals, &common),
format!(
"shares contain {} but expected {}",
shares_missing_evals[0].evals.len(),
&common.poly_commits.len()
)
.as_str(),
);
}

// corrupted index, in bounds
{
let mut shares_bad_indices = shares.clone();

// permute indices to avoid duplicates and keep them in bounds
for share in &mut shares_bad_indices {
share.index = (share.index + 1) % advz.num_storage_nodes;
}

// Recovery succeeds (indices are valid) but the payload is garbage.
let bytes_recovered = advz
.recover_payload(&shares_bad_indices, &common)
.expect("recover_payload should succeed when indices are in bounds");
assert_ne!(bytes_recovered, bytes_random);
}

// corrupted index, out of bounds
{
let mut shares_bad_indices = shares.clone();
// Push indices past the evaluation domain one share at a time;
// recovery must fail at every step, not just when all are bad.
for i in 0..shares_bad_indices.len() {
shares_bad_indices[i].index +=
u32::try_from(advz.multi_open_domain.size()).unwrap();
advz.recover_payload(&shares_bad_indices, &common)
.expect_err("recover_payload should fail when indices are out of bounds");
}
}
}

/// Routine initialization tasks.
///
/// Returns the following tuple:
/// 1. An initialized [`Advz`] instance.
/// 2. A `Vec<u8>` filled with random bytes.
pub(super) fn advz_init() -> (Advz<Bls12_381, Sha256>, Vec<u8>) {
let (recovery_threshold, num_storage_nodes) = (4, 6);
let mut rng = jf_utils::test_rng();
let srs = init_srs(recovery_threshold as usize, &mut rng);
let advz = Advz::new(num_storage_nodes, recovery_threshold, srs).unwrap();
let bytes_random = init_random_payload(4000, &mut rng);
(advz, bytes_random)
}

/// Convenience wrapper to assert [`VidError::Argument`] return value.
pub(super) fn assert_arg_err<T>(res: VidResult<T>, msg: &str) {
assert!(matches!(res, Err(Argument(_))), "{}", msg);
}

/// Return `len` bytes drawn from the supplied cryptographic RNG.
pub(super) fn init_random_payload<R>(len: usize, rng: &mut R) -> Vec<u8>
where
R: RngCore + CryptoRng,
{
let mut bytes_random = vec![0u8; len];
rng.fill_bytes(&mut bytes_random);
bytes_random
}

/// Generate a KZG universal SRS for testing, sized to the next valid FFT
/// size at or above `num_coeffs` coefficients.
pub(super) fn init_srs<E, R>(num_coeffs: usize, rng: &mut R) -> UnivariateUniversalParams<E>
where
E: Pairing,
R: RngCore + CryptoRng,
{
UnivariateKzgPCS::gen_srs_for_testing(rng, checked_fft_size(num_coeffs - 1).unwrap())
.unwrap()
}
}
mod test;
2 changes: 1 addition & 1 deletion vid/src/advz/payload_prover.rs
Original file line number Diff line number Diff line change
Expand Up @@ -388,7 +388,7 @@ mod tests {
advz::{
bytes_to_field::elem_byte_capacity,
payload_prover::{LargeRangeProof, SmallRangeProof, Statement},
tests::*,
test::*,
*,
},
payload_prover::PayloadProver,
Expand Down
2 changes: 1 addition & 1 deletion vid/src/advz/precomputable.rs
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ where
mod tests {
use crate::{
advz::{
tests::{advz_init, init_random_payload, init_srs},
test::{advz_init, init_random_payload, init_srs},
Advz,
},
precomputable::Precomputable,
Expand Down
Loading

0 comments on commit f3ef0b9

Please sign in to comment.