diff --git a/.cargo/config.toml b/.cargo/config.toml
index 0aaf0bbf6c..d9b2e600cf 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -3,7 +3,6 @@ rustflags = ["-C", "target-feature=+aes"]
 
 [target.'cfg(target_arch = "aarch64")']
-# TODO: Try to remove once https://github.com/paritytech/substrate/issues/11538 is resolved
-# TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate bumps MSRV to at least
-# 1.61: https://github.com/RustCrypto/block-ciphers/issues/373
+# TODO: AES flag gives us decent performance on ARMv8; remove once a release of the `aes` crate with the MSRV bump ships:
+# https://github.com/RustCrypto/block-ciphers/pull/395
 rustflags = ["--cfg", "aes_armv8"]
 
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 5e6df9a40c..fab896327c 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -24,8 +24,8 @@ env:
   CARGO_TERM_COLOR: always
   # Build smaller artifacts to avoid running out of space in CI
   # TODO: Try to remove once https://github.com/paritytech/substrate/issues/11538 is resolved
-  # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate bumps MSRV to at least
-  # 1.61: https://github.com/RustCrypto/block-ciphers/issues/373
+  # TODO: AES flag gives us decent performance on ARMv8; remove once a release of the `aes` crate with the MSRV bump ships:
+  # https://github.com/RustCrypto/block-ciphers/pull/395
   RUSTFLAGS: -C strip=symbols -C opt-level=s --cfg aes_armv8
 
 jobs:
diff --git a/.github/workflows/snapshot-build.yml b/.github/workflows/snapshot-build.yml
index 8a61d0fcf4..f068dfa069 100644
--- a/.github/workflows/snapshot-build.yml
+++ b/.github/workflows/snapshot-build.yml
@@ -103,14 +103,14 @@ jobs:
           - os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "ubuntu-20.04-x86-64"]' || '"ubuntu-20.04"') }}
             target: aarch64-unknown-linux-gnu
             suffix: ubuntu-aarch64-${{ github.ref_name }}
-            # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate bumps MSRV to
-            # at least 1.61: https://github.com/RustCrypto/block-ciphers/issues/373
+            # TODO: AES flag gives us decent performance on ARMv8; remove once a release of the `aes` crate with the MSRV bump ships:
+            # https://github.com/RustCrypto/block-ciphers/pull/395
             rustflags: "-C linker=aarch64-linux-gnu-gcc --cfg aes_armv8"
           - os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "macos-12-arm64"]' || '"macos-12"') }}
             target: aarch64-apple-darwin
             suffix: macos-aarch64-${{ github.ref_name }}
-            # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate bumps MSRV to
-            # at least 1.61: https://github.com/RustCrypto/block-ciphers/issues/373
+            # TODO: AES flag gives us decent performance on ARMv8; remove once a release of the `aes` crate with the MSRV bump ships:
+            # https://github.com/RustCrypto/block-ciphers/pull/395
             rustflags: "--cfg aes_armv8"
           - os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "macos-12-arm64"]' || '"macos-12"') }}
             target: x86_64-apple-darwin
diff --git a/Cargo.lock b/Cargo.lock
index 3a00961ed0..56d3de489c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11929,7 +11929,6 @@ dependencies = [
  "frame-support",
  "frame-system",
  "pallet-transaction-payment",
- "serde",
  "sp-core",
  "sp-io",
  "sp-runtime",
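The hunks above only retarget the stale TODO links at the PR that actually ships the MSRV bump; `--cfg aes_armv8` keeps hardware AES enabled on ARMv8 in the meantime. For readers unfamiliar with custom `--cfg` flags, here is a minimal, self-contained sketch of what such a flag toggles (the backend names are hypothetical, not the `aes` crate's internals):

```rust
// Sketch of cfg-flag gating, analogous to what `--cfg aes_armv8` selects inside
// the `aes` crate. Backend names below are made up for illustration.

#[cfg(all(target_arch = "aarch64", aes_armv8))]
fn aes_backend() -> &'static str {
    // Only compiled when RUSTFLAGS include `--cfg aes_armv8` on aarch64,
    // where the ARMv8 Cryptography Extensions provide native AES instructions.
    "armv8-hardware"
}

#[cfg(not(all(target_arch = "aarch64", aes_armv8)))]
fn aes_backend() -> &'static str {
    // Portable software implementation everywhere else.
    "software-fallback"
}

fn main() {
    println!("AES backend: {}", aes_backend());
}
```

Without the flag in RUSTFLAGS the fallback item is compiled, which is why the flag lives in both `.cargo/config.toml` and the CI environment.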
diff --git a/Dockerfile-bootstrap-node.aarch64 b/Dockerfile-bootstrap-node.aarch64
index 6c4a49a3cb..388677c636 100644
--- a/Dockerfile-bootstrap-node.aarch64
+++ b/Dockerfile-bootstrap-node.aarch64
@@ -50,9 +50,6 @@ RUN \
     gcc-aarch64-linux-gnu \
     libc6-dev-arm64-cross
 
-# TODO: Following package is not necessary on Ubuntu 22.04, but RocksDb compilation fails otherwise on Ubuntu 20.04
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends g++-9-multilib
-
 RUN \
     /root/.cargo/bin/cargo -Zgitoxide -Zgit build \
     --locked \
diff --git a/Dockerfile-farmer.aarch64 b/Dockerfile-farmer.aarch64
index dca67be413..78e706b1e8 100644
--- a/Dockerfile-farmer.aarch64
+++ b/Dockerfile-farmer.aarch64
@@ -50,9 +50,6 @@ RUN \
     gcc-aarch64-linux-gnu \
     libc6-dev-arm64-cross
 
-# TODO: Following package is not necessary on Ubuntu 22.04, but RocksDb compilation fails otherwise on Ubuntu 20.04
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends g++-9-multilib
-
 RUN \
     /root/.cargo/bin/cargo -Zgitoxide -Zgit build \
     --locked \
diff --git a/crates/sc-proof-of-time/src/source/gossip.rs b/crates/sc-proof-of-time/src/source/gossip.rs
index 1000bdd1a4..b97a66f0c5 100644
--- a/crates/sc-proof-of-time/src/source/gossip.rs
+++ b/crates/sc-proof-of-time/src/source/gossip.rs
@@ -20,11 +20,10 @@ use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT};
 use std::cmp;
 use std::collections::{HashMap, VecDeque};
 use std::future::poll_fn;
-use std::hash::{Hash, Hasher};
 use std::num::{NonZeroU32, NonZeroUsize};
 use std::pin::pin;
 use std::sync::{atomic, Arc};
-use subspace_core_primitives::{PotCheckpoints, PotSeed, SlotNumber};
+use subspace_core_primitives::{PotCheckpoints, PotSeed};
 use tracing::{debug, error, trace, warn};
 
 /// How many slots can proof be before it is too far
@@ -88,7 +87,7 @@ pub fn pot_gossip_peers_set_config() -> (
     (cfg, notification_service)
 }
 
-#[derive(Debug, Copy, Clone, Eq, PartialEq, Encode, Decode)]
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Encode, Decode)]
 pub(super) struct GossipProof {
     /// Slot number
     pub(super) slot: Slot,
@@ -100,16 +99,6 @@ pub(super) struct GossipProof {
     pub(super) checkpoints: PotCheckpoints,
 }
 
-// TODO: Replace with derive once `Slot` implements `Hash`
-impl Hash for GossipProof {
-    fn hash<H: Hasher>(&self, state: &mut H) {
-        SlotNumber::from(self.slot).hash(state);
-        self.seed.hash(state);
-        self.slot_iterations.hash(state);
-        self.checkpoints.hash(state);
-    }
-}
-
 #[derive(Debug)]
 pub(super) enum ToGossipMessage {
     Proof(GossipProof),
diff --git a/crates/subspace-farmer-components/benches/reading.rs b/crates/subspace-farmer-components/benches/reading.rs
index f9669fbac5..87ec476f4f 100644
--- a/crates/subspace-farmer-components/benches/reading.rs
+++ b/crates/subspace-farmer-components/benches/reading.rs
@@ -194,8 +194,8 @@ pub fn criterion_benchmark(c: &mut Criterion) {
     group.throughput(Throughput::Elements(sectors_count));
     group.bench_function("piece/disk", |b| {
         b.iter(|| {
-            for sector_index in 0..sectors_count as usize {
-                let sector = plot_file.offset(sector_index * sector_size);
+            for sector_index in 0..sectors_count {
+                let sector = plot_file.offset(sector_index * sector_size as u64);
                 read_piece::<PosTable, _, _>(
                     black_box(piece_offset),
                     black_box(&plotted_sector.sector_id),
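The gossip change swaps a hand-written `Hash` impl for a derive, which became possible once `Slot` implements `Hash`. A minimal sketch of why the two are equivalent, with a stand-in `Slot` wrapper (not the real `sp_consensus_slots` type):

```rust
use std::collections::HashSet;

// Stand-in for `Slot` now that it implements `Hash`; the real type wraps a u64
// in the same way.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
struct Slot(u64);

// With every field implementing `Hash`, the derive generates the same
// field-by-field hashing that the removed manual `impl Hash for GossipProof` did.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
struct Proof {
    slot: Slot,
    slot_iterations: u64,
}

fn main() {
    let mut seen = HashSet::new();
    assert!(seen.insert(Proof { slot: Slot(1), slot_iterations: 100 }));
    // Duplicate proofs are deduplicated purely via the derived impl.
    assert!(!seen.insert(Proof { slot: Slot(1), slot_iterations: 100 }));
}
```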
diff --git a/crates/subspace-farmer-components/src/auditing.rs b/crates/subspace-farmer-components/src/auditing.rs
index 5787d74646..30aa628b39 100644
--- a/crates/subspace-farmer-components/src/auditing.rs
+++ b/crates/subspace-farmer-components/src/auditing.rs
@@ -139,8 +139,8 @@ where
     }
 
     let sector = plot.offset(
-        usize::from(sector_metadata.sector_index)
-            * sector_size(sector_metadata.pieces_in_sector),
+        u64::from(sector_metadata.sector_index)
+            * sector_size(sector_metadata.pieces_in_sector) as u64,
     );
 
     let mut s_bucket = vec![0; sector_auditing_info.s_bucket_audit_size];
@@ -186,7 +186,7 @@ struct SectorAuditingDetails {
     /// Size in bytes
     s_bucket_audit_size: usize,
     /// Offset in bytes
-    s_bucket_audit_offset_in_sector: usize,
+    s_bucket_audit_offset_in_sector: u64,
 }
 
 fn collect_sector_auditing_details(
@@ -200,19 +200,19 @@ fn collect_sector_auditing_details(
     let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();
     let s_bucket_audit_size = Scalar::FULL_BYTES
         * usize::from(sector_metadata.s_bucket_sizes[usize::from(s_bucket_audit_index)]);
-    let s_bucket_audit_offset = Scalar::FULL_BYTES
+    let s_bucket_audit_offset = Scalar::FULL_BYTES as u64
         * sector_metadata
             .s_bucket_sizes
             .iter()
             .take(s_bucket_audit_index.into())
             .copied()
-            .map(usize::from)
-            .sum::<usize>();
+            .map(u64::from)
+            .sum::<u64>();
 
     let sector_contents_map_size =
         SectorContentsMap::encoded_size(sector_metadata.pieces_in_sector);
 
-    let s_bucket_audit_offset_in_sector = sector_contents_map_size + s_bucket_audit_offset;
+    let s_bucket_audit_offset_in_sector = sector_contents_map_size as u64 + s_bucket_audit_offset;
 
     SectorAuditingDetails {
         sector_id,
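The auditing hunks widen sector byte offsets from `usize` to `u64`. The motivation, sketched with made-up sizes (the real values come from `sector_size()`): on a 32-bit target `usize` caps out near 4 GiB, which a plot with a handful of sectors already exceeds.

```rust
// Why sector byte offsets moved to u64: on a 32-bit target, usize arithmetic
// would overflow for plots larger than ~4 GiB. Figures below are illustrative,
// not the real sector layout.
fn main() {
    let sector_size: usize = 1022 * 1024 * 1024; // ~1 GiB per sector (made up)
    let sector_index: u16 = 9;

    // Widening before multiplying keeps the whole computation in u64,
    // matching the pattern used in auditing.rs above.
    let offset = u64::from(sector_index) * sector_size as u64;
    assert!(offset > u64::from(u32::MAX)); // would not fit in 32-bit usize
    println!("sector {sector_index} starts at byte {offset}");
}
```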
diff --git a/crates/subspace-farmer-components/src/lib.rs b/crates/subspace-farmer-components/src/lib.rs
index 0576792ff9..de0186b96b 100644
--- a/crates/subspace-farmer-components/src/lib.rs
+++ b/crates/subspace-farmer-components/src/lib.rs
@@ -125,8 +125,7 @@ where
 /// thread pool
 pub trait ReadAtSync: Send + Sync {
     /// Get implementation of [`ReadAtSync`] that add specified offset to all attempted reads
-    // TODO: Should offset and reads be in u64?
-    fn offset(&self, offset: usize) -> ReadAtOffset<'_, Self>
+    fn offset(&self, offset: u64) -> ReadAtOffset<'_, Self>
     where
         Self: Sized,
     {
@@ -137,11 +136,11 @@ pub trait ReadAtSync: Send + Sync {
     }
 
     /// Fill the buffer by reading bytes at a specific offset
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()>;
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()>;
 }
 
 impl ReadAtSync for ! {
-    fn read_at(&self, _buf: &mut [u8], _offset: usize) -> io::Result<()> {
+    fn read_at(&self, _buf: &mut [u8], _offset: u64) -> io::Result<()> {
         unreachable!("Is never called")
     }
 }
@@ -187,7 +186,7 @@ where
 /// concurrent async combinators
 pub trait ReadAtAsync {
     /// Get implementation of [`ReadAtAsync`] that add specified offset to all attempted reads
-    fn offset(&self, offset: usize) -> ReadAtOffset<'_, Self>
+    fn offset(&self, offset: u64) -> ReadAtOffset<'_, Self>
     where
         Self: Sized,
     {
@@ -198,14 +197,14 @@ pub trait ReadAtAsync {
     }
 
     /// Fill the buffer by reading bytes at a specific offset and return the buffer back
-    fn read_at<B>(&self, buf: B, offset: usize) -> impl Future<Output = io::Result<B>>
+    fn read_at<B>(&self, buf: B, offset: u64) -> impl Future<Output = io::Result<B>>
     where
         AsyncReadBytes<B>: From<B>,
         B: AsMut<[u8]> + Unpin + 'static;
 }
 
 impl ReadAtAsync for ! {
-    async fn read_at<B>(&self, _buf: B, _offset: usize) -> io::Result<B>
+    async fn read_at<B>(&self, _buf: B, _offset: u64) -> io::Result<B>
     where
         AsyncReadBytes<B>: From<B>,
         B: AsMut<[u8]> + Unpin + 'static,
@@ -215,56 +214,56 @@ impl ReadAtAsync for ! {
 }
 
 impl ReadAtSync for [u8] {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
-        if buf.len() + offset > self.len() {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
+        if buf.len() as u64 + offset > self.len() as u64 {
             return Err(io::Error::new(
                 io::ErrorKind::InvalidInput,
                 "Buffer length with offset exceeds own length",
             ));
         }
 
-        buf.copy_from_slice(&self[offset..][..buf.len()]);
+        buf.copy_from_slice(&self[offset as usize..][..buf.len()]);
 
         Ok(())
     }
 }
 
 impl ReadAtSync for &[u8] {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
-        if buf.len() + offset > self.len() {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
+        if buf.len() as u64 + offset > self.len() as u64 {
             return Err(io::Error::new(
                 io::ErrorKind::InvalidInput,
                 "Buffer length with offset exceeds own length",
             ));
         }
 
-        buf.copy_from_slice(&self[offset..][..buf.len()]);
+        buf.copy_from_slice(&self[offset as usize..][..buf.len()]);
 
         Ok(())
     }
 }
 
 impl ReadAtSync for Vec<u8> {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
         self.as_slice().read_at(buf, offset)
     }
 }
 
 impl ReadAtSync for &Vec<u8> {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
         self.as_slice().read_at(buf, offset)
     }
 }
 
 impl ReadAtSync for File {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
-        self.read_exact_at(buf, offset as u64)
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
+        self.read_exact_at(buf, offset)
     }
 }
 
 impl ReadAtSync for &File {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
-        self.read_exact_at(buf, offset as u64)
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
+        self.read_exact_at(buf, offset)
     }
 }
 
@@ -272,14 +271,14 @@ impl ReadAtSync for &File {
 #[derive(Debug, Copy, Clone)]
 pub struct ReadAtOffset<'a, T> {
     inner: &'a T,
-    offset: usize,
+    offset: u64,
 }
 
 impl<T> ReadAtSync for ReadAtOffset<'_, T>
 where
     T: ReadAtSync,
 {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
         self.inner.read_at(buf, offset + self.offset)
     }
 }
@@ -288,7 +287,7 @@ impl<T> ReadAtSync for &ReadAtOffset<'_, T>
 where
     T: ReadAtSync,
 {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
         self.inner.read_at(buf, offset + self.offset)
     }
 }
@@ -297,7 +296,7 @@ impl<T> ReadAtAsync for ReadAtOffset<'_, T>
 where
     T: ReadAtAsync,
 {
-    async fn read_at<B>(&self, buf: B, offset: usize) -> io::Result<B>
+    async fn read_at<B>(&self, buf: B, offset: u64) -> io::Result<B>
     where
         AsyncReadBytes<B>: From<B>,
         B: AsMut<[u8]> + Unpin + 'static,
@@ -310,7 +309,7 @@ impl<T> ReadAtAsync for &ReadAtOffset<'_, T>
 where
     T: ReadAtAsync,
 {
-    async fn read_at<B>(&self, buf: B, offset: usize) -> io::Result<B>
+    async fn read_at<B>(&self, buf: B, offset: u64) -> io::Result<B>
     where
         AsyncReadBytes<B>: From<B>,
         B: AsMut<[u8]> + Unpin + 'static,
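Since `ReadAtSync`/`ReadAtOffset` are the heart of this refactor, a trimmed, self-contained restatement of the sync half may help; it mirrors the shapes in the diff above but is a sketch, not a drop-in copy:

```rust
use std::io;

// Minimal re-statement of the trait from the diff, trimmed to what the demo needs.
trait ReadAtSync {
    fn offset(&self, offset: u64) -> ReadAtOffset<'_, Self>
    where
        Self: Sized,
    {
        ReadAtOffset { inner: self, offset }
    }

    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()>;
}

struct ReadAtOffset<'a, T> {
    inner: &'a T,
    offset: u64,
}

// Implemented for `&[u8]` (which is Sized) so `offset()` is callable on it.
impl ReadAtSync for &[u8] {
    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
        if buf.len() as u64 + offset > self.len() as u64 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Buffer length with offset exceeds own length",
            ));
        }
        buf.copy_from_slice(&self[offset as usize..][..buf.len()]);
        Ok(())
    }
}

impl<T> ReadAtSync for ReadAtOffset<'_, T>
where
    T: ReadAtSync,
{
    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
        // The stored base offset is added to every read, which is how sector
        // views are carved out of one large plot.
        self.inner.read_at(buf, offset + self.offset)
    }
}

fn main() -> io::Result<()> {
    let plot: Vec<u8> = (0u8..16).collect();
    let slice = plot.as_slice();
    let sector = slice.offset(8); // view starting at absolute byte 8
    let mut buf = [0u8; 4];
    sector.read_at(&mut buf, 2)?; // reads absolute bytes 10..14
    assert_eq!(buf, [10, 11, 12, 13]);
    Ok(())
}
```

Note the `Self: Sized` bound on `offset()`: it is the reason the reference impls (`&[u8]`, `&File`, `&Vec<u8>`) exist alongside the owned ones in the real code.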
diff --git a/crates/subspace-farmer-components/src/reading.rs b/crates/subspace-farmer-components/src/reading.rs
index 0bfe08be0f..a834b0ba8d 100644
--- a/crates/subspace-farmer-components/src/reading.rs
+++ b/crates/subspace-farmer-components/src/reading.rs
@@ -29,7 +29,7 @@ pub enum ReadingError {
     #[error("Failed to read chunk at location {chunk_location}")]
     FailedToReadChunk {
         /// Chunk location
-        chunk_location: usize,
+        chunk_location: u64,
         /// Low-level error
         error: io::Error,
     },
@@ -44,7 +44,7 @@ pub enum ReadingError {
         /// Indicates whether chunk was encoded
         encoded_chunk_used: bool,
         /// Chunk location
-        chunk_location: usize,
+        chunk_location: u64,
         /// Lower-level error
         error: String,
     },
@@ -133,7 +133,7 @@ where
             |((maybe_record_chunk, maybe_chunk_details), (s_bucket, &s_bucket_offset))| {
                 let (chunk_offset, encoded_chunk_used) = maybe_chunk_details?;
 
-                let chunk_location = chunk_offset + s_bucket_offset as usize;
+                let chunk_location = chunk_offset as u64 + u64::from(s_bucket_offset);
 
                 Some((
                     maybe_record_chunk,
@@ -153,8 +153,8 @@ where
                 sector
                     .read_at(
                         &mut record_chunk,
-                        SectorContentsMap::encoded_size(pieces_in_sector)
-                            + chunk_location * Scalar::FULL_BYTES,
+                        SectorContentsMap::encoded_size(pieces_in_sector) as u64
+                            + chunk_location * Scalar::FULL_BYTES as u64,
                     )
                     .map_err(|error| ReadingError::FailedToReadChunk {
                         chunk_location,
@@ -195,8 +195,8 @@ where
                 &sector
                     .read_at(
                         vec![0; Scalar::FULL_BYTES],
-                        SectorContentsMap::encoded_size(pieces_in_sector)
-                            + chunk_location * Scalar::FULL_BYTES,
+                        SectorContentsMap::encoded_size(pieces_in_sector) as u64
+                            + chunk_location * Scalar::FULL_BYTES as u64,
                     )
                     .await
                     .map_err(|error| ReadingError::FailedToReadChunk {
                         chunk_location,
@@ -321,11 +321,11 @@ where
     S: ReadAtSync,
     A: ReadAtAsync,
 {
-    let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector)
-        + sector_record_chunks_size(pieces_in_sector);
+    let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector) as u64
+        + sector_record_chunks_size(pieces_in_sector) as u64;
     // Move to the beginning of the commitment and witness we care about
     let record_metadata_offset =
-        sector_metadata_start + RecordMetadata::encoded_size() * usize::from(piece_offset);
+        sector_metadata_start + RecordMetadata::encoded_size() as u64 * u64::from(piece_offset);
 
     let mut record_metadata_bytes = vec![0; RecordMetadata::encoded_size()];
 
     match sector {
diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs
index 264e25fb8d..b1c48ff026 100644
--- a/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs
+++ b/crates/subspace-farmer/src/bin/subspace-farmer/commands/farm.rs
@@ -10,7 +10,6 @@ use clap::{Parser, ValueHint};
 use futures::channel::oneshot;
 use futures::stream::FuturesUnordered;
 use futures::{FutureExt, StreamExt};
-use lru::LruCache;
 use parking_lot::Mutex;
 use prometheus_client::registry::Registry;
 use std::fs;
@@ -49,8 +48,6 @@ use tokio::sync::Semaphore;
 use tracing::{debug, error, info, info_span, warn};
 use zeroize::Zeroizing;
 
-const RECORDS_ROOTS_CACHE_SIZE: NonZeroUsize = NonZeroUsize::new(1_000_000).expect("Not zero; qed");
-
 fn should_farm_during_initial_plotting() -> bool {
     let total_cpu_cores = all_cpu_cores()
         .iter()
@@ -424,14 +421,10 @@ where
         .expect("Not zero; qed"),
     )
     .map_err(|error| anyhow::anyhow!(error))?;
 
-    // TODO: Consider introducing and using global in-memory segment header cache (this comment is
-    // in multiple files)
-    let segment_commitments_cache = Arc::new(Mutex::new(LruCache::new(RECORDS_ROOTS_CACHE_SIZE)));
     let validator = Some(SegmentCommitmentPieceValidator::new(
         node.clone(),
         node_client.clone(),
         kzg.clone(),
-        segment_commitments_cache,
     ));
     let piece_provider = PieceProvider::new(node.clone(), validator.clone());
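The reading changes widen the chunk byte arithmetic the same way as auditing. A worked sketch of the layout math with stand-in constants (the real values come from `SectorContentsMap::encoded_size()` and `Scalar::FULL_BYTES`):

```rust
// Worked sketch of the widened chunk-offset arithmetic from reading.rs.
// Constants below are illustrative, not the real sector layout values.
const SCALAR_FULL_BYTES: u64 = 32; // stands in for Scalar::FULL_BYTES

fn main() {
    let sector_contents_map_size: u64 = 4096; // hypothetical encoded-size result
    let chunk_offset: u32 = 7;
    let s_bucket_offset: u32 = 1000;

    // chunk_location counts chunks, not bytes, and is now u64 end to end.
    let chunk_location = chunk_offset as u64 + u64::from(s_bucket_offset);

    // Absolute byte position: skip the sector contents map, then whole chunks.
    let byte_position = sector_contents_map_size + chunk_location * SCALAR_FULL_BYTES;
    assert_eq!(byte_position, 4096 + 1007 * 32);
    println!("chunk {chunk_location} lives at byte {byte_position}");
}
```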
diff --git a/crates/subspace-farmer/src/bin/subspace-farmer/main.rs b/crates/subspace-farmer/src/bin/subspace-farmer/main.rs
index cc1f343041..588e078c5a 100644
--- a/crates/subspace-farmer/src/bin/subspace-farmer/main.rs
+++ b/crates/subspace-farmer/src/bin/subspace-farmer/main.rs
@@ -110,12 +110,10 @@ async fn main() -> anyhow::Result<()> {
         }
 
         for disk_farm in &disk_farms {
-            // TODO: Delete this section once we don't have shared data anymore
-            info!("Wiping shared data");
-            let _ = fs::remove_file(disk_farm.join("known_addresses_db"));
-            let _ = fs::remove_file(disk_farm.join("known_addresses.bin"));
-            let _ = fs::remove_file(disk_farm.join("piece_cache_db"));
-            let _ = fs::remove_file(disk_farm.join("providers_db"));
+            if disk_farm.join("known_addresses.bin").exists() {
+                info!("Wiping known addresses");
+                let _ = fs::remove_file(disk_farm.join("known_addresses.bin"));
+            }
 
             SingleDiskFarm::wipe(disk_farm)?;
         }
diff --git a/crates/subspace-farmer/src/single_disk_farm.rs b/crates/subspace-farmer/src/single_disk_farm.rs
index 6c2f472d61..ff653ec4d2 100644
--- a/crates/subspace-farmer/src/single_disk_farm.rs
+++ b/crates/subspace-farmer/src/single_disk_farm.rs
@@ -142,13 +142,6 @@ impl SingleDiskFarmInfo {
     /// Load `SingleDiskFarm` from path is supposed to be stored, `None` means no info file was
     /// found, happens during first start.
     pub fn load_from(directory: &Path) -> io::Result<Option<Self>> {
-        // TODO: Remove this compatibility hack after enough time has passed
-        if directory.join("single_disk_plot.json").exists() {
-            fs::rename(
-                directory.join("single_disk_plot.json"),
-                directory.join(Self::FILE_NAME),
-            )?;
-        }
         let bytes = match fs::read(directory.join(Self::FILE_NAME)) {
             Ok(bytes) => bytes,
             Err(error) => {
@@ -1407,20 +1400,11 @@ impl SingleDiskFarm {
 
         DiskPieceCache::wipe(directory)?;
 
-        // TODO: Remove this compatibility hack after enough time has passed
-        if directory.join("single_disk_plot.json").exists() {
-            info!(
-                "Deleting info file at {}",
-                directory.join("single_disk_plot.json").display()
-            );
-            fs::remove_file(directory.join("single_disk_plot.json"))
-        } else {
-            info!(
-                "Deleting info file at {}",
-                single_disk_info_info_path.display()
-            );
-            fs::remove_file(single_disk_info_info_path)
-        }
+        info!(
+            "Deleting info file at {}",
+            single_disk_info_info_path.display()
+        );
+        fs::remove_file(single_disk_info_info_path)
     }
 
     /// Check the farm for corruption and repair errors (caused by disk errors or something else),
diff --git a/crates/subspace-farmer/src/single_disk_farm/farming/rayon_files.rs b/crates/subspace-farmer/src/single_disk_farm/farming/rayon_files.rs
index be13329b71..a4745d8c39 100644
--- a/crates/subspace-farmer/src/single_disk_farm/farming/rayon_files.rs
+++ b/crates/subspace-farmer/src/single_disk_farm/farming/rayon_files.rs
@@ -11,7 +11,7 @@ pub struct RayonFiles {
 }
 
 impl ReadAtSync for RayonFiles {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
         let thread_index = rayon::current_thread_index().unwrap_or_default();
         let file = self.files.get(thread_index).ok_or_else(|| {
             io::Error::new(io::ErrorKind::Other, "No files entry for this rayon thread")
@@ -22,7 +22,7 @@ impl ReadAtSync for RayonFiles {
 }
 
 impl ReadAtSync for &RayonFiles {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
         (*self).read_at(buf, offset)
     }
 }
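`RayonFiles` is worth a closer look: it keeps one file handle per rayon worker so concurrent positioned reads never contend on shared file state. A self-contained sketch of the same idea (Unix-only via `FileExt::read_exact_at`; the path and error handling are placeholders, and it assumes the `rayon` crate as a dependency):

```rust
use rayon::prelude::*;
use std::fs::File;
use std::io;
use std::os::unix::fs::FileExt;

struct PerThreadFiles {
    files: Vec<File>,
}

impl PerThreadFiles {
    // One handle per thread in the pool, all pointing at the same path.
    fn open(path: &str, threads: usize) -> io::Result<Self> {
        let files = (0..threads)
            .map(|_| File::open(path))
            .collect::<io::Result<_>>()?;
        Ok(Self { files })
    }

    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
        // Same trick as the diff: index the handle by the current rayon thread.
        let thread_index = rayon::current_thread_index().unwrap_or_default();
        let file = self.files.get(thread_index).ok_or_else(|| {
            io::Error::new(io::ErrorKind::Other, "No files entry for this rayon thread")
        })?;
        file.read_exact_at(buf, offset)
    }
}

fn main() -> io::Result<()> {
    let files = PerThreadFiles::open("/etc/hostname", rayon::current_num_threads())?;
    let bytes: Vec<u8> = (0..4u64)
        .into_par_iter()
        .map(|i| {
            let mut byte = [0u8; 1];
            files.read_at(&mut byte, i).map(|()| byte[0]).unwrap_or(0)
        })
        .collect();
    println!("first bytes: {bytes:?}");
    Ok(())
}
```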
a/crates/subspace-farmer/src/single_disk_farm/piece_reader.rs
+++ b/crates/subspace-farmer/src/single_disk_farm/piece_reader.rs
@@ -157,7 +157,7 @@ async fn read_pieces(
         }
 
         let sector_size = sector_size(pieces_in_sector);
-        let sector = plot_file.offset(sector_index as usize * sector_size);
+        let sector = plot_file.offset(u64::from(sector_index) * sector_size as u64);
 
         let maybe_piece = read_piece::<PosTable>(
             &public_key,
diff --git a/crates/subspace-farmer/src/utils/piece_validator.rs b/crates/subspace-farmer/src/utils/piece_validator.rs
index 824a02e0e1..4c6657caec 100644
--- a/crates/subspace-farmer/src/utils/piece_validator.rs
+++ b/crates/subspace-farmer/src/utils/piece_validator.rs
@@ -1,11 +1,8 @@
 use crate::NodeClient;
 use async_trait::async_trait;
-use lru::LruCache;
-use parking_lot::Mutex;
-use std::sync::Arc;
 use subspace_archiving::archiver::is_piece_valid;
 use subspace_core_primitives::crypto::kzg::Kzg;
-use subspace_core_primitives::{Piece, PieceIndex, SegmentCommitment, SegmentIndex};
+use subspace_core_primitives::{Piece, PieceIndex};
 use subspace_networking::libp2p::PeerId;
 use subspace_networking::utils::piece_provider::PieceValidator;
 use subspace_networking::Node;
@@ -16,21 +13,14 @@ pub struct SegmentCommitmentPieceValidator<NC> {
     dsn_node: Node,
     node_client: NC,
     kzg: Kzg,
-    segment_commitment_cache: Arc<Mutex<LruCache<SegmentIndex, SegmentCommitment>>>,
 }
 
 impl<NC> SegmentCommitmentPieceValidator<NC> {
-    pub fn new(
-        dsn_node: Node,
-        node_client: NC,
-        kzg: Kzg,
-        segment_commitment_cache: Arc<Mutex<LruCache<SegmentIndex, SegmentCommitment>>>,
-    ) -> Self {
+    pub fn new(dsn_node: Node, node_client: NC, kzg: Kzg) -> Self {
         Self {
             dsn_node,
             node_client,
             kzg,
-            segment_commitment_cache,
         }
     }
 }
@@ -52,44 +42,27 @@ where
 
         let segment_index = piece_index.segment_index();
 
-        let maybe_segment_commitment = self
-            .segment_commitment_cache
-            .lock()
-            .get(&segment_index)
-            .copied();
-        let segment_commitment = match maybe_segment_commitment {
-            Some(segment_commitment) => segment_commitment,
-            None => {
-                let segment_headers =
-                    match self.node_client.segment_headers(vec![segment_index]).await {
-                        Ok(segment_headers) => segment_headers,
-                        Err(error) => {
-                            error!(
-                                %piece_index,
-                                ?error,
-                                "Failed tor retrieve segment headers from node"
-                            );
-                            return None;
-                        }
-                    };
-
-                let segment_commitment = match segment_headers.into_iter().next().flatten() {
-                    Some(segment_header) => segment_header.segment_commitment(),
-                    None => {
-                        error!(
-                            %piece_index,
-                            %segment_index,
-                            "Segment commitment for segment index wasn't found on node"
-                        );
-                        return None;
-                    }
-                };
-
-                self.segment_commitment_cache
-                    .lock()
-                    .push(segment_index, segment_commitment);
+        let segment_headers = match self.node_client.segment_headers(vec![segment_index]).await {
+            Ok(segment_headers) => segment_headers,
+            Err(error) => {
+                error!(
+                    %piece_index,
+                    ?error,
+                    "Failed to retrieve segment headers from node"
+                );
+                return None;
+            }
+        };
 
-                segment_commitment
+        let segment_commitment = match segment_headers.into_iter().next().flatten() {
+            Some(segment_header) => segment_header.segment_commitment(),
+            None => {
+                error!(
+                    %piece_index,
+                    %segment_index,
+                    "Segment commitment for segment index wasn't found on node"
+                );
+                return None;
             }
         };
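With the per-validator `LruCache` gone, every validated piece now costs one `segment_headers` round-trip to the node; the removed TODO suggests the longer-term plan is a shared, global segment header cache instead. A minimal sketch of the simplified lookup flow that remains (all types here are stand-ins for the real `NodeClient` and `subspace-core-primitives` types):

```rust
// Stand-in types; the real ones live in the farmer's NodeClient and
// subspace-core-primitives. This only models the control flow.
#[derive(Debug, Clone, Copy)]
struct SegmentIndex(u64);
#[derive(Debug, Clone, Copy, PartialEq)]
struct SegmentCommitment([u8; 4]);

struct SegmentHeader {
    commitment: SegmentCommitment,
}

impl SegmentHeader {
    fn segment_commitment(&self) -> SegmentCommitment {
        self.commitment
    }
}

// Hypothetical client mirroring the `segment_headers(vec![...])` call shape.
struct NodeClient;

impl NodeClient {
    fn segment_headers(
        &self,
        indexes: Vec<SegmentIndex>,
    ) -> Result<Vec<Option<SegmentHeader>>, String> {
        Ok(indexes
            .into_iter()
            .map(|SegmentIndex(i)| {
                Some(SegmentHeader { commitment: SegmentCommitment([i as u8; 4]) })
            })
            .collect())
    }
}

// The post-diff flow: no cache lock, no push-back; one fetch per validation.
fn segment_commitment(
    client: &NodeClient,
    segment_index: SegmentIndex,
) -> Option<SegmentCommitment> {
    let segment_headers = client.segment_headers(vec![segment_index]).ok()?;
    segment_headers
        .into_iter()
        .next()
        .flatten()
        .map(|header| header.segment_commitment())
}

fn main() {
    let commitment = segment_commitment(&NodeClient, SegmentIndex(3));
    assert_eq!(commitment, Some(SegmentCommitment([3; 4])));
}
```

The trade-off is simpler code and no stale cache entries, at the cost of repeated node queries for pieces from the same segment.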
"https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } frame-system = { version = "4.0.0-dev", default-features = false, optional = true, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } -# TODO: Should, idealy, be optional, but `sp-runtime`'s `serde` feature is enabled unconditionally by something in -# Substrate and as the result our custom `Block` implementation has to derive `serde` traits essentially -# unconditionally or else it doesn't compile -serde = { version = "1.0.195", default-features = false, features = ["alloc", "derive"] } sp-core = { version = "21.0.0", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } sp-io = { version = "23.0.0", default-features = false, optional = true, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } sp-runtime = { version = "24.0.0", default-features = false, git = "https://github.com/subspace/polkadot-sdk", rev = "d6b500960579d73c43fc4ef550b703acfa61c4c8" } @@ -33,7 +29,6 @@ subspace-core-primitives = { version = "0.1.0", default-features = false, path = default = ["std"] std = [ "pallet-transaction-payment/std", - "serde/std", "sp-core/std", "sp-runtime/std", "sp-std/std", diff --git a/crates/subspace-service/src/lib.rs b/crates/subspace-service/src/lib.rs index a829b0809c..311a820826 100644 --- a/crates/subspace-service/src/lib.rs +++ b/crates/subspace-service/src/lib.rs @@ -1002,7 +1002,6 @@ where async move { let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - // TODO: Would be nice if the whole header was passed in here let parent_header = client .header(parent_hash)? .expect("Parent header must always exist when block is created; qed"); diff --git a/crates/subspace-verification/src/lib.rs b/crates/subspace-verification/src/lib.rs index c942850dab..88c4786881 100644 --- a/crates/subspace-verification/src/lib.rs +++ b/crates/subspace-verification/src/lib.rs @@ -241,8 +241,6 @@ where return Err(Error::InvalidChunkWitness); } - // TODO: Check if sector already expired once we have such notion - if let Some(PieceCheckParams { max_pieces_in_sector, segment_commitment,