Todo cleanups #2504

Merged · 10 commits · Feb 5, 2024
5 changes: 2 additions & 3 deletions .cargo/config.toml
@@ -3,7 +3,6 @@
rustflags = ["-C", "target-feature=+aes"]

[target.'cfg(target_arch = "aarch64")']
-# TODO: Try to remove once https://github.com/paritytech/substrate/issues/11538 is resolved
-# TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate bumps MSRV to at least
-# 1.61: https://github.com/RustCrypto/block-ciphers/issues/373
+# TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate with MSRV bump ships:
+# https://github.com/RustCrypto/block-ciphers/pull/395
rustflags = ["--cfg", "aes_armv8"]
4 changes: 2 additions & 2 deletions .github/workflows/rust.yml
@@ -24,8 +24,8 @@ env:
CARGO_TERM_COLOR: always
# Build smaller artifacts to avoid running out of space in CI
# TODO: Try to remove once https://github.com/paritytech/substrate/issues/11538 is resolved
-  # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate bumps MSRV to at least
-  # 1.61: https://github.com/RustCrypto/block-ciphers/issues/373
+  # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate with MSRV bump ships:
+  # https://github.com/RustCrypto/block-ciphers/pull/395
RUSTFLAGS: -C strip=symbols -C opt-level=s --cfg aes_armv8

jobs:
8 changes: 4 additions & 4 deletions .github/workflows/snapshot-build.yml
@@ -103,14 +103,14 @@ jobs:
- os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "ubuntu-20.04-x86-64"]' || '"ubuntu-20.04"') }}
target: aarch64-unknown-linux-gnu
suffix: ubuntu-aarch64-${{ github.ref_name }}
-          # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate bumps MSRV to
-          # at least 1.61: https://github.com/RustCrypto/block-ciphers/issues/373
+          # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate with MSRV bump ships:
+          # https://github.com/RustCrypto/block-ciphers/pull/395
rustflags: "-C linker=aarch64-linux-gnu-gcc --cfg aes_armv8"
- os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "macos-12-arm64"]' || '"macos-12"') }}
target: aarch64-apple-darwin
suffix: macos-aarch64-${{ github.ref_name }}
-          # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate bumps MSRV to
-          # at least 1.61: https://github.com/RustCrypto/block-ciphers/issues/373
+          # TODO: AES flag is such that we have decent performance on ARMv8, remove once `aes` crate with MSRV bump ships:
+          # https://github.com/RustCrypto/block-ciphers/pull/395
rustflags: "--cfg aes_armv8"
- os: ${{ fromJson(github.repository_owner == 'subspace' && '["self-hosted", "macos-12-arm64"]' || '"macos-12"') }}
target: x86_64-apple-darwin
1 change: 0 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default.

3 changes: 0 additions & 3 deletions Dockerfile-bootstrap-node.aarch64
@@ -50,9 +50,6 @@ RUN \
gcc-aarch64-linux-gnu \
libc6-dev-arm64-cross

-# TODO: Following package is not necessary on Ubuntu 22.04, but RocksDb compilation fails otherwise on Ubuntu 20.04
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends g++-9-multilib
-
RUN \
/root/.cargo/bin/cargo -Zgitoxide -Zgit build \
--locked \
3 changes: 0 additions & 3 deletions Dockerfile-farmer.aarch64
@@ -50,9 +50,6 @@ RUN \
gcc-aarch64-linux-gnu \
libc6-dev-arm64-cross

-# TODO: Following package is not necessary on Ubuntu 22.04, but RocksDb compilation fails otherwise on Ubuntu 20.04
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends g++-9-multilib
-
RUN \
/root/.cargo/bin/cargo -Zgitoxide -Zgit build \
--locked \
15 changes: 2 additions & 13 deletions crates/sc-proof-of-time/src/source/gossip.rs
@@ -20,11 +20,10 @@ use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT};
use std::cmp;
use std::collections::{HashMap, VecDeque};
use std::future::poll_fn;
-use std::hash::{Hash, Hasher};
use std::num::{NonZeroU32, NonZeroUsize};
use std::pin::pin;
use std::sync::{atomic, Arc};
-use subspace_core_primitives::{PotCheckpoints, PotSeed, SlotNumber};
+use subspace_core_primitives::{PotCheckpoints, PotSeed};
use tracing::{debug, error, trace, warn};

/// How many slots can proof be before it is too far
@@ -88,7 +87,7 @@ pub fn pot_gossip_peers_set_config() -> (
(cfg, notification_service)
}

-#[derive(Debug, Copy, Clone, Eq, PartialEq, Encode, Decode)]
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Encode, Decode)]
pub(super) struct GossipProof {
/// Slot number
pub(super) slot: Slot,
@@ -100,16 +99,6 @@ pub(super) struct GossipProof {
pub(super) checkpoints: PotCheckpoints,
}

-// TODO: Replace with derive once `Slot` implements `Hash`
-impl Hash for GossipProof {
-    fn hash<H: Hasher>(&self, state: &mut H) {
-        SlotNumber::from(self.slot).hash(state);
-        self.seed.hash(state);
-        self.slot_iterations.hash(state);
-        self.checkpoints.hash(state);
-    }
-}
-
#[derive(Debug)]
pub(super) enum ToGossipMessage {
Proof(GossipProof),
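The block removed above was a hand-written `Hash` impl kept only until `Slot` implemented `Hash`; with that done, the `Hash` entry added to the derive list on `GossipProof` replaces it. A minimal sketch of the pattern with simplified stand-in types (the real `Slot` wraps a slot number and the real struct has more fields):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Stand-in for the real `Slot` type: once the wrapped type derives `Hash`,
// any struct containing it can derive `Hash` too, instead of writing a
// field-by-field `impl Hash` by hand.
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
struct Slot(u64);

#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
struct Proof {
    slot: Slot,
    slot_iterations: u32,
}

fn main() {
    let proof = Proof { slot: Slot(42), slot_iterations: 1 };
    let mut hasher = DefaultHasher::new();
    proof.hash(&mut hasher);
    println!("hash: {:x}", hasher.finish());
}
```

The derived impl hashes fields in declaration order, which matches what the deleted manual impl did, so nothing observable changes for gossip deduplication.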
4 changes: 2 additions & 2 deletions crates/subspace-farmer-components/benches/reading.rs
@@ -194,8 +194,8 @@ pub fn criterion_benchmark(c: &mut Criterion) {
group.throughput(Throughput::Elements(sectors_count));
group.bench_function("piece/disk", |b| {
b.iter(|| {
-            for sector_index in 0..sectors_count as usize {
-                let sector = plot_file.offset(sector_index * sector_size);
+            for sector_index in 0..sectors_count {
+                let sector = plot_file.offset(sector_index * sector_size as u64);
read_piece::<PosTable, _, _>(
black_box(piece_offset),
black_box(&plotted_sector.sector_id),
14 changes: 7 additions & 7 deletions crates/subspace-farmer-components/src/auditing.rs
@@ -139,8 +139,8 @@ where
}

let sector = plot.offset(
-        usize::from(sector_metadata.sector_index)
-            * sector_size(sector_metadata.pieces_in_sector),
+        u64::from(sector_metadata.sector_index)
+            * sector_size(sector_metadata.pieces_in_sector) as u64,
);

let mut s_bucket = vec![0; sector_auditing_info.s_bucket_audit_size];
@@ -186,7 +186,7 @@ struct SectorAuditingDetails {
/// Size in bytes
s_bucket_audit_size: usize,
/// Offset in bytes
-    s_bucket_audit_offset_in_sector: usize,
+    s_bucket_audit_offset_in_sector: u64,
}

fn collect_sector_auditing_details(
@@ -200,19 +200,19 @@
let s_bucket_audit_index = sector_slot_challenge.s_bucket_audit_index();
let s_bucket_audit_size = Scalar::FULL_BYTES
* usize::from(sector_metadata.s_bucket_sizes[usize::from(s_bucket_audit_index)]);
-    let s_bucket_audit_offset = Scalar::FULL_BYTES
+    let s_bucket_audit_offset = Scalar::FULL_BYTES as u64
* sector_metadata
.s_bucket_sizes
.iter()
.take(s_bucket_audit_index.into())
.copied()
-            .map(usize::from)
-            .sum::<usize>();
+            .map(u64::from)
+            .sum::<u64>();

let sector_contents_map_size =
SectorContentsMap::encoded_size(sector_metadata.pieces_in_sector);

-    let s_bucket_audit_offset_in_sector = sector_contents_map_size + s_bucket_audit_offset;
+    let s_bucket_audit_offset_in_sector = sector_contents_map_size as u64 + s_bucket_audit_offset;

SectorAuditingDetails {
sector_id,
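The auditing changes widen all byte-offset arithmetic from `usize` to `u64`, so offsets into large plot files stay correct on 32-bit targets where `usize` is only 32 bits. A small sketch of the overflow being avoided, with made-up sector numbers:

```rust
// Sketch of why offsets are computed in u64: on a 32-bit target, usize
// arithmetic would wrap for plots larger than 4 GiB. All values are made up.
fn main() {
    let sector_index: u16 = 2_000;
    let sector_size: usize = 3 * 1024 * 1024; // ~3 MiB per sector, hypothetical

    // Widening each operand to u64 *before* multiplying keeps the result
    // correct even where usize is 32 bits wide:
    let offset = u64::from(sector_index) * sector_size as u64;
    assert_eq!(offset, 6_291_456_000); // > u32::MAX, would wrap in 32-bit usize

    println!("byte offset into plot: {offset}");
}
```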
47 changes: 23 additions & 24 deletions crates/subspace-farmer-components/src/lib.rs
@@ -125,8 +125,7 @@ where
/// thread pool
pub trait ReadAtSync: Send + Sync {
/// Get implementation of [`ReadAtSync`] that add specified offset to all attempted reads
-    // TODO: Should offset and reads be in u64?
-    fn offset(&self, offset: usize) -> ReadAtOffset<'_, Self>
+    fn offset(&self, offset: u64) -> ReadAtOffset<'_, Self>
where
Self: Sized,
{
@@ -137,11 +136,11 @@ pub trait ReadAtSync: Send + Sync {
}

/// Fill the buffer by reading bytes at a specific offset
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()>;
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()>;
}

impl ReadAtSync for ! {
-    fn read_at(&self, _buf: &mut [u8], _offset: usize) -> io::Result<()> {
+    fn read_at(&self, _buf: &mut [u8], _offset: u64) -> io::Result<()> {
unreachable!("Is never called")
}
}
@@ -187,7 +186,7 @@
/// concurrent async combinators
pub trait ReadAtAsync {
/// Get implementation of [`ReadAtAsync`] that add specified offset to all attempted reads
-    fn offset(&self, offset: usize) -> ReadAtOffset<'_, Self>
+    fn offset(&self, offset: u64) -> ReadAtOffset<'_, Self>
where
Self: Sized,
{
@@ -198,14 +197,14 @@ pub trait ReadAtAsync {
}

/// Fill the buffer by reading bytes at a specific offset and return the buffer back
-    fn read_at<B>(&self, buf: B, offset: usize) -> impl Future<Output = io::Result<B>>
+    fn read_at<B>(&self, buf: B, offset: u64) -> impl Future<Output = io::Result<B>>
where
AsyncReadBytes<B>: From<B>,
B: AsMut<[u8]> + Unpin + 'static;
}

impl ReadAtAsync for ! {
-    async fn read_at<B>(&self, _buf: B, _offset: usize) -> io::Result<B>
+    async fn read_at<B>(&self, _buf: B, _offset: u64) -> io::Result<B>
where
AsyncReadBytes<B>: From<B>,
B: AsMut<[u8]> + Unpin + 'static,
@@ -215,71 +214,71 @@ impl ReadAtAsync for ! {
}

impl ReadAtSync for [u8] {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
-        if buf.len() + offset > self.len() {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
+        if buf.len() as u64 + offset > self.len() as u64 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Buffer length with offset exceeds own length",
));
}

-        buf.copy_from_slice(&self[offset..][..buf.len()]);
+        buf.copy_from_slice(&self[offset as usize..][..buf.len()]);

Ok(())
}
}

impl ReadAtSync for &[u8] {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
-        if buf.len() + offset > self.len() {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
+        if buf.len() as u64 + offset > self.len() as u64 {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Buffer length with offset exceeds own length",
));
}

-        buf.copy_from_slice(&self[offset..][..buf.len()]);
+        buf.copy_from_slice(&self[offset as usize..][..buf.len()]);

Ok(())
}
}

impl ReadAtSync for Vec<u8> {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
self.as_slice().read_at(buf, offset)
}
}

impl ReadAtSync for &Vec<u8> {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
self.as_slice().read_at(buf, offset)
}
}

impl ReadAtSync for File {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
-        self.read_exact_at(buf, offset as u64)
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
+        self.read_exact_at(buf, offset)
}
}

impl ReadAtSync for &File {
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
-        self.read_exact_at(buf, offset as u64)
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
+        self.read_exact_at(buf, offset)
}
}

/// Reader with fixed offset added to all attempted reads
#[derive(Debug, Copy, Clone)]
pub struct ReadAtOffset<'a, T> {
inner: &'a T,
-    offset: usize,
+    offset: u64,
}

impl<T> ReadAtSync for ReadAtOffset<'_, T>
where
T: ReadAtSync,
{
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
self.inner.read_at(buf, offset + self.offset)
}
}
Expand All @@ -288,7 +287,7 @@ impl<T> ReadAtSync for &ReadAtOffset<'_, T>
where
T: ReadAtSync,
{
-    fn read_at(&self, buf: &mut [u8], offset: usize) -> io::Result<()> {
+    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
self.inner.read_at(buf, offset + self.offset)
}
}
@@ -297,7 +296,7 @@ impl<T> ReadAtAsync for ReadAtOffset<'_, T>
where
T: ReadAtAsync,
{
-    async fn read_at<B>(&self, buf: B, offset: usize) -> io::Result<B>
+    async fn read_at<B>(&self, buf: B, offset: u64) -> io::Result<B>
where
AsyncReadBytes<B>: From<B>,
B: AsMut<[u8]> + Unpin + 'static,
@@ -310,7 +309,7 @@ impl<T> ReadAtAsync for &ReadAtOffset<'_, T>
where
T: ReadAtAsync,
{
-    async fn read_at<B>(&self, buf: B, offset: usize) -> io::Result<B>
+    async fn read_at<B>(&self, buf: B, offset: u64) -> io::Result<B>
where
AsyncReadBytes<B>: From<B>,
B: AsMut<[u8]> + Unpin + 'static,
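With the `u64` TODO resolved, `ReadAtSync` and `ReadAtAsync` now take `u64` offsets throughout. A self-contained sketch of the sync half, reduced to the slice impl from this diff so it runs standalone; the real trait also provides the `offset` combinator plus impls for `Vec<u8>`, `File`, and `!`:

```rust
use std::io;

// Mini version of the `ReadAtSync` pattern from this diff, one impl only.
trait ReadAtSync {
    /// Fill `buf` by reading bytes starting at `offset`.
    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()>;
}

impl ReadAtSync for [u8] {
    fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result<()> {
        // Compare in u64 so a huge offset cannot wrap on 32-bit targets.
        if buf.len() as u64 + offset > self.len() as u64 {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Buffer length with offset exceeds own length",
            ));
        }
        buf.copy_from_slice(&self[offset as usize..][..buf.len()]);
        Ok(())
    }
}

fn main() -> io::Result<()> {
    let data = *b"hello, sector!";
    let mut buf = [0u8; 6];
    data.read_at(&mut buf, 7)?; // read the 6 bytes starting at offset 7
    assert_eq!(&buf, b"sector");
    Ok(())
}
```

Note the bounds check happens in `u64` while the slice index is cast back to `usize` only after the check; for in-memory sources the offset necessarily fits `usize` once validated, while `File::read_exact_at` consumes the `u64` directly.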
20 changes: 10 additions & 10 deletions crates/subspace-farmer-components/src/reading.rs
@@ -29,7 +29,7 @@ pub enum ReadingError {
#[error("Failed to read chunk at location {chunk_location}")]
FailedToReadChunk {
/// Chunk location
-        chunk_location: usize,
+        chunk_location: u64,
/// Low-level error
error: io::Error,
},
@@ -44,7 +44,7 @@
/// Indicates whether chunk was encoded
encoded_chunk_used: bool,
/// Chunk location
-        chunk_location: usize,
+        chunk_location: u64,
/// Lower-level error
error: String,
},
@@ -133,7 +133,7 @@
|((maybe_record_chunk, maybe_chunk_details), (s_bucket, &s_bucket_offset))| {
let (chunk_offset, encoded_chunk_used) = maybe_chunk_details?;

-                let chunk_location = chunk_offset + s_bucket_offset as usize;
+                let chunk_location = chunk_offset as u64 + u64::from(s_bucket_offset);

Some((
maybe_record_chunk,
@@ -153,8 +153,8 @@
sector
.read_at(
&mut record_chunk,
-                        SectorContentsMap::encoded_size(pieces_in_sector)
-                            + chunk_location * Scalar::FULL_BYTES,
+                        SectorContentsMap::encoded_size(pieces_in_sector) as u64
+                            + chunk_location * Scalar::FULL_BYTES as u64,
)
.map_err(|error| ReadingError::FailedToReadChunk {
chunk_location,
@@ -195,8 +195,8 @@
&sector
.read_at(
vec![0; Scalar::FULL_BYTES],
-                        SectorContentsMap::encoded_size(pieces_in_sector)
-                            + chunk_location * Scalar::FULL_BYTES,
+                        SectorContentsMap::encoded_size(pieces_in_sector) as u64
+                            + chunk_location * Scalar::FULL_BYTES as u64,
)
.await
.map_err(|error| ReadingError::FailedToReadChunk {
@@ -321,11 +321,11 @@
S: ReadAtSync,
A: ReadAtAsync,
{
-    let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector)
-        + sector_record_chunks_size(pieces_in_sector);
+    let sector_metadata_start = SectorContentsMap::encoded_size(pieces_in_sector) as u64
+        + sector_record_chunks_size(pieces_in_sector) as u64;
// Move to the beginning of the commitment and witness we care about
let record_metadata_offset =
-        sector_metadata_start + RecordMetadata::encoded_size() * usize::from(piece_offset);
+        sector_metadata_start + RecordMetadata::encoded_size() as u64 * u64::from(piece_offset);

let mut record_metadata_bytes = vec![0; RecordMetadata::encoded_size()];
match sector {
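The reads above address a fixed sector layout, contents map first, then record chunks, then one record-metadata entry per piece, with all offsets now computed in `u64`. A sketch of the same arithmetic using hypothetical sizes in place of `SectorContentsMap::encoded_size()`, `sector_record_chunks_size()`, and `RecordMetadata::encoded_size()`:

```rust
fn main() {
    // Made-up stand-ins for the real encoded sizes:
    let contents_map_size: u64 = 1_024;
    let record_chunks_size: u64 = 5_000_000_000; // > 4 GiB: too big for 32-bit usize
    let record_metadata_size: u64 = 48;
    let piece_offset: u16 = 7;

    // Layout assumed from this diff:
    // [contents map][record chunks][record metadata, one entry per piece]
    let sector_metadata_start = contents_map_size + record_chunks_size;
    let record_metadata_offset =
        sector_metadata_start + record_metadata_size * u64::from(piece_offset);

    println!("record metadata for piece {piece_offset} starts at byte {record_metadata_offset}");
}
```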