diff --git a/Cargo.lock b/Cargo.lock index d5bb12f..03d5ec2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2378,6 +2378,7 @@ dependencies = [ "sp-api", "sp-authorship", "sp-block-builder", + "sp-blockchain", "sp-consensus", "sp-consensus-aura", "sp-consensus-babe", @@ -2393,6 +2394,7 @@ dependencies = [ "sp-storage", "sp-timestamp", "sp-transaction-pool", + "thiserror", "tokio", "tracing", "tracing-subscriber", diff --git a/core/Cargo.toml b/core/Cargo.toml index 1c355b1..1063136 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -13,6 +13,8 @@ async-trait = "0.1.52" parking_lot = "0.12.1" lazy_static = "1.4.0" tracing = "0.1.3" +thiserror = "1.0" +tokio = { version = "1.15", features = ["macros"] } # Local dependencies fudge-companion = { path = "./src/builder/companion"} @@ -26,6 +28,7 @@ sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkad sp-storage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.29" } sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.29" } sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.29" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.29" } sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.29" } sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.29" } sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.29" } @@ -71,7 +74,6 @@ cumulus-relay-chain-inprocess-interface= { git = "https://github.com/paritytech/ [dev-dependencies] polkadot-runtime = { git = "https://github.com/paritytech/polkadot", branch = "release-v0.9.29" } -tokio = { version = "1.15", features = ["macros"] } frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.29" } pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.29" } pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.29" } @@ -80,3 +82,9 @@ pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", tracing-subscriber = "0.2" pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.29" } fudge-test-runtime = {path = "./src/tests/test-parachain", default-features = true} + +[features] +default = [] +runtime-benchmarks = [ + 'frame-benchmarking/runtime-benchmarks' +] \ No newline at end of file diff --git a/core/src/builder/companion/src/expand/mod.rs b/core/src/builder/companion/src/expand/mod.rs index ddf1071..4d1d57b 100644 --- a/core/src/builder/companion/src/expand/mod.rs +++ b/core/src/builder/companion/src/expand/mod.rs @@ -134,15 +134,17 @@ pub fn expand(def: CompanionDef) -> SynResult { #(#others_names,)* }; - #( - let para = _hidden_FudgeParaChain { - id: _hidden_ParaId::from(#parachain_ids), - head: companion.#parachain_names.head(), - code: companion.#parachain_names.code(), - }; - companion.#relay_chain_name.onboard_para(para).map_err(|_| ()).map(|_| ())?; - - )* + { + #( + __hidden_tracing::enter_span!(sp_tracing::Level::INFO, std::stringify!(#relay_chain_name - Onboarding(#parachain_names):)); + let para = _hidden_FudgeParaChain { + id: _hidden_ParaId::from(#parachain_ids), + head: companion.#parachain_names.head(), + code: companion.#parachain_names.code(), + }; + companion.#relay_chain_name.onboard_para(para).map_err(|_| ()).map(|_| ())?; + )* + 
} Ok(companion) } @@ -158,14 +160,16 @@ pub fn expand(def: CompanionDef) -> SynResult { pub fn append_extrinsic(&mut self, chain: _hidden_Chain, xt: Vec) -> Result<(), ()> { match chain { _hidden_Chain::Relay => { - self.#relay_chain_name.append_extrinsic(__hidden_Decode::decode(&mut xt.as_slice()).map_err(|_|())?); - Ok(()) + self.#relay_chain_name.append_extrinsic(__hidden_Decode::decode(&mut xt.as_slice()).map_err(|_|())?) + .map(|_|()) + .map_err(|_|()) }, _hidden_Chain::Para(id) => match id { #( _ if id == #parachain_ids => { - self.#parachain_names.append_extrinsic(__hidden_Decode::decode(&mut xt.as_slice()).map_err(|_|())?); - Ok(()) + self.#parachain_names.append_extrinsic(__hidden_Decode::decode(&mut xt.as_slice()).map_err(|_|())?) + .map(|_|()) + .map_err(|_|()) }, )* _ => return Err(()), @@ -175,10 +179,16 @@ pub fn expand(def: CompanionDef) -> SynResult { pub fn with_state(&self, chain: _hidden_Chain, exec: impl FnOnce() -> R) -> Result { match chain { - _hidden_Chain::Relay => self.#relay_chain_name.with_state(exec).map_err(|_| ()), + _hidden_Chain::Relay => { + __hidden_tracing::enter_span!(sp_tracing::Level::INFO, std::stringify!(#relay_chain_name - with_state:)); + self.#relay_chain_name.with_state(exec).map_err(|_| ()) + }, _hidden_Chain::Para(id) => match id { #( - _ if id == #parachain_ids => self.#parachain_names.with_state(exec).map_err(|_| ()), + _ if id == #parachain_ids => { + __hidden_tracing::enter_span!(sp_tracing::Level::INFO, std::stringify!(#parachain_names - with_state:)); + self.#parachain_names.with_state(exec).map_err(|_| ()) + }, )* _ => Err(()) } @@ -187,10 +197,16 @@ pub fn expand(def: CompanionDef) -> SynResult { pub fn with_mut_state(&mut self, chain: _hidden_Chain, exec: impl FnOnce() -> R) -> Result { match chain { - _hidden_Chain::Relay => self.#relay_chain_name.with_mut_state(exec).map_err(|_| ()), + _hidden_Chain::Relay => { + __hidden_tracing::enter_span!(sp_tracing::Level::INFO, std::stringify!(#relay_chain_name - with_state:)); + self.#relay_chain_name.with_mut_state(exec).map_err(|_| ()) + }, _hidden_Chain::Para(id) => match id { #( - _ if id == #parachain_ids => self.#parachain_names.with_mut_state(exec).map_err(|_| ()), + _ if id == #parachain_ids => { + __hidden_tracing::enter_span!(sp_tracing::Level::INFO, std::stringify!(#parachain_names - with_state:)); + self.#parachain_names.with_mut_state(exec).map_err(|_| ()) + }, )* _ => Err(()) } @@ -199,28 +215,28 @@ pub fn expand(def: CompanionDef) -> SynResult { pub fn evolve(&mut self) -> Result<(), ()> { { - __hidden_tracing::enter_span!(sp_tracing::Level::TRACE, std::stringify!(#relay_chain_name - BlockBuilding:)); + __hidden_tracing::enter_span!(sp_tracing::Level::INFO, std::stringify!(#relay_chain_name - BlockBuilding:)); self.#relay_chain_name.build_block().map_err(|_| ()).map(|_| ())?; self.#relay_chain_name.import_block().map_err(|_| ()).map(|_| ())?; } { #( - __hidden_tracing::enter_span!(sp_tracing::Level::TRACE, std::stringify!(#parachain_names - BlockBuilding:)); + __hidden_tracing::enter_span!(sp_tracing::Level::INFO, std::stringify!(#parachain_names - BlockBuilding:)); self.#parachain_names.build_block().map_err(|_| ()).map(|_| ())?; self.#parachain_names.import_block().map_err(|_| ()).map(|_| ())?; )* } { - __hidden_tracing::enter_span!(sp_tracing::Level::TRACE, std::stringify!(#relay_chain_name - BlockBuilding:)); + __hidden_tracing::enter_span!(sp_tracing::Level::INFO, std::stringify!(#relay_chain_name - BlockBuilding:)); self.#relay_chain_name.build_block().map_err(|_| 
()).map(|_| ())?; self.#relay_chain_name.import_block().map_err(|_| ()).map(|_| ())?; } { #( - __hidden_tracing::enter_span!(sp_tracing::Level::TRACE, std::stringify!(#relay_chain_name - Onboarding(#parachain_names):)); + __hidden_tracing::enter_span!(sp_tracing::Level::INFO, std::stringify!(#relay_chain_name - Onboarding(#parachain_names):)); let para = _hidden_FudgeParaChain { id: _hidden_ParaId::from(#parachain_ids), head: self.#parachain_names.head(), diff --git a/core/src/builder/core.rs b/core/src/builder/core.rs index 6847b42..f37ba10 100644 --- a/core/src/builder/core.rs +++ b/core/src/builder/core.rs @@ -9,19 +9,17 @@ // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -use std::{marker::PhantomData, sync::Arc}; +use std::{collections::hash_map::DefaultHasher, marker::PhantomData, sync::Arc}; use frame_support::{pallet_prelude::TransactionSource, sp_runtime::traits::NumberFor}; use sc_client_api::{ backend::TransactionFor, blockchain::Backend as BlockchainBackend, AuxStore, Backend as BackendT, BlockBackend, BlockImportOperation, BlockOf, HeaderBackend, NewBlockState, - UsageProvider, + StateBackend, UsageProvider, }; -use sc_client_db::Backend; use sc_consensus::{BlockImport, BlockImportParams, ImportResult}; use sc_executor::RuntimeVersionOf; -use sc_service::{SpawnTaskHandle, TFullClient, TaskManager, TransactionPool}; -use sc_transaction_pool::{FullChainApi, RevalidationType}; +use sc_service::{SpawnTaskHandle, TaskManager, TransactionPool}; use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool}; use sp_api::{ApiExt, CallApiAt, ConstructRuntimeApi, HashFor, ProvideRuntimeApi}; use sp_block_builder::BlockBuilder; @@ -32,12 +30,12 @@ use sp_runtime::{ traits::{Block as BlockT, BlockIdTo, Hash as HashT, Header as HeaderT, One, Zero}, Digest, }; -use sp_state_machine::StorageProof; +use sp_state_machine::{StorageChanges, StorageProof}; use sp_std::time::Duration; use sp_storage::StateVersion; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; -use crate::{provider::ExternalitiesProvider, StoragePair}; +use crate::{provider::externalities::ExternalitiesProvider, StoragePair}; #[derive(Copy, Clone, Eq, PartialOrd, PartialEq, Ord, Hash)] pub enum Operation { @@ -56,31 +54,17 @@ pub enum PoolState { Busy(usize), } -pub struct Builder< - Block: BlockT, - RtApi, - Exec, - B = Backend, - C = TFullClient, -> where - Block: BlockT, - C: ProvideRuntimeApi - + BlockBackend - + BlockIdTo - + HeaderBackend - + Send - + Sync - + 'static, - C::Api: TaggedTransactionQueue, -{ +pub struct Builder { backend: Arc, client: Arc, - pool: Arc>, + pool: Arc, + executor: Exec, + task_manager: TaskManager, cache: TransitionCache, - _phantom: PhantomData<(Block, RtApi, Exec)>, + _phantom: PhantomData<(Block, RtApi)>, } -impl Builder +impl Builder where B: BackendT + 'static, Block: BlockT, @@ -102,29 +86,22 @@ where + BlockImport + CallApiAt + sc_block_builder::BlockBuilderProvider, + A: TransactionPool + MaintainedTransactionPool + 'static, { /// Create a new Builder with provided backend and client. 
- pub fn new(backend: Arc, client: Arc, manager: &TaskManager) -> Self { - let pool = Arc::new( - sc_transaction_pool::FullPool::::with_revalidation_type( - Default::default(), - true.into(), - Arc::new(FullChainApi::new( - client.clone(), - None, - &manager.spawn_essential_handle(), - )), - None, - RevalidationType::Full, - manager.spawn_essential_handle(), - client.usage_info().chain.best_number, - ), - ); - + pub fn new( + client: Arc, + backend: Arc, + pool: Arc, + executor: Exec, + task_manager: TaskManager, + ) -> Self { Builder { - backend: backend, - client: client, - pool: pool, + backend, + client, + pool, + executor, + task_manager, cache: TransitionCache { auxilliary: Vec::new(), }, @@ -160,6 +137,29 @@ where .unwrap() } + pub fn handle(&self) -> SpawnTaskHandle { + self.task_manager.spawn_handle() + } + + fn state_version(&self) -> StateVersion { + let wasm = self.latest_code(); + let code_fetcher = sp_core::traits::WrappedRuntimeCode(wasm.as_slice().into()); + let runtime_code = sp_core::traits::RuntimeCode { + code_fetcher: &code_fetcher, + heap_pages: None, + hash: { + use std::hash::{Hash, Hasher}; + let mut state = DefaultHasher::new(); + wasm.hash(&mut state); + state.finish().to_le_bytes().to_vec() + }, + }; + let mut ext = sp_state_machine::BasicExternalities::new_empty(); // just to read runtime version. + let runtime_version = + RuntimeVersionOf::runtime_version(&self.executor, &mut ext, &runtime_code).unwrap(); + runtime_version.state_version() + } + pub fn with_state( &self, op: Operation, @@ -183,33 +183,35 @@ where .map_err(|_| "Unable to start state-operation on backend".to_string())?; self.backend.begin_state_operation(&mut op, at).unwrap(); - let res = if self + let mut ext = ExternalitiesProvider::, B::State>::new(&state); + let r = ext.execute_with(exec); + + if self .backend .blockchain() .block_number_from_id(&at) .unwrap() .unwrap() == Zero::zero() { - self.mutate_genesis(&mut op, &state, exec) + self.mutate_genesis::(&mut op, ext.drain(self.state_version())) } else { - // We need to unfinalize the latest block and re-import it again in order to - // mutate it + // We need to revert the latest block and re-import it again in order to + // mutate it if it was already finalized let info = self.client.info(); if info.best_hash == info.finalized_hash { self.backend .revert(NumberFor::::one(), true) .unwrap(); } - self.mutate_normal(&mut op, &state, exec, at) - }; + self.mutate_normal::(&mut op, ext.drain(self.state_version()), at) + }?; self.backend .commit_operation(op) .map_err(|_| "Unable to commit state-operation on backend".to_string())?; - res + Ok(r) } - // TODO: Does this actually NOT change the state? 
Operation::DryRun => Ok( ExternalitiesProvider::, B::State>::new(&state).execute_with(exec), ), @@ -219,24 +221,24 @@ where fn mutate_genesis( &self, op: &mut B::BlockImportOperation, - state: &B::State, - exec: impl FnOnce() -> R, - ) -> Result { - let mut ext = ExternalitiesProvider::, B::State>::new(&state); - let (r, changes) = ext.execute_with_mut(exec); - let (_main_sc, _child_sc, _, tx, root, _tx_index) = changes.into_inner(); + changes: StorageChanges<<>::State as StateBackend>>::Transaction, HashFor>, + ) -> Result<(), String> { + let (main_sc, child_sc, _, tx, root, tx_index) = changes.into_inner(); - // We nee this in order to UNSET commited - // op.set_genesis_state(Storage::default(), true, StateVersion::V0) - // .unwrap(); op.update_db_storage(tx).unwrap(); + op.update_storage(main_sc, child_sc) + .map_err(|_| "Updating storage not possible.") + .unwrap(); + op.update_transaction_index(tx_index) + .map_err(|_| "Updating transaction index not possible.") + .unwrap(); let genesis_block = Block::new( Block::Header::new( Zero::zero(), <<::Header as HeaderT>::Hashing as HashT>::trie_root( Vec::new(), - StateVersion::V0, + self.state_version(), ), root, Default::default(), @@ -254,16 +256,15 @@ where ) .map_err(|_| "Could not set block data".to_string())?; - Ok(r) + Ok(()) } fn mutate_normal( &self, op: &mut B::BlockImportOperation, - state: &B::State, - exec: impl FnOnce() -> R, + changes: StorageChanges<<>::State as StateBackend>>::Transaction, HashFor>, at: BlockId, - ) -> Result { + ) -> Result<(), String> { let chain_backend = self.backend.blockchain(); let mut header = chain_backend .header(at) @@ -271,12 +272,9 @@ where .flatten() .expect("State is available. qed"); - let mut ext = ExternalitiesProvider::, B::State>::new(&state); - let (r, changes) = ext.execute_with_mut(exec); - let (main_sc, child_sc, _, tx, root, tx_index) = changes.into_inner(); - header.set_state_root(root); + op.update_db_storage(tx).unwrap(); op.update_storage(main_sc, child_sc) .map_err(|_| "Updating storage not possible.") @@ -293,7 +291,6 @@ where .justifications(at) .expect("State is available. qed."); - // TODO: We set as final, this might not be correct. 
op.set_block_data( header, body, @@ -302,7 +299,7 @@ where NewBlockState::Final, ) .unwrap(); - Ok(r) + Ok(()) } /// Append a given set of key-value-pairs into the builder cache diff --git a/core/src/builder/parachain.rs b/core/src/builder/parachain.rs index 0113645..26202ae 100644 --- a/core/src/builder/parachain.rs +++ b/core/src/builder/parachain.rs @@ -18,7 +18,9 @@ use sc_client_api::{ use sc_client_db::Backend; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_executor::RuntimeVersionOf; -use sc_service::{SpawnTaskHandle, TFullClient, TaskManager}; +use sc_service::TFullClient; +use sc_transaction_pool::FullPool; +use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool}; use sp_api::{ApiExt, CallApiAt, ConstructRuntimeApi, ProvideRuntimeApi, StorageProof}; use sp_block_builder::BlockBuilder; use sp_consensus::{BlockOrigin, Proposal}; @@ -36,7 +38,7 @@ use crate::{ digest::DigestCreator, inherent::ArgsProvider, types::StoragePair, - PoolState, + Initiator, PoolState, }; pub struct FudgeParaBuild { @@ -60,6 +62,7 @@ pub struct ParachainBuilder< DP, B = Backend, C = TFullClient, + A = FullPool, > where Block: BlockT, C: ProvideRuntimeApi @@ -70,18 +73,18 @@ pub struct ParachainBuilder< + Sync + 'static, C::Api: TaggedTransactionQueue, + A: TransactionPool + MaintainedTransactionPool + 'static, { - builder: Builder, + builder: Builder, cidp: CIDP, dp: DP, next: Option<(Block, StorageProof)>, imports: Vec<(Block, StorageProof)>, - handle: SpawnTaskHandle, _phantom: PhantomData, } -impl - ParachainBuilder +impl + ParachainBuilder where B: BackendT + 'static, Block: BlockT, @@ -107,15 +110,22 @@ where + BlockImport + CallApiAt + sc_block_builder::BlockBuilderProvider, + A: TransactionPool + MaintainedTransactionPool + 'static, { - pub fn new(manager: &TaskManager, backend: Arc, client: Arc, cidp: CIDP, dp: DP) -> Self { + pub fn new(initiator: I, setup: F) -> Self + where + I: Initiator, + F: FnOnce(Arc) -> (CIDP, DP), + { + let (client, backend, pool, executor, task_manager) = initiator.init().unwrap(); + let (cidp, dp) = setup(client.clone()); + Self { - builder: Builder::new(backend, client, manager), + builder: Builder::new(client, backend, pool, executor, task_manager), cidp, dp, next: None, imports: Vec::new(), - handle: manager.spawn_handle(), _phantom: Default::default(), } } @@ -193,7 +203,7 @@ where .unwrap(); let Proposal { block, proof, .. 
} = self.builder.build_block( - self.handle.clone(), + self.builder.handle(), inherents, digest, Duration::from_secs(60), diff --git a/core/src/builder/relay_chain.rs b/core/src/builder/relay_chain.rs index 008d948..b6d7052 100644 --- a/core/src/builder/relay_chain.rs +++ b/core/src/builder/relay_chain.rs @@ -23,7 +23,9 @@ use sc_client_api::{ use sc_client_db::Backend; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_executor::RuntimeVersionOf; -use sc_service::{SpawnTaskHandle, TFullBackend, TFullClient, TaskManager}; +use sc_service::{TFullBackend, TFullClient}; +use sc_transaction_pool::FullPool; +use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool}; use sp_api::{ApiExt, CallApiAt, ConstructRuntimeApi, ProvideRuntimeApi, StorageProof}; use sp_block_builder::BlockBuilder; use sp_consensus::{BlockOrigin, NoNetwork, Proposal}; @@ -46,7 +48,7 @@ use crate::{ digest::DigestCreator, inherent::ArgsProvider, types::StoragePair, - PoolState, + Initiator, PoolState, }; /// Recreating private storage types for easier handling storage access @@ -216,6 +218,7 @@ pub struct RelaychainBuilder< Runtime, B = Backend, C = TFullClient, + A = FullPool, > where Block: BlockT, C: ProvideRuntimeApi @@ -226,18 +229,18 @@ pub struct RelaychainBuilder< + Sync + 'static, C::Api: TaggedTransactionQueue, + A: TransactionPool + MaintainedTransactionPool + 'static, { - builder: Builder, + builder: Builder, cidp: CIDP, dp: DP, next: Option<(Block, StorageProof)>, imports: Vec<(Block, StorageProof)>, - handle: SpawnTaskHandle, _phantom: PhantomData<(ExtraArgs, Runtime)>, } -impl - RelaychainBuilder +impl + RelaychainBuilder where B: BackendT + 'static, Block: BlockT, @@ -266,15 +269,22 @@ where + BlockImport + CallApiAt + sc_block_builder::BlockBuilderProvider, + A: TransactionPool + MaintainedTransactionPool + 'static, { - pub fn new(manager: &TaskManager, backend: Arc, client: Arc, cidp: CIDP, dp: DP) -> Self { + pub fn new(initiator: I, setup: F) -> Self + where + I: Initiator, + F: FnOnce(Arc) -> (CIDP, DP), + { + let (client, backend, pool, executor, task_manager) = initiator.init().unwrap(); + let (cidp, dp) = setup(client.clone()); + Self { - builder: Builder::new(backend, client, &manager), + builder: Builder::new(client, backend, pool, executor, task_manager), cidp, dp, next: None, imports: Vec::new(), - handle: manager.spawn_handle(), _phantom: Default::default(), } } @@ -410,7 +420,7 @@ where .unwrap(); let Proposal { block, proof, .. 
} = self.builder.build_block( - self.handle.clone(), + self.builder.handle(), inherents, digest, Duration::from_secs(60), diff --git a/core/src/builder/stand_alone.rs b/core/src/builder/stand_alone.rs index 25a1d3c..f1137b0 100644 --- a/core/src/builder/stand_alone.rs +++ b/core/src/builder/stand_alone.rs @@ -15,7 +15,9 @@ use sc_client_api::{ use sc_client_db::Backend; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_executor::RuntimeVersionOf; -use sc_service::{SpawnTaskHandle, TFullClient, TaskManager}; +use sc_service::TFullClient; +use sc_transaction_pool::FullPool; +use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool}; use sp_api::{ApiExt, CallApiAt, ConstructRuntimeApi, ProvideRuntimeApi}; use sp_block_builder::BlockBuilder; use sp_consensus::{BlockOrigin, Proposal}; @@ -34,7 +36,7 @@ use crate::{ digest::DigestCreator, inherent::ArgsProvider, types::StoragePair, - PoolState, + Initiator, PoolState, }; pub struct StandAloneBuilder< @@ -46,6 +48,7 @@ pub struct StandAloneBuilder< DP, B = Backend, C = TFullClient, + A = FullPool, > where Block: BlockT, C: ProvideRuntimeApi @@ -56,18 +59,18 @@ pub struct StandAloneBuilder< + Sync + 'static, C::Api: TaggedTransactionQueue, + A: TransactionPool + MaintainedTransactionPool + 'static, { - builder: Builder, + builder: Builder, cidp: CIDP, dp: DP, next: Option<(Block, StorageProof)>, imports: Vec<(Block, StorageProof)>, - handle: SpawnTaskHandle, _phantom: PhantomData, } -impl - StandAloneBuilder +impl + StandAloneBuilder where B: BackendT + 'static, Block: BlockT, @@ -93,15 +96,22 @@ where + BlockImport + CallApiAt + sc_block_builder::BlockBuilderProvider, + A: TransactionPool + MaintainedTransactionPool + 'static, { - pub fn new(manager: &TaskManager, backend: Arc, client: Arc, cidp: CIDP, dp: DP) -> Self { + pub fn new(initiator: I, setup: F) -> Self + where + I: Initiator, + F: FnOnce(Arc) -> (CIDP, DP), + { + let (client, backend, pool, executor, task_manager) = initiator.init().unwrap(); + let (cidp, dp) = setup(client.clone()); + Self { - builder: Builder::new(backend, client, manager), + builder: Builder::new(client, backend, pool, executor, task_manager), cidp, dp, next: None, imports: Vec::new(), - handle: manager.spawn_handle(), _phantom: Default::default(), } } @@ -179,7 +189,7 @@ where .unwrap(); let Proposal { block, proof, .. } = self.builder.build_block( - self.handle.clone(), + self.builder.handle(), inherents, digest, Duration::from_secs(60), diff --git a/core/src/lib.rs b/core/src/lib.rs index bf79eee..e413d0b 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -20,7 +20,12 @@ pub use builder::{ stand_alone::StandAloneBuilder, PoolState, }; -pub use provider::EnvProvider; +pub use provider::{ + backend::{DiskDatabaseType, DiskDb, MemDb}, + initiator::{FromConfiguration, Init, PoolConfig}, + state::StateProvider, + BackendProvider, Initiator, +}; pub use types::{Bytes, StoragePair}; pub mod builder; diff --git a/core/src/provider/backend.rs b/core/src/provider/backend.rs new file mode 100644 index 0000000..5c710c1 --- /dev/null +++ b/core/src/provider/backend.rs @@ -0,0 +1,233 @@ +// Copyright 2021 Centrifuge Foundation (centrifuge.io). +// +// This file is part of the FUDGE project. +// FUDGE is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version (see http://www.gnu.org/licenses). 
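For orientation alongside this diff: after the refactor, the parachain, relay-chain and stand-alone builders all share the same constructor shape, taking an `Initiator` plus a setup closure instead of a prebuilt backend, client and `TaskManager`. A minimal sketch, assuming a `fudge_core` crate root with a public `provider` module; `MyBlock`, `MyRuntimeApi`, `build_cidp` and `build_dp` are placeholders, not part of the patch:

```rust
use fudge_core::{provider::initiator, StandAloneBuilder};
use tokio::runtime::Handle;

// The initiator owns creation of backend, client, transaction pool, executor
// and task manager; `default` wires an in-memory backend with sane defaults.
let init = initiator::default::<MyBlock, MyRuntimeApi>(Handle::current());

// The setup closure receives the freshly built client and returns the
// inherent-data-provider creator and the digest creator for this chain.
// (Type annotations for the builder's generics are elided in this sketch.)
let builder = StandAloneBuilder::new(init, |client| (build_cidp(client), build_dp()));
```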
+// Centrifuge is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +use std::{marker::PhantomData, path::PathBuf, sync::Arc}; + +use sc_client_db::{BlocksPruning, DatabaseSettings, DatabaseSource, PruningMode}; +use sc_service::TFullBackend; +use sp_api::BlockT; + +use crate::provider::BackendProvider; + +/// The default canonicalization delay of the +/// backends that are instantiated here. +const CANONICALIZATION_DELAY: u64 = 4096; + +/// The default cache size for a RocksDb +const ROCKS_DB_CACHE_SIZE: usize = 1024; + +/// A struct holding all necessary information +/// to create a memory backend that implements +/// `sc_client_api::Backend` +pub struct MemDb { + blocks_pruning: BlocksPruning, + trie_cache: Option, + state_pruning: Option, + require_create_flag: bool, + canonicalization_delay: u64, + _phantom: PhantomData, +} + +impl MemDb { + /// Creates a new `MemDb` with some sane + /// defaults. + /// + /// ```ignore + /// Self { + /// path, + /// blocks_pruning: BlocksPruning::All, + /// trie_cache: None, + /// state_pruning: Some(PruningMode::ArchiveAll), + /// require_create_flag: true, + /// canonicalization_delay: 4096, + /// _phantom: Default::default() + /// } + /// ``` + /// + /// Every default can be overwritten with the respective + /// `with_*` method of the struct. + pub fn new() -> Self { + Self { + blocks_pruning: BlocksPruning::All, + trie_cache: None, + state_pruning: Some(PruningMode::ArchiveAll), + require_create_flag: true, + canonicalization_delay: CANONICALIZATION_DELAY, + _phantom: Default::default(), + } + } + + /// Overwrites current block pruning. The newly set `BlockPruning` will be used + /// when a backend is created via `BackendProvider::provide(&self)` + pub fn with_blocks_pruning(&mut self, pruning: BlocksPruning) -> &mut Self { + self.blocks_pruning = pruning; + self + } + + /// Overwrites current trie cache. The newly set trie cache size will be used + /// when a backend is created via `BackendProvider::provide(&self)` + pub fn with_trie_cache(&mut self, trie_cache: usize) -> &mut Self { + self.trie_cache = Some(trie_cache); + self + } + + /// Overwrites current state pruning mode. The newly set `PruningMode` will be used + /// when a backend is created via `BackendProvider::provide(&self)` + pub fn with_state_pruning(&mut self, pruning: PruningMode) -> &mut Self { + self.state_pruning = Some(pruning); + self + } + + /// Overwrites current requiere create flag. The newly set flag will be used + /// when a backend is created via `BackendProvider::provide(&self)` + pub fn with_create_flag(&mut self, create: bool) -> &mut Self { + self.require_create_flag = create; + self + } + + /// Overwrites current canonicalization delay. 
The newly set canonicalization delay will be used + /// when a backend is created via `BackendProvider::provide(&self)` + pub fn with_canonicalization_delay(&mut self, delay: u64) -> &mut Self { + self.canonicalization_delay = delay; + self + } +} + +impl BackendProvider for MemDb { + type Backend = TFullBackend; + + fn provide(&self) -> Result, sp_blockchain::Error> { + let settings = DatabaseSettings { + trie_cache_maximum_size: self.trie_cache, + state_pruning: self.state_pruning.clone(), + source: DatabaseSource::Custom { + db: Arc::new(sp_database::MemDb::new()), + require_create_flag: self.require_create_flag, + }, + blocks_pruning: self.blocks_pruning, + }; + sc_client_db::Backend::new(settings, self.canonicalization_delay) + .map(|backend| Arc::new(backend)) + } +} + +/// A struct holding all necessary information +/// to create a disk backend that implements +/// `sc_client_api::Backend` +pub struct DiskDb { + path: PathBuf, + blocks_pruning: BlocksPruning, + trie_cache: Option, + state_pruning: Option, + database_type: DiskDatabaseType, + canonicalization_delay: u64, + _phantom: PhantomData, +} + +impl DiskDb { + /// Creates a new `DiskDb` with some sane + /// defaults. + /// + /// ```ignore + /// Self { + /// path, + /// blocks_pruning: BlocksPruning::All, + /// trie_cache: None, + /// state_pruning: Some(PruningMode::ArchiveAll), + /// database_type: DiskDatabaseType::RocksDb {cache_size: 1024}, + /// canonicalization_delay: 4096, + /// _phantom: Default::default() + /// } + /// ``` + /// + /// Every default can be overwritten with the respective + /// `with_*` method of the struct. + pub fn new(path: PathBuf) -> Self { + Self { + path, + blocks_pruning: BlocksPruning::All, + trie_cache: None, + state_pruning: Some(PruningMode::ArchiveAll), + database_type: DiskDatabaseType::RocksDb { + cache_size: ROCKS_DB_CACHE_SIZE, + }, + canonicalization_delay: CANONICALIZATION_DELAY, + _phantom: Default::default(), + } + } + + /// Overwrites current block pruning. The newly set `BlockPruning` will be used + /// when a backend is created via `BackendProvider::provide(&self)` + pub fn with_blocks_pruning(&mut self, pruning: BlocksPruning) -> &mut Self { + self.blocks_pruning = pruning; + self + } + + /// Overwrites current trie cache. The newly set trie cache size will be used + /// when a backend is created via `BackendProvider::provide(&self)` + pub fn with_trie_cache(&mut self, trie_cache: usize) -> &mut Self { + self.trie_cache = Some(trie_cache); + self + } + + /// Overwrites current state pruning mode. The newly set `PruningMode` will be used + /// when a backend is created via `BackendProvider::provide(&self)` + pub fn with_state_pruning(&mut self, pruning: PruningMode) -> &mut Self { + self.state_pruning = Some(pruning); + self + } + + /// Overwrites current database type. The newly set `DiskDatabaseType` will be used + /// when a backend is created via `BackendProvider::provide(&self)` + pub fn with_database_type(&mut self, datbase_type: DiskDatabaseType) -> &mut Self { + self.database_type = datbase_type; + self + } + + /// Overwrites current canonicalization delay. 
The newly set canonicalization delay will be used + /// when a backend is created via `BackendProvider::provide(&self)` + pub fn with_canonicalization_delay(&mut self, delay: u64) -> &mut Self { + self.canonicalization_delay = delay; + self + } +} + +impl BackendProvider for DiskDb { + type Backend = TFullBackend; + + fn provide(&self) -> Result, sp_blockchain::Error> { + let settings = DatabaseSettings { + trie_cache_maximum_size: self.trie_cache, + state_pruning: self.state_pruning.clone(), + source: match self.database_type { + DiskDatabaseType::RocksDb { cache_size } => DatabaseSource::RocksDb { + path: self.path.clone(), + cache_size, + }, + DiskDatabaseType::ParityDb => DatabaseSource::ParityDb { + path: self.path.clone(), + }, + }, + blocks_pruning: self.blocks_pruning, + }; + sc_client_db::Backend::new(settings, self.canonicalization_delay) + .map(|backend| Arc::new(backend)) + } +} + +/// Enum indicating which kind of disk +/// database should be used. +#[derive(Copy, Clone, Debug)] +pub enum DiskDatabaseType { + RocksDb { cache_size: usize }, + ParityDb, +} diff --git a/core/src/provider/externalities.rs b/core/src/provider/externalities.rs new file mode 100644 index 0000000..3e26ab0 --- /dev/null +++ b/core/src/provider/externalities.rs @@ -0,0 +1,111 @@ +// Copyright 2021 Centrifuge Foundation (centrifuge.io). +// +// This file is part of the FUDGE project. +// FUDGE is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version (see http://www.gnu.org/licenses). +// Centrifuge is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! The module provides a ways and means to interact with +//! and provide externalities. + +use std::panic::{AssertUnwindSafe, UnwindSafe}; + +use sp_core::Hasher; +use sp_externalities::Externalities; +use sp_state_machine::{Backend, Ext, OverlayedChanges, StorageChanges, StorageTransactionCache}; +use sp_storage::StateVersion; + +/// Provides a simple and secure way to execute code +/// in an externalities provided environment. +/// +/// The struct solely needs something that implements `trait sp_state_machine::Backend`. +/// From there on +pub struct ExternalitiesProvider<'a, H, B> +where + H: Hasher, + H::Out: codec::Codec + Ord + 'static, + B: Backend, +{ + overlay: OverlayedChanges, + storage_transaction_cache: StorageTransactionCache<>::Transaction, H>, + backend: &'a B, +} + +impl<'a, H, B> ExternalitiesProvider<'a, H, B> +where + H: Hasher, + H::Out: codec::Codec + Ord + 'static, + B: Backend, +{ + /// Create a new `ExternalitiesProvider`. + pub fn new(backend: &'a B) -> Self { + Self { + backend, + storage_transaction_cache: StorageTransactionCache::default(), + overlay: OverlayedChanges::default(), + } + } + + /// Get externalities implementation. + pub fn ext(&mut self) -> Ext { + Ext::new( + &mut self.overlay, + &mut self.storage_transaction_cache, + &self.backend, + None, + ) + } + + /// Drains the overlay changes into a `StorageChanges` struct. Leaving an empty overlay + /// in place. + /// + /// This can be used to retain changes that should be commited to an underlying database or + /// to reset the overlay. 
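As a side note on the two providers above: both `MemDb` and `DiskDb` implement the new `BackendProvider` trait, so either can be handed to an initiator. A minimal sketch, assuming the re-exports added in `lib.rs`, a `fudge_core` crate root and a placeholder `MyBlock` block type:

```rust
use std::path::PathBuf;

use fudge_core::{BackendProvider, DiskDatabaseType, DiskDb, MemDb};
use sc_client_db::{BlocksPruning, PruningMode};

// In-memory backend with the defaults described above.
let mem_backend = MemDb::<MyBlock>::new()
    .provide()
    .expect("in-memory backend can always be created");

// Disk backend using ParityDb, overriding some of the defaults.
let mut disk = DiskDb::<MyBlock>::new(PathBuf::from("/tmp/fudge-db"));
disk.with_database_type(DiskDatabaseType::ParityDb)
    .with_state_pruning(PruningMode::ArchiveAll)
    .with_blocks_pruning(BlocksPruning::All);
let disk_backend = disk.provide().expect("disk backend");
```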
+ pub fn drain(&mut self, state_version: StateVersion) -> StorageChanges { + self.overlay + .drain_storage_changes::( + self.backend, + &mut self.storage_transaction_cache, + state_version, + ) + .expect("Drain storage changes implementation does not return result but fails. Qed.") + } + + /// Execute some code in an externalities provided environment. + /// + /// Panics are NOT catched. + pub fn execute_with(&mut self, execute: impl FnOnce() -> R) -> R { + let mut ext = self.ext(); + ext.storage_start_transaction(); + let r = sp_externalities::set_and_run_with_externalities(&mut ext, execute); + ext.storage_commit_transaction().expect( + "Started a transaction above. Runtime takes care of opening and closing transactions correctly too. Qed.", + ); + r + } + + /// Execute the given closure while `self` is set as externalities. + /// + /// Returns the result of the given closure, if no panics occured. + /// Otherwise, returns `Err`. + pub fn execute_with_safe( + &mut self, + execute: impl FnOnce() -> R + UnwindSafe, + ) -> Result { + let mut ext = AssertUnwindSafe(self.ext()); + std::panic::catch_unwind(move || { + ext.storage_start_transaction(); + let r = sp_externalities::set_and_run_with_externalities(&mut *ext, execute); + ext.storage_commit_transaction().expect( + "Started a transaction above. Runtime takes care of opening and closing transactions correctly too. Qed.", + ); + r + }) + .map_err(|e| format!("Closure panicked: {:?}", e)) + } +} diff --git a/core/src/provider/externalities_provider.rs b/core/src/provider/externalities_provider.rs deleted file mode 100644 index 1e283e8..0000000 --- a/core/src/provider/externalities_provider.rs +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2021 Centrifuge Foundation (centrifuge.io). -// -// This file is part of the FUDGE project. -// FUDGE is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version (see http://www.gnu.org/licenses). -// Centrifuge is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -use std::panic::{AssertUnwindSafe, UnwindSafe}; - -use sp_core::Hasher; -use sp_externalities::Externalities; -use sp_state_machine::{Backend, Ext, OverlayedChanges, StorageChanges, StorageTransactionCache}; -use sp_storage::StateVersion; - -pub struct ExternalitiesProvider<'a, H, B> -where - H: Hasher, - H::Out: codec::Codec + Ord + 'static, - B: Backend, -{ - overlay: OverlayedChanges, - // TODO: Do we need an offchain-db here? - //offchain_db: TestPersistentOffchainDB, - storage_transaction_cache: StorageTransactionCache<>::Transaction, H>, - backend: &'a B, -} - -impl<'a, H, B> ExternalitiesProvider<'a, H, B> -where - H: Hasher, - H::Out: codec::Codec + Ord + 'static, - B: Backend, -{ - pub fn new(backend: &'a B) -> Self { - Self { - backend, - storage_transaction_cache: StorageTransactionCache::default(), - overlay: OverlayedChanges::default(), - } - } - - /// Get externalities implementation. - pub fn ext(&mut self) -> Ext { - Ext::new( - &mut self.overlay, - &mut self.storage_transaction_cache, - &self.backend, - None, - ) - } - - /* - /// Create a new instance_id of `TestExternalities` with storage. 
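To illustrate how the reworked `ExternalitiesProvider` is consumed by `Builder::with_state` in this patch: a closure is executed against a state backend, then the overlay is drained into `StorageChanges` for committing. A rough sketch; `state`, `MyBlock` and the code inside the closure are placeholders:

```rust
use fudge_core::provider::externalities::ExternalitiesProvider;
use sp_api::HashFor;
use sp_storage::StateVersion;

let mut ext = ExternalitiesProvider::<HashFor<MyBlock>, _>::new(&state);

// Run arbitrary code with the backend's state set as externalities.
let value = ext.execute_with(|| {
    // e.g. read or mutate runtime storage here
    42u32
});

// Drain the accumulated overlay so the changes can be committed elsewhere.
let changes = ext.drain(StateVersion::V1);
```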
- pub fn new(storage: Storage) -> Self { - Self::new_with_code(&[], storage) - } - - /// New empty test externalities. - pub fn new_empty() -> Self { - Self::new_with_code(&[], Storage::default()) - } - - /// Create a new instance_id of `TestExternalities` with code and storage. - pub fn new_with_code(code: &[u8], mut storage: Storage) -> Self { - let mut overlay = OverlayedChanges::default(); - let changes_trie_config = storage - .top - .get(CHANGES_TRIE_CONFIG) - .and_then(|v| Decode::decode(&mut &v[..]).ok()); - overlay.set_collect_extrinsics(changes_trie_config.is_some()); - - assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); - assert!(storage.children_default.keys().all(|key| is_child_storage_key(key))); - - storage.top.insert(CODE.to_vec(), code.to_vec()); - - let mut extensions = Extensions::default(); - extensions.register(TaskExecutorExt::new(TaskExecutor::new())); - - let offchain_db = TestPersistentOffchainDB::new(); - - TestExternalities { - overlay, - offchain_db, - changes_trie_config, - extensions, - changes_trie_storage: ChangesTrieInMemoryStorage::new(), - backend: storage.into(), - storage_transaction_cache: Default::default(), - } - } - - /// Returns the overlayed changes. - pub fn overlayed_changes(&self) -> &OverlayedChanges { - &self.overlay - } - - /// Move offchain changes from overlay to the persistent store. - pub fn persist_offchain_overlay(&mut self) { - self.offchain_db.apply_offchain_changes(self.overlay.offchain_drain_committed()); - } - - /// A shared reference type around the offchain worker storage. - pub fn offchain_db(&self) -> TestPersistentOffchainDB { - self.offchain_db.clone() - } - - /// Insert key/value into backend - pub fn insert(&mut self, k: StorageKey, v: StorageValue) { - self.backend.insert(vec![(None, vec![(k, Some(v))])]); - } - - /// Registers the given extension for this instance_id. - pub fn register_extension(&mut self, ext: E) { - self.extensions.register(ext); - } - - /// Get mutable reference to changes trie storage. - pub fn changes_trie_storage(&mut self) -> &mut ChangesTrieInMemoryStorage { - &mut self.changes_trie_storage - } - - /// Return a new backend with all pending changes. - /// - /// In contrast to [`commit_all`](Self::commit_all) this will not panic if there are open - /// transactions. - pub fn as_backend(&self) -> B { - let top: Vec<_> = - self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())).collect(); - let mut transaction = vec![(None, top)]; - - for (child_changes, child_info) in self.overlay.children() { - transaction.push(( - Some(child_info.clone()), - child_changes.map(|(k, v)| (k.clone(), v.value().cloned())).collect(), - )) - } - - self.backend.update(transaction) - } - */ - - /// Execute the given closure while `self` is set as externalities. - /// - /// Returns the result of the given closure. 
- pub fn execute_with(&mut self, execute: impl FnOnce() -> R) -> R { - let mut ext = self.ext(); - sp_externalities::set_and_run_with_externalities(&mut ext, execute) - } - - pub fn execute_with_mut( - &mut self, - execute: impl FnOnce() -> R, - ) -> (R, StorageChanges) { - let _parent_hash = self.overlay.storage_root( - self.backend, - &mut self.storage_transaction_cache, - StateVersion::V0, - ); - - let mut ext = self.ext(); - ext.storage_start_transaction(); - let r = sp_externalities::set_and_run_with_externalities(&mut ext, execute); - // TODO: Handle unwrap - ext.storage_commit_transaction().unwrap(); - - ( - r, - self.overlay - .drain_storage_changes::( - self.backend, - &mut self.storage_transaction_cache, - StateVersion::V0, - ) - .unwrap(), - ) - } - - /// Execute the given closure while `self` is set as externalities. - /// - /// Returns the result of the given closure, if no panics occured. - /// Otherwise, returns `Err`. - #[allow(dead_code)] - pub fn execute_with_safe( - &mut self, - f: impl FnOnce() -> R + UnwindSafe, - ) -> Result { - let mut ext = AssertUnwindSafe(self.ext()); - std::panic::catch_unwind(move || { - sp_externalities::set_and_run_with_externalities(&mut *ext, f) - }) - .map_err(|e| format!("Closure panicked: {:?}", e)) - } -} diff --git a/core/src/provider/initiator.rs b/core/src/provider/initiator.rs new file mode 100644 index 0000000..8f005bf --- /dev/null +++ b/core/src/provider/initiator.rs @@ -0,0 +1,411 @@ +// Copyright 2021 Centrifuge Foundation (centrifuge.io). +// +// This file is part of the FUDGE project. +// FUDGE is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version (see http://www.gnu.org/licenses). +// Centrifuge is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! The module provides a struct that implements `trait Initiator`. +//! Builders will expect something that implements this trait in +//! order to retrieve a `client` and a `backend`. + +use std::{marker::PhantomData, sync::Arc}; + +use polkadot_cli::service::HeaderBackend; +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sc_client_api::{ + execution_extensions::ExecutionStrategies, AuxStore, Backend, BlockBackend, BlockOf, + UsageProvider, +}; +use sc_consensus::BlockImport; +use sc_executor::{RuntimeVersionOf, WasmExecutionMethod, WasmExecutor}; +use sc_service::{ + ClientConfig, Configuration, KeystoreContainer, LocalCallExecutor, TFullBackend, TFullClient, + TaskManager, +}; +use sc_transaction_pool::{FullChainApi, FullPool, Options, RevalidationType}; +use sp_api::{ApiExt, BlockT, CallApiAt, ConstructRuntimeApi, ProvideRuntimeApi}; +use sp_core::traits::CodeExecutor; +use sp_keystore::SyncCryptoStorePtr; +use sp_runtime::{traits::BlockIdTo, BuildStorage}; +use sp_storage::Storage; +use sp_transaction_pool::runtime_api::TaggedTransactionQueue; +use tokio::runtime::Handle; + +use crate::{ + provider::{BackendProvider, ClientProvider, DefaultClient, TWasmExecutor}, + Initiator, MemDb, +}; + +/// A struct that holds configuration +/// options for a transaction pool. 
+pub struct PoolConfig { + is_validator: bool, + options: Options, + revalidation: RevalidationType, +} + +pub fn default_with( + handle: Handle, + backend: BP, +) -> Init, BP> +where + BP: BackendProvider>, + Block: BlockT, + RtApi: ConstructRuntimeApi> + + Send + + Sync + + 'static, + >>::RuntimeApi: + TaggedTransactionQueue + + BlockBuilderApi + + ApiExt as Backend>::State>, +{ + Init::new( + backend, + DefaultClient::::new(), + WasmExecutor::new(WasmExecutionMethod::Interpreted, Some(8), 8, None, 2), + handle, + ) +} + +pub fn default( + handle: Handle, +) -> Init, MemDb> +where + Block: BlockT, + RtApi: ConstructRuntimeApi> + + Send + + Sync + + 'static, + >>::RuntimeApi: + TaggedTransactionQueue + + BlockBuilderApi + + ApiExt as Backend>::State>, +{ + Init::new( + MemDb::new(), + DefaultClient::::new(), + WasmExecutor::new(WasmExecutionMethod::Interpreted, Some(8), 8, None, 2), + handle, + ) +} + +/// A structure that provides all necessary +/// configuration to instantiate the needed +/// structures for a core builder of fudge. +/// +/// It implements `Initiator`. +pub struct Init +where + Block: BlockT, + CP: ClientProvider, +{ + backend_provider: BP, + client_provider: CP, + genesis: Box, + handle: Handle, + exec: CP::Exec, + pool_config: PoolConfig, + /// Optional keystore that can be appended + keystore: Option, + /// Optional ClientConfig that can be appended + client_config: ClientConfig, + /// Optional ExecutionStrategies that can be appended + execution_strategies: ExecutionStrategies, +} + +impl Init +where + Block: BlockT, + CP: ClientProvider, + BP: BackendProvider, + CP::Backend: Backend + 'static, + CP::Client: 'static + + ProvideRuntimeApi + + BlockOf + + BlockBackend + + BlockIdTo + + Send + + Sync + + AuxStore + + UsageProvider + + HeaderBackend + + BlockImport + + CallApiAt + + BlockBuilderProvider, +{ + /// Creates a new `Init` instance with some sane defaults: + /// + /// ```ignore + /// Self { + /// backend, + /// genesis: None, + /// handle: TaskManager::new(handle, None).unwrap(), + /// exec, + /// keystore: None, + /// client_config: None, + /// pool_config: PoolConfig { + /// is_validator: true, + /// options: Options::default(), + /// revalidation: RevalidationType::Full, + /// }, + /// execution_strategies: None, + /// } + /// ``` + /// + /// Every configuration field can be overwritten with the respective `with_*` method. + pub fn new(backend_provider: BP, client_provider: CP, exec: CP::Exec, handle: Handle) -> Self { + Self { + backend_provider, + client_provider, + genesis: Box::new(Storage::default()), + handle, + exec, + keystore: None, + client_config: ClientConfig::default(), + pool_config: PoolConfig { + is_validator: true, + options: Options::default(), + revalidation: RevalidationType::Full, + }, + execution_strategies: ExecutionStrategies::default(), + } + } + + /// Overwrites the used `ExecutionStrategies` that will be used when initiating the + /// structs for a core builder. + pub fn with_exec_strategies(&mut self, execution_strategies: ExecutionStrategies) -> &mut Self { + self.execution_strategies = execution_strategies; + self + } + + /// Overwrites the used genesis that will be used when initiating the + /// structs for a core builder. + pub fn with_genesis(&mut self, genesis: Box) -> &mut Self { + self.genesis = genesis; + self + } + + /// Overwrites the used `ClientConfig` that will be used when initiating the + /// structs for a core builder. 
+ pub fn with_config(&mut self, config: ClientConfig) -> &mut Self { + self.client_config = config; + self + } + + /// Overwrites the used keystore pointer that will be used when initiating the + /// structs for a core builder. + pub fn with_keystore(&mut self, keystore: SyncCryptoStorePtr) -> &mut Self { + self.keystore = Some(keystore); + self + } + + /// Overwrites the used `PoolConfig` that will be used when initiating the + /// structs for a core builder. + pub fn with_pool_config(&mut self, pool_config: PoolConfig) -> &mut Self { + self.pool_config = pool_config; + self + } +} + +impl Initiator for Init +where + Block: BlockT, + CP: ClientProvider, + BP: BackendProvider, + CP::Backend: Backend + 'static, + CP::Client: 'static + + ProvideRuntimeApi + + BlockOf + + BlockBackend + + BlockIdTo + + Send + + Sync + + AuxStore + + UsageProvider + + HeaderBackend + + BlockImport + + CallApiAt + + BlockBuilderProvider, +{ + type Api = CP::Api; + type Backend = CP::Backend; + type Client = CP::Client; + type Error = sp_blockchain::Error; + type Executor = CP::Exec; + type Pool = FullPool; + + fn init( + self, + ) -> Result< + ( + Arc, + Arc, + Arc>, + CP::Exec, + TaskManager, + ), + sp_blockchain::Error, + > { + let task_manager = TaskManager::new(self.handle, None).unwrap(); + let backend = self.backend_provider.provide().unwrap(); + let call_executor = LocalCallExecutor::new( + backend.clone(), + self.exec.clone(), + Box::new(task_manager.spawn_handle()), + self.client_config.clone(), + ) + .unwrap(); + let client = self + .client_provider + .provide( + self.client_config, + self.genesis, + self.execution_strategies, + self.keystore, + backend.clone(), + call_executor, + ) + .unwrap(); + + let pool = Arc::new(FullPool::::with_revalidation_type( + self.pool_config.options, + self.pool_config.is_validator.into(), + Arc::new(FullChainApi::new( + client.clone(), + None, + &task_manager.spawn_essential_handle(), + )), + None, + self.pool_config.revalidation, + task_manager.spawn_essential_handle(), + client.usage_info().chain.best_number, + )); + + Ok((client, backend, pool, self.exec, task_manager)) + } +} + +/// A structure that provides all necessary +/// configuration to instantiate the needed +/// structures for a core builder of fudge. +/// +/// It implements `Initiator`. This +/// struct uses the `Configuration` struct +/// used by many services of actual Substrate nodes. +pub struct FromConfiguration { + exec: Exec, + config: Configuration, + keystore_receiver: Box, + pool_config: PoolConfig, + _phantom: PhantomData<(Block, RtApi)>, +} + +impl FromConfiguration { + /// Creates a new instance of `FromConfiguration` with some + /// sane defaults. + /// + /// ```ignore + /// Self { + /// exec, + /// config, + /// keystore_receiver: |_| {}, + /// pool_config: PoolConfig { + /// is_validator: true, + /// options: Options::default(), + /// revalidation: RevalidationType::Full, + /// }, + /// _phantom: Default::default(), + /// } + /// ``` + /// The given defaults can be overwritten with the + /// respective `with_*` methods. + pub fn new(exec: Exec, config: Configuration) -> Self { + Self { + exec, + config, + keystore_receiver: Box::new(|_| {}), + pool_config: PoolConfig { + is_validator: true, + options: Options::default(), + revalidation: RevalidationType::Full, + }, + _phantom: Default::default(), + } + } + + /// Overwrites the used `PoolConfig` that will be used when initiating the + /// structs for a core builder. 
+ pub fn with_pool_config(&mut self, pool_config: PoolConfig) -> &mut Self { + self.pool_config = pool_config; + self + } + + /// Overwrites the used keystore receiver that will be used when initiating the + /// structs for a core builder. + pub fn with_keystore_receiver(&mut self, receiver: R) -> &mut Self + where + R: FnOnce(KeystoreContainer) + 'static, + { + self.keystore_receiver = Box::new(receiver); + self + } +} + +impl Initiator for FromConfiguration +where + Block: BlockT, + RtApi: ConstructRuntimeApi> + Send + Sync + 'static, + >>::RuntimeApi: + TaggedTransactionQueue + + BlockBuilderApi + + ApiExt as Backend>::State>, + Exec: CodeExecutor + RuntimeVersionOf + Clone + 'static, +{ + type Api = RtApi::RuntimeApi; + type Backend = TFullBackend; + type Client = TFullClient; + type Error = sp_blockchain::Error; + type Executor = Exec; + type Pool = FullPool; + + fn init( + self, + ) -> Result< + ( + Arc>, + Arc>, + Arc>>, + Exec, + TaskManager, + ), + Self::Error, + > { + let (client, backend, keystore_container, task_manager) = + sc_service::new_full_parts(&self.config, None, self.exec.clone()).unwrap(); // TODO NEED own error type + let client = Arc::new(client); + + let pool = Arc::new(FullPool::::with_revalidation_type( + self.pool_config.options, + self.pool_config.is_validator.into(), + Arc::new(FullChainApi::new( + client.clone(), + None, + &task_manager.spawn_essential_handle(), + )), + None, + self.pool_config.revalidation, + task_manager.spawn_essential_handle(), + client.usage_info().chain.best_number, + )); + + (self.keystore_receiver)(keystore_container); + Ok((client, backend, pool, self.exec, task_manager)) + } +} diff --git a/core/src/provider/mod.rs b/core/src/provider/mod.rs index 0ca7802..e40d6aa 100644 --- a/core/src/provider/mod.rs +++ b/core/src/provider/mod.rs @@ -10,189 +10,169 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. 
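Complementing the two initiators above: `Init` is assembled from a `BackendProvider`, a `ClientProvider` and an executor, while `FromConfiguration` reuses a full Substrate `Configuration`. A hedged sketch of the first path, assuming a `fudge_core` crate root, placeholder `MyBlock`/`MyRuntimeApi` types and a placeholder `WASM_BINARY` runtime blob:

```rust
use fudge_core::provider::{DefaultClient, TWasmExecutor};
use fudge_core::{Init, MemDb, StateProvider};
use sc_executor::{WasmExecutionMethod, WasmExecutor};
use tokio::runtime::Handle;

let mut init = Init::new(
    MemDb::<MyBlock>::new(),
    DefaultClient::<MyBlock, MyRuntimeApi, TWasmExecutor>::new(),
    // Same executor construction the `default()` helper uses.
    WasmExecutor::new(WasmExecutionMethod::Interpreted, Some(8), 8, None, 2),
    Handle::current(),
);

// Override selected defaults before handing the initiator to a builder.
init.with_genesis(Box::new(StateProvider::new(WASM_BINARY)));
```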
-pub use externalities_provider::ExternalitiesProvider; -use sc_executor::RuntimeVersionOf; -use sc_service::{ - config::ExecutionStrategies, ClientConfig, Configuration, KeystoreContainer, TFullBackend, - TFullCallExecutor, TFullClient, TaskManager, +use std::{error::Error, marker::PhantomData, sync::Arc}; + +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sc_client_api::{ + execution_extensions::ExecutionStrategies, AuxStore, Backend as BackendT, Backend, + BlockBackend, BlockOf, HeaderBackend, UsageProvider, }; -use sp_api::{BlockT, ConstructRuntimeApi}; -use sp_core::traits::{CodeExecutor, SpawnNamed}; +use sc_consensus::BlockImport; +#[cfg(feature = "runtime-benchmarks")] +use sc_executor::sp_wasm_interface::HostFunctions; +use sc_executor::{RuntimeVersionOf, WasmExecutor}; +use sc_service::{ClientConfig, LocalCallExecutor, TFullBackend, TFullClient, TaskManager}; +use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool}; +use sp_api::{ApiExt, CallApiAt, ConstructRuntimeApi, ProvideRuntimeApi}; +use sp_block_builder::BlockBuilder; +use sp_core::traits::CodeExecutor; use sp_keystore::SyncCryptoStorePtr; -use sp_runtime::BuildStorage; -use sp_std::{marker::PhantomData, str::FromStr, sync::Arc}; -use sp_storage::Storage; - -pub use crate::provider::state_provider::DbOpen; -use crate::provider::state_provider::StateProvider; - -mod externalities_provider; -mod state_provider; +use sp_runtime::{ + traits::{Block as BlockT, BlockIdTo}, + BuildStorage, +}; +use sp_transaction_pool::runtime_api::TaggedTransactionQueue; + +pub mod backend; +pub mod externalities; +pub mod initiator; +pub mod state; + +pub trait Initiator { + type Api: BlockBuilder + + ApiExt>::State> + + BlockBuilderApi + + TaggedTransactionQueue; + type Client: 'static + + ProvideRuntimeApi + + BlockOf + + BlockBackend + + BlockIdTo + + Send + + Sync + + AuxStore + + UsageProvider + + HeaderBackend + + BlockImport + + CallApiAt + + BlockBuilderProvider; + type Backend: 'static + BackendT; + type Pool: 'static + + TransactionPool + + MaintainedTransactionPool; + type Executor: 'static + CodeExecutor + RuntimeVersionOf + Clone; + type Error: 'static + Error; -pub struct EnvProvider -where - Block: BlockT, -{ - state: StateProvider, Block>, - _phantom: PhantomData<(Block, RtApi, Exec)>, + fn init( + self, + ) -> Result< + ( + Arc, + Arc, + Arc, + Self::Executor, + TaskManager, + ), + Self::Error, + >; } -impl EnvProvider -where - Block: BlockT, - Block::Hash: FromStr, - RtApi: ConstructRuntimeApi> + Send, - Exec: CodeExecutor + RuntimeVersionOf + Clone + 'static, -{ - pub fn empty() -> Self { - Self { - state: StateProvider::empty_default(None), - _phantom: Default::default(), - } - } +pub trait BackendProvider { + type Backend: 'static + BackendT; - pub fn with_code(code: &'static [u8]) -> Self { - Self { - state: StateProvider::empty_default(Some(code)), - _phantom: Default::default(), - } - } - - pub fn from_spec(spec: &dyn BuildStorage) -> Self { - let storage = spec.build_storage().unwrap(); - Self::from_storage(storage) - } - - pub fn into_from_config( - config: &Configuration, - exec: Exec, - ) -> ( - TFullClient, - Arc>, - KeystoreContainer, - TaskManager, - ) { - //TODO: Handle unwrap - sc_service::new_full_parts(config, None, exec) - .map_err(|_| "err".to_string()) - .unwrap() - } - - pub fn from_storage(storage: Storage) -> Self { - Self { - state: StateProvider::from_storage(storage), - _phantom: Default::default(), - } - } - - pub fn from_db(open: DbOpen) -> Self { - Self { - 
state: StateProvider::from_db(open), - _phantom: Default::default(), - } - } - - pub fn from_storage_with_code(storage: Storage, code: &'static [u8]) -> Self { - let mut state = StateProvider::empty_default(Some(code)); - state.insert_storage(storage); + fn provide(&self) -> Result, sp_blockchain::Error>; +} - Self { - state, - _phantom: Default::default(), - } - } +pub trait ClientProvider { + type Api: BlockBuilder + + ApiExt>::State> + + BlockBuilderApi + + TaggedTransactionQueue; + type Backend: 'static + BackendT; + type Client: 'static + + ProvideRuntimeApi + + BlockOf + + BlockBackend + + BlockIdTo + + Send + + Sync + + AuxStore + + UsageProvider + + HeaderBackend + + BlockImport + + CallApiAt + + BlockBuilderProvider; + type Exec: CodeExecutor + RuntimeVersionOf + 'static; + + fn provide( + &self, + config: ClientConfig, + genesis: Box, + execution_strategies: ExecutionStrategies, + keystore: Option, + backend: Arc, + exec: LocalCallExecutor, + ) -> Result, ()>; +} - pub fn insert_storage(&mut self, storage: Storage) -> &mut Self { - self.state.insert_storage(storage); - self - } +pub struct DefaultClient(PhantomData<(Block, RtApi, Exec)>); - pub fn init_default( - self, - exec: Exec, - handle: Box, - ) -> (TFullClient, Arc>) { - self.init(exec, handle, None, None) +impl DefaultClient { + pub fn new() -> Self { + Self(Default::default()) } +} - pub fn init_with_config( - self, - exec: Exec, - handle: Box, - config: ClientConfig, - ) -> (TFullClient, Arc>) { - self.init(exec, handle, None, Some(config)) - } +#[cfg(not(feature = "runtime-benchmarks"))] +/// HostFunctions that do not include benchmarking specific host functions +pub type TWasmExecutor = WasmExecutor; +#[cfg(feature = "runtime-benchmarks")] +/// Host functions that include benchmarking specific functionalities +pub type TWasmExecutor = sc_executor::sp_wasm_interface::ExtendedHostFunctions< + sp_io::SubstrateHostFunctions, + frame_benchmarking::benchmarking::HostFunctions, +>; + +impl ClientProvider for DefaultClient +where + Block: BlockT, + RtApi: ConstructRuntimeApi> + Send + Sync + 'static, + >>::RuntimeApi: + TaggedTransactionQueue + + BlockBuilderApi + + ApiExt as Backend>::State>, + Exec: CodeExecutor + RuntimeVersionOf, +{ + type Api = as ProvideRuntimeApi>::Api; + type Backend = TFullBackend; + type Client = TFullClient; + type Exec = Exec; - pub fn init_full( - self, - exec: Exec, - handle: Box, - keystore: SyncCryptoStorePtr, + fn provide( + &self, config: ClientConfig, - ) -> (TFullClient, Arc>) { - self.init(exec, handle, Some(keystore), Some(config)) - } - - pub fn init_with_keystore( - self, - exec: Exec, - handle: Box, - keystore: SyncCryptoStorePtr, - ) -> (TFullClient, Arc>) { - self.init(exec, handle, Some(keystore), None) - } - - fn init( - self, - exec: Exec, - handle: Box, + genesis: Box, + execution_strategies: ExecutionStrategies, keystore: Option, - config: Option>, - ) -> (TFullClient, Arc>) { - let backend = self.state.backend(); - let config = config.clone().unwrap_or(Self::client_config()); - - // TODO: Handle unwrap - let executor = sc_service::client::LocalCallExecutor::new( + backend: Arc, + exec: LocalCallExecutor, + ) -> Result, ()> { + TFullClient::new( backend.clone(), exec, - handle, - config.clone(), - ) - .unwrap(); - - // TODO: Execution strategies default is not right. 
Use always wasm instead - let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( - ExecutionStrategies::default(), - keystore, - sc_offchain::OffchainDb::factory_from_backend(&*backend), - ); - - // TODO: Client config pass? - let client = sc_service::client::Client::< - TFullBackend, - TFullCallExecutor, - Block, - RtApi, - >::new( - backend.clone(), - executor, - &self.state, + &(*genesis), None, None, - extensions, + sc_client_api::execution_extensions::ExecutionExtensions::new( + execution_strategies, + keystore, + sc_offchain::OffchainDb::factory_from_backend(&*backend), + ), None, None, - config.clone(), + config, ) - .map_err(|_| "err".to_string()) - .unwrap(); - - (client, backend) - } - - fn client_config() -> ClientConfig { - ClientConfig::default() + .map_err(|_| ()) + .map(|client| Arc::new(client)) } } diff --git a/core/src/provider/state.rs b/core/src/provider/state.rs new file mode 100644 index 0000000..9f7a287 --- /dev/null +++ b/core/src/provider/state.rs @@ -0,0 +1,81 @@ +// Copyright 2021 Centrifuge Foundation (centrifuge.io). +// +// This file is part of the FUDGE project. +// FUDGE is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version (see http://www.gnu.org/licenses). +// Centrifuge is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! A module concerned with providing a data structure that can +//! be used to provide a genesis state for a builder. + +use sp_core::storage::well_known_keys::CODE; +use sp_runtime::BuildStorage; +use sp_storage::Storage; + +/// Helper struct that eases the provisioning of a +/// genesis state. +pub struct StateProvider { + pseudo_genesis: Storage, +} + +impl StateProvider { + /// Anything that implements `trait BuildStorage` can append its state to the + /// existing `StateProvider`. + /// + /// **Panics** upon providing wrongly formatted child storage items. + pub fn insert_storage(&mut self, storage: impl BuildStorage) -> &mut Self { + assert!(storage.assimilate_storage(&mut self.pseudo_genesis).is_ok()); + self + } + + /// Generates a truly empty `StateProvider` + pub fn empty() -> Self { + Self { + pseudo_genesis: Storage::default(), + } + } + + /// Builds a `StateProvider` from anything that implements + /// `trait BuildStorage` + /// + /// E.g.: This could be used with the spec of a chain to build the + /// correct genesis state for that chain. All specs implement + /// `trait BuildStorage` + /// (See: https://github.com/paritytech/substrate/blob/0d64ba4268106fffe430d41b541c1aeedd4f8da5/client/chain-spec/src/chain_spec.rs#L111-L141) + pub fn from_storage(storage: impl BuildStorage) -> Self { + let mut provider = StateProvider::empty(); + provider.insert_storage(storage); + provider + } + + /// Creates a new instance of `StateProvider`. + /// + /// As instantiating an actual client in most cases requires + /// an existing Wasm blob to be present, this method enforces + /// providing the code. + /// + /// Developers opting out of this should use `StateProvider::empty()`. 
+ pub fn new(code: &[u8]) -> Self { + let mut storage = Storage::default(); + storage.top.insert(CODE.to_vec(), code.to_vec()); + + let mut provider = StateProvider::empty(); + provider.insert_storage(storage); + provider + } +} + +impl BuildStorage for StateProvider { + fn build_storage(&self) -> Result { + Ok(self.pseudo_genesis.clone()) + } + + fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { + self.pseudo_genesis.assimilate_storage(storage) + } +} diff --git a/core/src/provider/state_provider.rs b/core/src/provider/state_provider.rs deleted file mode 100644 index 515e53a..0000000 --- a/core/src/provider/state_provider.rs +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2021 Centrifuge Foundation (centrifuge.io). -// -// This file is part of the FUDGE project. -// FUDGE is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version (see http://www.gnu.org/licenses). -// Centrifuge is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -use std::path::PathBuf; - -use sc_client_api::Backend; -use sc_client_db::{BlocksPruning, DatabaseSettings, DatabaseSource}; -use sc_service::PruningMode; -use sp_core::storage::well_known_keys::CODE; -use sp_database::MemDb; -use sp_runtime::{traits::Block as BlockT, BuildStorage}; -use sp_std::{marker::PhantomData, sync::Arc}; -use sp_storage::Storage; - -pub const CANONICALIZATION_DELAY: u64 = 4096; - -pub enum DbOpen { - FullConfig(DatabaseSettings), - SparseConfig { - path: PathBuf, - state_pruning: PruningMode, - }, - Default(PathBuf), -} - -pub struct StateProvider { - backend: Arc, - pseudo_genesis: Storage, - _phantom: PhantomData, -} - -impl StateProvider -where - Block: BlockT, - B: Backend, -{ - pub fn insert_storage(&mut self, storage: Storage) -> &mut Self { - let Storage { - top, - children_default, - } = storage; - - self.pseudo_genesis.top.extend(top.into_iter()); - for (k, other_map) in children_default.iter() { - let k = k.clone(); - if let Some(map) = self.pseudo_genesis.children_default.get_mut(&k) { - map.data - .extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); - if !map.child_info.try_update(&other_map.child_info) { - // TODO: Error out instead - //return Err("Incompatible child info update".to_string()) - } - } else { - self.pseudo_genesis - .children_default - .insert(k, other_map.clone()); - } - } - self - } - - pub fn backend(&self) -> Arc { - self.backend.clone() - } -} - -impl StateProvider, Block> -where - Block: BlockT, -{ - pub fn from_db(open: DbOpen) -> Self { - let settings = match open { - DbOpen::FullConfig(settings) => settings, - DbOpen::SparseConfig { - path, - state_pruning, - } => DatabaseSettings { - trie_cache_maximum_size: None, - state_pruning: Some(state_pruning), - source: DatabaseSource::RocksDb { - path: path, - cache_size: 1024, - }, - blocks_pruning: BlocksPruning::All, - }, - DbOpen::Default(path) => DatabaseSettings { - trie_cache_maximum_size: None, - state_pruning: None, - source: DatabaseSource::RocksDb { - path: path, - cache_size: 1024, - }, - blocks_pruning: BlocksPruning::All, - }, - }; - - let backend = Arc::new( - sc_client_db::Backend::new(settings, CANONICALIZATION_DELAY) - .map_err(|_| ()) - 
.unwrap(), - ); - - Self { - backend, - pseudo_genesis: Storage::default(), - _phantom: Default::default(), - } - } - - pub fn from_spec() -> Self { - todo!() - } - - pub fn from_storage(storage: Storage) -> Self { - let mut provider = StateProvider::empty_default(None); - provider.insert_storage(storage); - provider - } - - pub fn empty_default(code: Option<&[u8]>) -> Self { - // TODO: Handle unwrap - let mut provider = StateProvider::with_in_mem_db().unwrap(); - - let mut storage = Storage::default(); - if let Some(code) = code { - storage.top.insert(CODE.to_vec(), code.to_vec()); - } - - provider.insert_storage(storage); - provider - } - - fn with_in_mem_db() -> Result { - let settings = DatabaseSettings { - trie_cache_maximum_size: None, - state_pruning: Some(PruningMode::ArchiveAll), - source: DatabaseSource::Custom { - db: Arc::new(MemDb::new()), - require_create_flag: true, - }, - blocks_pruning: BlocksPruning::All, - }; - - let backend = - Arc::new(sc_client_db::Backend::new(settings, CANONICALIZATION_DELAY).map_err(|_| ())?); - - Ok(Self { - backend, - pseudo_genesis: Storage::default(), - _phantom: Default::default(), - }) - } -} - -impl BuildStorage for StateProvider -where - Block: BlockT, - B: Backend, -{ - fn build_storage(&self) -> Result { - Ok(self.pseudo_genesis.clone()) - } - - fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { - self.pseudo_genesis.assimilate_storage(storage) - } -} diff --git a/core/src/tests/parachain.rs b/core/src/tests/parachain.rs index 37d990d..d85461f 100644 --- a/core/src/tests/parachain.rs +++ b/core/src/tests/parachain.rs @@ -15,12 +15,10 @@ use fudge_test_runtime::{ AuraId, Block as PTestBlock, Runtime as PRuntime, RuntimeApi as PTestRtApi, WASM_BINARY as PCODE, }; -use polkadot_core_primitives::{Block as RTestBlock, Header as RTestHeader}; +use polkadot_core_primitives::Block as RTestBlock; use polkadot_parachain::primitives::Id; use polkadot_runtime::{Runtime as RRuntime, RuntimeApi as RTestRtApi, WASM_BINARY as RCODE}; -use polkadot_runtime_parachains::paras; -use sc_executor::{WasmExecutionMethod, WasmExecutor as TestExec}; -use sc_service::{TFullBackend, TFullClient, TaskManager}; +use sc_service::{TFullBackend, TFullClient}; use sp_api::BlockId; use sp_consensus_babe::SlotDuration; use sp_core::H256; @@ -32,208 +30,105 @@ use tokio::runtime::Handle; ///! 
Test for the ParachainBuilder use crate::digest::{DigestCreator, DigestProvider, FudgeAuraDigest, FudgeBabeDigest}; use crate::{ - builder::relay_chain::types::Heads, + builder::relay_chain::{types::Heads, InherentBuilder}, inherent::{ FudgeDummyInherentRelayParachain, FudgeInherentParaParachain, FudgeInherentTimestamp, }, - provider::EnvProvider, - FudgeParaChain, ParachainBuilder, RelaychainBuilder, + provider::TWasmExecutor, + FudgeParaChain, ParachainBuilder, RelaychainBuilder, StateProvider, }; -type RelayBuilder = RelaychainBuilder< - RTestBlock, - RTestRtApi, - TestExec, - Box< - dyn CreateInherentDataProviders< - RTestBlock, - (), - InherentDataProviders = ( - FudgeInherentTimestamp, - sp_consensus_babe::inherents::InherentDataProvider, - sp_authorship::InherentDataProvider, - FudgeDummyInherentRelayParachain, - ), - >, - >, - (), - Box + Send + Sync>, - R, - TFullBackend, - TFullClient>, ->; - -fn generate_default_setup_parachain( - manager: &TaskManager, - mut storage: Storage, - cidp: Box< - dyn FnOnce( - Arc>>, - ) -> CIDP, - >, - dp: Box< - dyn FnOnce( - Arc>>, - ) -> DP, +fn default_para_builder( + handle: Handle, + genesis: Storage, + inherent_builder: InherentBuilder< + TFullClient, + TFullBackend, >, ) -> ParachainBuilder< PTestBlock, PTestRtApi, - TestExec, - CIDP, + TWasmExecutor, + impl CreateInherentDataProviders, (), - DP, - TFullBackend, - TFullClient>, -> -where - CIDP: CreateInherentDataProviders + 'static, - DP: DigestCreator + 'static, -{ - let mut provider = - EnvProvider::>::with_code( - PCODE.unwrap(), - ); - pallet_aura::GenesisConfig:: { - authorities: vec![AuraId::from(sp_core::sr25519::Public([0u8; 32]))], - } - .assimilate_storage(&mut storage) - .unwrap(); - provider.insert_storage(storage); - - let (client, backend) = provider.init_default( - TestExec::new(WasmExecutionMethod::Interpreted, Some(8), 8, None, 2), - Box::new(manager.spawn_handle()), + impl DigestCreator, +> { + let mut state = StateProvider::new(PCODE.expect("Wasm is build. 
Qed.")); + state.insert_storage( + pallet_aura::GenesisConfig:: { + authorities: vec![AuraId::from(sp_core::sr25519::Public([0u8; 32]))], + } + .build_storage() + .unwrap(), ); - let client = Arc::new(client); - - ParachainBuilder::, _, _, _>::new( - manager, - backend, - client.clone(), - cidp(client.clone()), - dp(client.clone()), - ) -} + state.insert_storage(genesis); -fn generate_default_setup_relay_chain( - manager: &TaskManager, - mut storage: Storage, -) -> RelaychainBuilder< - RTestBlock, - RTestRtApi, - TestExec, - Box< - dyn CreateInherentDataProviders< - RTestBlock, - (), - InherentDataProviders = ( - FudgeInherentTimestamp, - sp_consensus_babe::inherents::InherentDataProvider, - sp_authorship::InherentDataProvider, - FudgeDummyInherentRelayParachain, - ), - >, - >, - (), - Box + Send + Sync>, - Runtime, - TFullBackend, - TFullClient>, -> -where - Runtime: pallet_babe::Config - + polkadot_runtime_parachains::configuration::Config - + paras::Config - + frame_system::Config - + pallet_timestamp::Config, -{ - let mut provider = - EnvProvider::>::with_code( - RCODE.unwrap(), - ); - polkadot_runtime_parachains::configuration::GenesisConfig::::default() - .assimilate_storage(&mut storage) - .unwrap(); - provider.insert_storage(storage); + let mut init = crate::provider::initiator::default(handle); + init.with_genesis(Box::new(state)); - let (client, backend) = provider.init_default( - TestExec::new(WasmExecutionMethod::Interpreted, Some(8), 8, None, 2), - Box::new(manager.spawn_handle()), - ); - let client = Arc::new(client); - let clone_client = client.clone(); // Init timestamp instance_id - let instance_id = - FudgeInherentTimestamp::create_instance(sp_std::time::Duration::from_secs(6), None); + let instance_id_para = + FudgeInherentTimestamp::create_instance(sp_std::time::Duration::from_secs(12), None); + + let cidp = move |_parent: H256, ()| { + let inherent_builder_clone = inherent_builder.clone(); + async move { + let timestamp = FudgeInherentTimestamp::get_instance(instance_id_para) + .expect("Instance is initialized. qed"); + + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + timestamp.current_time(), + SlotDuration::from_millis(std::time::Duration::from_secs(6).as_millis() as u64), + ); + let inherent = inherent_builder_clone.parachain_inherent().await.unwrap(); + let relay_para_inherent = FudgeInherentParaParachain::new(inherent); + Ok((timestamp, slot, relay_para_inherent)) + } + }; - let cidp = Box::new( - |clone_client: Arc< - TFullClient>, - >| { - move |parent: H256, ()| { - let client = clone_client.clone(); - let parent_header = client - .header(&BlockId::Hash(parent.clone())) - .unwrap() - .unwrap(); - - async move { - let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider( - &*client, parent, - )?; - - let timestamp = FudgeInherentTimestamp::get_instance(instance_id) - .expect("Instance is initialized. 
qed"); - - let slot = - sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - timestamp.current_time(), - SlotDuration::from_millis(std::time::Duration::from_secs(6).as_millis() as u64), - ); - - let relay_para_inherent = FudgeDummyInherentRelayParachain::new(parent_header); - Ok((timestamp, slot, uncles, relay_para_inherent)) - } + let dp = |clone_client: Arc>| { + move |parent, inherents| { + let client = clone_client.clone(); + + async move { + let aura = FudgeAuraDigest::< + PTestBlock, + TFullClient, + >::new(&*client); + + let digest = aura.build_digest(&parent, &inherents).await?; + Ok(digest) } - }, - ); + } + }; - let dp = Box::new(move |parent, inherents| async move { - let babe = FudgeBabeDigest::::new(); - let digest = babe.build_digest(&parent, &inherents).await?; - Ok(digest) - }); - - RelaychainBuilder::< - RTestBlock, - RTestRtApi, - TestExec, - _, - _, - _, - Runtime, - >::new(manager, backend, client, Box::new(cidp(clone_client)), dp) + ParachainBuilder::new(init, |client| (cidp, dp(client))) } -#[tokio::test] -async fn parachain_creates_correct_inherents() { - super::utils::init_logs(); - let manager = TaskManager::new(Handle::current(), None).unwrap(); - - let mut relay_builder = - generate_default_setup_relay_chain::(&manager, Storage::default()); - let para_id = Id::from(2001u32); - let inherent_builder = relay_builder.inherent_builder(para_id.clone()); +fn cidp_and_dp_relay( + client: Arc>, +) -> ( + impl CreateInherentDataProviders, + impl DigestCreator, +) { // Init timestamp instance_id - let instance_id_para = - FudgeInherentTimestamp::create_instance(sp_std::time::Duration::from_secs(12), None); + let instance_id = + FudgeInherentTimestamp::create_instance(sp_std::time::Duration::from_secs(6), None); + + let cidp = move |clone_client: Arc>| { + move |parent: H256, ()| { + let client = clone_client.clone(); + let parent_header = client + .header(&BlockId::Hash(parent.clone())) + .unwrap() + .unwrap(); - let cidp = Box::new(move |_| { - move |_parent: H256, ()| { - let inherent_builder_clone = inherent_builder.clone(); async move { - let timestamp = FudgeInherentTimestamp::get_instance(instance_id_para) + let uncles = + sc_consensus_uncles::create_uncles_inherent_data_provider(&*client, parent)?; + + let timestamp = FudgeInherentTimestamp::get_instance(instance_id) .expect("Instance is initialized. 
qed"); let slot = @@ -241,37 +136,59 @@ async fn parachain_creates_correct_inherents() { timestamp.current_time(), SlotDuration::from_millis(std::time::Duration::from_secs(6).as_millis() as u64), ); - let inherent = inherent_builder_clone.parachain_inherent().await.unwrap(); - let relay_para_inherent = FudgeInherentParaParachain::new(inherent); - Ok((timestamp, slot, relay_para_inherent)) + + let relay_para_inherent = FudgeDummyInherentRelayParachain::new(parent_header); + Ok((timestamp, uncles, slot, relay_para_inherent)) } } - }); - let dp = Box::new( - |clone_client: Arc< - TFullClient>, - >| { - move |parent, inherents| { - let client = clone_client.clone(); - - async move { - let aura = FudgeAuraDigest::< - PTestBlock, - TFullClient< - PTestBlock, - PTestRtApi, - TestExec, - >, - >::new(&*client); - - let digest = aura.build_digest(&parent, &inherents).await?; - Ok(digest) - } - } - }, + }; + + let dp = move |parent, inherents| async move { + let mut digest = sp_runtime::Digest::default(); + + let babe = FudgeBabeDigest::::new(); + babe.append_digest(&mut digest, &parent, &inherents).await?; + + Ok(digest) + }; + + (cidp(client), dp) +} + +fn default_relay_builder( + handle: Handle, + genesis: Storage, +) -> RelaychainBuilder< + RTestBlock, + RTestRtApi, + TWasmExecutor, + impl CreateInherentDataProviders, + (), + impl DigestCreator, + RRuntime, +> { + let mut state = StateProvider::new(RCODE.expect("Wasm is build. Qed.")); + state.insert_storage( + polkadot_runtime_parachains::configuration::GenesisConfig::::default() + .build_storage() + .unwrap(), ); + state.insert_storage(genesis); + + let mut init = crate::provider::initiator::default(handle); + init.with_genesis(Box::new(state)); - let mut builder = generate_default_setup_parachain(&manager, Storage::default(), cidp, dp); + RelaychainBuilder::new(init, cidp_and_dp_relay) +} + +#[tokio::test] +async fn parachain_creates_correct_inherents() { + super::utils::init_logs(); + + let mut relay_builder = default_relay_builder(Handle::current(), Storage::default()); + let para_id = Id::from(2001u32); + let inherent_builder = relay_builder.inherent_builder(para_id.clone()); + let mut builder = default_para_builder(Handle::current(), Storage::default(), inherent_builder); let para = FudgeParaChain { id: para_id, diff --git a/core/src/tests/relay_chain.rs b/core/src/tests/relay_chain.rs index 64d5587..a726ea8 100644 --- a/core/src/tests/relay_chain.rs +++ b/core/src/tests/relay_chain.rs @@ -13,9 +13,7 @@ use fudge_test_runtime::WASM_BINARY as PARA_CODE; use polkadot_parachain::primitives::{HeadData, Id, ValidationCode}; use polkadot_runtime::{Block as TestBlock, Runtime, RuntimeApi as TestRtApi, WASM_BINARY as CODE}; -use polkadot_runtime_parachains::paras; -use sc_executor::{WasmExecutionMethod, WasmExecutor as TestExec}; -use sc_service::{TFullBackend, TFullClient, TaskManager}; +use sc_service::TFullClient; use sp_api::BlockId; use sp_consensus_babe::SlotDuration; use sp_core::H256; @@ -27,110 +25,85 @@ use tokio::runtime::Handle; use crate::{ digest::{DigestCreator, DigestProvider, FudgeBabeDigest}, inherent::{FudgeDummyInherentRelayParachain, FudgeInherentTimestamp}, - provider::EnvProvider, - FudgeParaChain, RelayChainTypes, RelaychainBuilder, + provider::TWasmExecutor, + FudgeParaChain, RelayChainTypes, RelaychainBuilder, StateProvider, }; -fn generate_default_setup_relay_chain( - manager: &TaskManager, - storage: Storage, - cidp: Box< - dyn FnOnce( - Arc>>, - ) -> CIDP, - >, - dp: DP, -) -> RelaychainBuilder< - TestBlock, - 
TestRtApi, - TestExec, - CIDP, - (), - DP, - Runtime, - TFullBackend, - TFullClient>, -> -where - CIDP: CreateInherentDataProviders + 'static, - DP: DigestCreator + 'static, - Runtime: paras::Config + frame_system::Config, -{ - let mut provider = - EnvProvider::>::with_code( - CODE.unwrap(), - ); - provider.insert_storage(storage); - - let (client, backend) = provider.init_default( - TestExec::new(WasmExecutionMethod::Interpreted, Some(8), 8, None, 2), - Box::new(manager.spawn_handle()), - ); - let client = Arc::new(client); - let clone_client = client.clone(); - - RelaychainBuilder::< - TestBlock, - TestRtApi, - TestExec, - _, - _, - _, - Runtime, - >::new(manager, backend, client, cidp(clone_client), dp) -} - -#[tokio::test] -async fn onboarding_parachain_works() { - super::utils::init_logs(); - - let manager = TaskManager::new(Handle::current(), None).unwrap(); +fn cidp_and_dp( + client: Arc>, +) -> ( + impl CreateInherentDataProviders, + impl DigestCreator, +) { // Init timestamp instance_id let instance_id = FudgeInherentTimestamp::create_instance(sp_std::time::Duration::from_secs(6), None); - let cidp = Box::new( - move |clone_client: Arc< - TFullClient>, - >| { - move |parent: H256, ()| { - let client = clone_client.clone(); - let parent_header = client - .header(&BlockId::Hash(parent.clone())) - .unwrap() - .unwrap(); - - async move { - let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider( - &*client, parent, - )?; - - let timestamp = FudgeInherentTimestamp::get_instance(instance_id) - .expect("Instance is initialized. qed"); - - let slot = - sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - timestamp.current_time(), - SlotDuration::from_millis(std::time::Duration::from_secs(6).as_millis() as u64), - ); - - let relay_para_inherent = FudgeDummyInherentRelayParachain::new(parent_header); - Ok((timestamp, uncles, slot, relay_para_inherent)) - } + let cidp = move |clone_client: Arc>| { + move |parent: H256, ()| { + let client = clone_client.clone(); + let parent_header = client + .header(&BlockId::Hash(parent.clone())) + .unwrap() + .unwrap(); + + async move { + let uncles = + sc_consensus_uncles::create_uncles_inherent_data_provider(&*client, parent)?; + + let timestamp = FudgeInherentTimestamp::get_instance(instance_id) + .expect("Instance is initialized. qed"); + + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + timestamp.current_time(), + SlotDuration::from_millis(std::time::Duration::from_secs(6).as_millis() as u64), + ); + + let relay_para_inherent = FudgeDummyInherentRelayParachain::new(parent_header); + Ok((timestamp, uncles, slot, relay_para_inherent)) } - }, - ); - let dp = Box::new(move |parent, inherents| async move { + } + }; + + let dp = move |parent, inherents| async move { let mut digest = sp_runtime::Digest::default(); let babe = FudgeBabeDigest::::new(); babe.append_digest(&mut digest, &parent, &inherents).await?; Ok(digest) - }); - let mut builder = - generate_default_setup_relay_chain::<_, _, Runtime>(&manager, Storage::default(), cidp, dp); + }; + + (cidp(client), dp) +} + +fn default_relay_builder( + handle: Handle, + genesis: Storage, +) -> RelaychainBuilder< + TestBlock, + TestRtApi, + TWasmExecutor, + impl CreateInherentDataProviders, + (), + impl DigestCreator, + Runtime, +> { + let mut state = StateProvider::new(CODE.expect("Wasm is build. 
Qed.")); + state.insert_storage(genesis); + + let mut init = crate::provider::initiator::default(handle); + init.with_genesis(Box::new(state)); + + RelaychainBuilder::new(init, cidp_and_dp) +} + +#[tokio::test] +async fn onboarding_parachain_works() { + super::utils::init_logs(); + let mut builder = default_relay_builder(Handle::current(), Storage::default()); let id = Id::from(2002u32); let code = ValidationCode(PARA_CODE.unwrap().to_vec()); let code_hash = code.hash(); diff --git a/core/src/tests/stand_alone.rs b/core/src/tests/stand_alone.rs index 874c966..7674f4f 100644 --- a/core/src/tests/stand_alone.rs +++ b/core/src/tests/stand_alone.rs @@ -10,79 +10,120 @@ // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. +use std::path::PathBuf; + use frame_benchmarking::account; use polkadot_runtime::{Block as TestBlock, Runtime, RuntimeApi as TestRtApi, WASM_BINARY as CODE}; -use sc_executor::{WasmExecutionMethod, WasmExecutor as TestExec}; -use sc_service::{PruningMode, TFullBackend, TFullClient, TaskManager}; +use sc_service::TFullClient; use sp_api::BlockId; use sp_consensus_babe::SlotDuration; use sp_core::H256; use sp_inherents::CreateInherentDataProviders; -use sp_runtime::{AccountId32, CryptoTypeId, KeyTypeId, MultiAddress, Storage}; +use sp_runtime::{AccountId32, MultiAddress, Storage}; use sp_std::sync::Arc; use tokio::runtime::Handle; use crate::{ digest::{DigestCreator, DigestProvider, FudgeBabeDigest}, inherent::{FudgeDummyInherentRelayParachain, FudgeInherentTimestamp}, - provider::{DbOpen, EnvProvider}, - StandAloneBuilder, + provider::TWasmExecutor, + DiskDb, StandAloneBuilder, StateProvider, }; -const KEY_TYPE: KeyTypeId = KeyTypeId(*b"test"); -const CRYPTO_TYPE: CryptoTypeId = CryptoTypeId(*b"test"); - -fn generate_default_setup_stand_alone( - manager: &TaskManager, - storage: Storage, - cidp: Box< - dyn FnOnce( - Arc>>, - ) -> CIDP, - >, - dp: DP, +fn cidp_and_dp( + client: Arc>, +) -> ( + impl CreateInherentDataProviders, + impl DigestCreator, +) { + // Init timestamp instance_id + let instance_id = + FudgeInherentTimestamp::create_instance(sp_std::time::Duration::from_secs(6), None); + + let cidp = move |clone_client: Arc>| { + move |parent: H256, ()| { + let client = clone_client.clone(); + let parent_header = client + .header(&BlockId::Hash(parent.clone())) + .unwrap() + .unwrap(); + + async move { + let uncles = + sc_consensus_uncles::create_uncles_inherent_data_provider(&*client, parent)?; + + let timestamp = FudgeInherentTimestamp::get_instance(instance_id) + .expect("Instance is initialized. 
qed"); + + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + timestamp.current_time(), + SlotDuration::from_millis(std::time::Duration::from_secs(6).as_millis() as u64), + ); + + let relay_para_inherent = FudgeDummyInherentRelayParachain::new(parent_header); + Ok((timestamp, uncles, slot, relay_para_inherent)) + } + } + }; + + let dp = move |parent, inherents| async move { + let mut digest = sp_runtime::Digest::default(); + + let babe = FudgeBabeDigest::::new(); + babe.append_digest(&mut digest, &parent, &inherents).await?; + + Ok(digest) + }; + + (cidp(client), dp) +} + +fn default_builder( + handle: Handle, + genesis: Storage, ) -> StandAloneBuilder< TestBlock, TestRtApi, - TestExec, - CIDP, + TWasmExecutor, + impl CreateInherentDataProviders, (), - DP, - TFullBackend, - TFullClient>, -> -where - CIDP: CreateInherentDataProviders + 'static, - DP: DigestCreator + 'static, -{ - let mut provider = - EnvProvider::>::with_code( - CODE.unwrap(), - ); - provider.insert_storage(storage); - - let (client, backend) = provider.init_default( - TestExec::new(WasmExecutionMethod::Interpreted, Some(8), 8, None, 2), - Box::new(manager.spawn_handle()), - ); - let client = Arc::new(client); - let clone_client = client.clone(); - - StandAloneBuilder::, _, _, _>::new( - manager, - backend, - client, - cidp(clone_client), - dp, - ) + impl DigestCreator, +> { + let mut state = StateProvider::new(CODE.expect("Wasm is build. Qed.")); + state.insert_storage(genesis); + + let mut init = crate::provider::initiator::default(handle); + init.with_genesis(Box::new(state)); + + StandAloneBuilder::new(init, cidp_and_dp) } +fn default_builder_disk( + handle: Handle, + path: PathBuf, + genesis: Storage, +) -> StandAloneBuilder< + TestBlock, + TestRtApi, + TWasmExecutor, + impl CreateInherentDataProviders, + (), + impl DigestCreator, +> { + let mut state = StateProvider::new(CODE.expect("Wasm is build. Qed.")); + state.insert_storage(genesis); + + let mut init = crate::provider::initiator::default_with(handle, DiskDb::new(path)); + init.with_genesis(Box::new(state)); + + StandAloneBuilder::new(init, cidp_and_dp) +} #[tokio::test] async fn mutating_genesis_works() { super::utils::init_logs(); - let manager = TaskManager::new(Handle::current(), None).unwrap(); - let storage = pallet_balances::GenesisConfig:: { + let genesis = pallet_balances::GenesisConfig:: { balances: vec![ (account("test", 0, 0), 10_000_000_000_000u128), (AccountId32::new([0u8; 32]), 10_000_000_000_000u128), @@ -90,53 +131,8 @@ async fn mutating_genesis_works() { } .build_storage() .unwrap(); - // Init timestamp instance_id - let instance_id = - FudgeInherentTimestamp::create_instance(sp_std::time::Duration::from_secs(6), None); - - let cidp = Box::new( - move |clone_client: Arc< - TFullClient>, - >| { - move |parent: H256, ()| { - let client = clone_client.clone(); - let parent_header = client - .header(&BlockId::Hash(parent.clone())) - .unwrap() - .unwrap(); - - async move { - let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider( - &*client, parent, - )?; - - let timestamp = FudgeInherentTimestamp::get_instance(instance_id) - .expect("Instance is initialized. 
qed"); - - let slot = - sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - timestamp.current_time(), - SlotDuration::from_millis(std::time::Duration::from_secs(6).as_millis() as u64), - ); - - let relay_para_inherent = FudgeDummyInherentRelayParachain::new(parent_header); - Ok((timestamp, uncles, slot, relay_para_inherent)) - } - } - }, - ); - - let dp = Box::new(move |parent, inherents| async move { - let mut digest = sp_runtime::Digest::default(); - - let babe = FudgeBabeDigest::::new(); - babe.append_digest(&mut digest, &parent, &inherents).await?; - - Ok(digest) - }); - - let mut builder = generate_default_setup_stand_alone(&manager, storage, cidp, dp); + let mut builder = default_builder(Handle::current(), genesis); let (send_data_pre, recv_data_pre) = builder .with_mut_state(|| { polkadot_runtime::Balances::transfer( @@ -185,18 +181,7 @@ async fn opening_state_from_db_path_works() { } create_dir_all(&static_path).unwrap(); - let manager = TaskManager::new(Handle::current(), None).unwrap(); - - let instance_id = - FudgeInherentTimestamp::create_instance(sp_std::time::Duration::from_secs(6), None); - let mut provider = - EnvProvider::>::from_db( - DbOpen::SparseConfig { - path: static_path.clone(), - state_pruning: PruningMode::ArchiveAll, - }, - ); - let mut storage = pallet_balances::GenesisConfig:: { + let genesis = pallet_balances::GenesisConfig:: { balances: vec![ (account("test", 0, 0), 10_000_000_000_000u128), (AccountId32::new([0u8; 32]), 10_000_000_000_000u128), @@ -204,65 +189,8 @@ async fn opening_state_from_db_path_works() { } .build_storage() .unwrap(); - storage.top.insert( - sp_storage::well_known_keys::CODE.to_vec(), - CODE.unwrap().to_vec(), - ); - provider.insert_storage(storage); - let (client, backend) = provider.init_default( - TestExec::new(WasmExecutionMethod::Interpreted, Some(8), 8, None, 2), - Box::new(manager.spawn_handle()), - ); - let client = Arc::new(client); - let cidp = Box::new( - move |clone_client: Arc< - TFullClient>, - >| { - move |parent: H256, ()| { - let client = clone_client.clone(); - let parent_header = client - .header(&BlockId::Hash(parent.clone())) - .unwrap() - .unwrap(); - - async move { - let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider( - &*client, parent, - )?; - - let timestamp = FudgeInherentTimestamp::get_instance(instance_id) - .expect("Instance is initialized. qed"); - - let slot = - sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - timestamp.current_time(), - SlotDuration::from_millis(std::time::Duration::from_secs(6).as_millis() as u64), - ); - - let relay_para_inherent = FudgeDummyInherentRelayParachain::new(parent_header); - Ok((timestamp, uncles, slot, relay_para_inherent)) - } - } - }, - ); - - let dp = Box::new(move |parent, inherents| async move { - let mut digest = sp_runtime::Digest::default(); - - let babe = FudgeBabeDigest::::new(); - babe.append_digest(&mut digest, &parent, &inherents).await?; - Ok(digest) - }); - - let mut builder = StandAloneBuilder::< - TestBlock, - TestRtApi, - TestExec, - _, - _, - _, - >::new(&manager, backend, client.clone(), cidp(client), dp); + let mut builder = default_builder_disk(Handle::current(), static_path.clone(), genesis); for _ in 0..20 { builder.build_block().unwrap(); @@ -359,50 +287,7 @@ async fn build_relay_block_works() { // install global collector configured based on RUST_LOG env var. 
super::utils::init_logs(); - let manager = TaskManager::new(Handle::current(), None).unwrap(); - // Init timestamp instance_id - let instance_id = - FudgeInherentTimestamp::create_instance(sp_std::time::Duration::from_secs(6), None); - - let cidp = Box::new( - move |clone_client: Arc< - TFullClient>, - >| { - move |parent: H256, ()| { - let client = clone_client.clone(); - let parent_header = client - .header(&BlockId::Hash(parent.clone())) - .unwrap() - .unwrap(); - - async move { - let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider( - &*client, parent, - )?; - - let timestamp = FudgeInherentTimestamp::get_instance(instance_id) - .expect("Instance is initialized. qed"); - let slot = - sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - timestamp.current_time(), - SlotDuration::from_millis(std::time::Duration::from_secs(6).as_millis() as u64), - ); - - let relay_para_inherent = FudgeDummyInherentRelayParachain::new(parent_header); - Ok((timestamp, slot, uncles, relay_para_inherent)) - } - } - }, - ); - let dp = Box::new(move |parent, inherents| async move { - let mut digest = sp_runtime::Digest::default(); - - let babe = FudgeBabeDigest::::new(); - babe.append_digest(&mut digest, &parent, &inherents).await?; - - Ok(digest) - }); - let mut builder = generate_default_setup_stand_alone(&manager, Storage::default(), cidp, dp); + let mut builder = default_builder(Handle::current(), Storage::default()); let num_before = builder .with_state(|| frame_system::Pallet::::block_number()) @@ -435,51 +320,7 @@ async fn build_relay_block_works() { async fn build_relay_block_works_and_mut_is_build_upon() { super::utils::init_logs(); - let manager = TaskManager::new(Handle::current(), None).unwrap(); - // Init timestamp instance_id - let instance_id = - FudgeInherentTimestamp::create_instance(sp_std::time::Duration::from_secs(6), None); - - let cidp = Box::new( - move |clone_client: Arc< - TFullClient>, - >| { - move |parent: H256, ()| { - let client = clone_client.clone(); - let parent_header = client - .header(&BlockId::Hash(parent.clone())) - .unwrap() - .unwrap(); - - async move { - let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider( - &*client, parent, - )?; - - let timestamp = FudgeInherentTimestamp::get_instance(instance_id) - .expect("Instance is initialized. 
qed"); - let slot = - sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - timestamp.current_time(), - SlotDuration::from_millis(std::time::Duration::from_secs(6).as_millis() as u64), - ); - - let relay_para_inherent = FudgeDummyInherentRelayParachain::new(parent_header); - Ok((timestamp, slot, uncles, relay_para_inherent)) - } - } - }, - ); - let dp = Box::new(move |parent, inherents| async move { - let mut digest = sp_runtime::Digest::default(); - - let babe = FudgeBabeDigest::::new(); - babe.append_digest(&mut digest, &parent, &inherents).await?; - - Ok(digest) - }); - - let storage = pallet_balances::GenesisConfig:: { + let genesis = pallet_balances::GenesisConfig:: { balances: vec![ (account("test", 0, 0), 10_000_000_000_000u128), (AccountId32::new([0u8; 32]), 10_000_000_000_000u128), @@ -488,7 +329,7 @@ async fn build_relay_block_works_and_mut_is_build_upon() { .build_storage() .unwrap(); - let mut builder = generate_default_setup_stand_alone(&manager, storage, cidp, dp); + let mut builder = default_builder(Handle::current(), genesis); let num_before = builder .with_state(|| frame_system::Pallet::::block_number()) diff --git a/fudge/src/lib.rs b/fudge/src/lib.rs index 7b23e15..e355f85 100644 --- a/fudge/src/lib.rs +++ b/fudge/src/lib.rs @@ -11,47 +11,55 @@ // GNU General Public License for more details. pub use fudge_companion::companion; -pub use fudge_core::{digest, inherent, provider::EnvProvider}; +use fudge_core::provider::TWasmExecutor; +pub use fudge_core::{ + digest, inherent, + provider::{ + backend::{DiskDatabaseType, DiskDb, MemDb}, + initiator::{FromConfiguration, Init, PoolConfig}, + state::StateProvider, + BackendProvider, ClientProvider, DefaultClient, Initiator, + }, +}; ///! FUDGE - FUlly Decoupled Generic Environment for Substrate-based Chains ///! ///! Generally only this dependency is needed in order to use FUDGE. ///! Developers who want to use the more raw apis and types are ///! referred to the fudge-core repository. -use sc_executor::WasmExecutor; use sc_service::{TFullBackend, TFullClient}; -pub type ParachainBuilder = fudge_core::ParachainBuilder< +pub type ParachainBuilder = fudge_core::ParachainBuilder< Block, RtApi, - WasmExecutor, + TWasmExecutor, CIDP, (), DP, TFullBackend, - TFullClient>, + TFullClient, >; -pub type RelaychainBuilder = fudge_core::RelaychainBuilder< +pub type RelaychainBuilder = fudge_core::RelaychainBuilder< Block, RtApi, - WasmExecutor, + TWasmExecutor, CIDP, (), DP, Runtime, TFullBackend, - TFullClient>, + TFullClient, >; -pub type StandaloneBuilder = fudge_core::StandAloneBuilder< +pub type StandaloneBuilder = fudge_core::StandAloneBuilder< Block, RtApi, - WasmExecutor, + TWasmExecutor, CIDP, (), DP, TFullBackend, - TFullClient>, + TFullClient, >; pub mod primitives { diff --git a/integration_test/src/main.rs b/integration_test/src/main.rs index f1bff2d..d2569ae 100644 --- a/integration_test/src/main.rs +++ b/integration_test/src/main.rs @@ -63,9 +63,9 @@ const PARA_ID: u32 = 2002u32; #[fudge::companion] struct TestEnv { #[fudge::parachain(PARA_ID)] - centrifuge: ParachainBuilder, + centrifuge: ParachainBuilder, #[fudge::parachain(2000u32)] - sibling: ParachainBuilder, + sibling: ParachainBuilder, #[fudge::relaychain] - polkadot: RelaychainBuilder, + polkadot: RelaychainBuilder, }