diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 2c2909bd45..0753f66837 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -139,6 +139,7 @@ jobs: - tests::nakamoto_integrations::utxo_check_on_startup_recover - tests::nakamoto_integrations::v3_signer_api_endpoint - tests::nakamoto_integrations::signer_chainstate + - tests::nakamoto_integrations::clarity_cost_spend_down # TODO: enable these once v1 signer is supported by a new nakamoto epoch # - tests::signer::v1::dkg # - tests::signer::v1::sign_request_rejected diff --git a/CHANGELOG.md b/CHANGELOG.md index 0470bab77b..e66d126390 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Remove the panic for reporting DB deadlocks (just error and continue waiting) - Add index to `metadata_table` in Clarity DB on `blockhash` - Add `block_commit_delay_ms` to the config file to control the time to wait after seeing a new burn block, before submitting a block commit, to allow time for the first Nakamoto block of the new tenure to be mined, allowing this miner to avoid the need to RBF the block commit. +- Add `tenure_cost_limit_per_block_percentage` to the miner config file to control the percentage remaining tenure cost limit to consume per nakamoto block. ## [3.0.0.0.1] diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 0751822ed0..b3ee746fcf 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -896,6 +896,7 @@ impl LimitedCostTracker { Self::Free => ExecutionCost::max_value(), } } + pub fn get_memory(&self) -> u64 { match self { Self::Limited(TrackerData { memory, .. 
}) => *memory, @@ -1170,6 +1171,7 @@ pub trait CostOverflowingMath { fn cost_overflow_mul(self, other: T) -> Result; fn cost_overflow_add(self, other: T) -> Result; fn cost_overflow_sub(self, other: T) -> Result; + fn cost_overflow_div(self, other: T) -> Result; } impl CostOverflowingMath for u64 { @@ -1185,6 +1187,10 @@ impl CostOverflowingMath for u64 { self.checked_sub(other) .ok_or_else(|| CostErrors::CostOverflow) } + fn cost_overflow_div(self, other: u64) -> Result { + self.checked_div(other) + .ok_or_else(|| CostErrors::CostOverflow) + } } impl ExecutionCost { @@ -1293,6 +1299,15 @@ impl ExecutionCost { Ok(()) } + pub fn divide(&mut self, divisor: u64) -> Result<()> { + self.runtime = self.runtime.cost_overflow_div(divisor)?; + self.read_count = self.read_count.cost_overflow_div(divisor)?; + self.read_length = self.read_length.cost_overflow_div(divisor)?; + self.write_length = self.write_length.cost_overflow_div(divisor)?; + self.write_count = self.write_count.cost_overflow_div(divisor)?; + Ok(()) + } + /// Returns whether or not this cost exceeds any dimension of the /// other cost. 
pub fn exceeds(&self, other: &ExecutionCost) -> bool { diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 04401a0d9b..0291b1dad2 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -25,7 +25,7 @@ use clarity::vm::analysis::{CheckError, CheckErrors}; use clarity::vm::ast::errors::ParseErrors; use clarity::vm::ast::ASTRules; use clarity::vm::clarity::TransactionConnection; -use clarity::vm::costs::ExecutionCost; +use clarity::vm::costs::{ExecutionCost, LimitedCostTracker, TrackerData}; use clarity::vm::database::BurnStateDB; use clarity::vm::errors::Error as InterpreterError; use clarity::vm::types::{QualifiedContractIdentifier, TypeSignature}; @@ -124,6 +124,8 @@ pub struct NakamotoBlockBuilder { txs: Vec, /// header we're filling in pub header: NakamotoBlockHeader, + /// Optional soft limit for this block's budget usage + soft_limit: Option, } pub struct MinerTenureInfo<'a> { @@ -159,6 +161,7 @@ impl NakamotoBlockBuilder { bytes_so_far: 0, txs: vec![], header: NakamotoBlockHeader::genesis(), + soft_limit: None, } } @@ -176,6 +179,10 @@ impl NakamotoBlockBuilder { /// /// * `coinbase` - the coinbase tx if this is going to start a new tenure /// + /// * `bitvec_len` - the length of the bitvec of reward addresses that should be punished or not in this block. 
+ /// + /// * `soft_limit` - an optional soft limit for the block's clarity cost for this block + /// pub fn new( parent_stacks_header: &StacksHeaderInfo, tenure_id_consensus_hash: &ConsensusHash, @@ -183,6 +190,7 @@ impl NakamotoBlockBuilder { tenure_change: Option<&StacksTransaction>, coinbase: Option<&StacksTransaction>, bitvec_len: u16, + soft_limit: Option, ) -> Result { let next_height = parent_stacks_header .anchored_header @@ -222,6 +230,7 @@ impl NakamotoBlockBuilder { .map(|b| b.timestamp) .unwrap_or(0), ), + soft_limit, }) } @@ -509,6 +518,7 @@ impl NakamotoBlockBuilder { tenure_info.tenure_change_tx(), tenure_info.coinbase_tx(), signer_bitvec_len, + None, )?; let ts_start = get_epoch_time_ms(); @@ -521,6 +531,37 @@ impl NakamotoBlockBuilder { .block_limit() .expect("Failed to obtain block limit from miner's block connection"); + let mut soft_limit = None; + if let Some(percentage) = settings + .mempool_settings + .tenure_cost_limit_per_block_percentage + { + // Make sure we aren't actually going to multiply by 0 or attempt to increase the block limit. + assert!( + (1..=100).contains(&percentage), + "BUG: tenure_cost_limit_per_block_percentage: {percentage}%. 
Must be between 1 and 100" ); + let mut remaining_limit = block_limit.clone(); + let cost_so_far = tenure_tx.cost_so_far(); + if remaining_limit.sub(&cost_so_far).is_ok() { + if remaining_limit.divide(100).is_ok() { + remaining_limit.multiply(percentage.into()).expect( + "BUG: failed to multiply remaining tenure budget by percentage when previously divided by 100", + ); + remaining_limit.add(&cost_so_far).expect("BUG: unexpected overflow when adding cost_so_far, which was previously checked"); + debug!( + "Setting soft limit for clarity cost to {percentage}% of remaining block limit"; + "remaining_limit" => %remaining_limit, + "cost_so_far" => %cost_so_far, + "block_limit" => %block_limit, + ); + soft_limit = Some(remaining_limit); + } + }; + } + + builder.soft_limit = soft_limit; + let initial_txs: Vec<_> = [ tenure_info.tenure_change_tx.clone(), tenure_info.coinbase_tx.clone(), @@ -607,26 +648,19 @@ impl BlockBuilder for NakamotoBlockBuilder { return TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError); } + let non_boot_code_contract_call = match &tx.payload { + TransactionPayload::ContractCall(cc) => !cc.address.is_boot_code_addr(), + TransactionPayload::SmartContract(..) => true, + _ => false, + }; + match limit_behavior { BlockLimitFunction::CONTRACT_LIMIT_HIT => { - match &tx.payload { - TransactionPayload::ContractCall(cc) => { - // once we've hit the runtime limit once, allow boot code contract calls, but do not try to eval - // other contract calls - if !cc.address.is_boot_code_addr() { - return TransactionResult::skipped( - &tx, - "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), - ); - } - } - TransactionPayload::SmartContract(..) 
=> { - return TransactionResult::skipped( - &tx, - "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), - ); - } - _ => {} + if non_boot_code_contract_call { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), + ); } } BlockLimitFunction::LIMIT_REACHED => { @@ -653,70 +687,83 @@ impl BlockBuilder for NakamotoBlockBuilder { ); return TransactionResult::problematic(&tx, Error::NetError(e)); } - let (fee, receipt) = match StacksChainState::process_transaction( - clarity_tx, tx, quiet, ast_rules, - ) { - Ok((fee, receipt)) => (fee, receipt), - Err(e) => { - let (is_problematic, e) = - TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); - if is_problematic { - return TransactionResult::problematic(&tx, e); - } else { - match e { - Error::CostOverflowError(cost_before, cost_after, total_budget) => { - clarity_tx.reset_cost(cost_before.clone()); - if total_budget.proportion_largest_dimension(&cost_before) - < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC - { - warn!( - "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", - tx.txid(), - 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, - &total_budget - ); - let mut measured_cost = cost_after; - let measured_cost = if measured_cost.sub(&cost_before).is_ok() { - Some(measured_cost) - } else { - warn!( - "Failed to compute measured cost of a too big transaction" - ); - None - }; - return TransactionResult::error( - &tx, - Error::TransactionTooBigError(measured_cost), - ); - } else { - warn!( - "Transaction {} reached block cost {}; budget was {}", - tx.txid(), - &cost_after, - &total_budget - ); - return TransactionResult::skipped_due_to_error( - &tx, - Error::BlockTooBigError, - ); - } - } - _ => return TransactionResult::error(&tx, e), - } + + let cost_before = clarity_tx.cost_so_far(); + let (fee, receipt) = + match StacksChainState::process_transaction(clarity_tx, tx, quiet, ast_rules) { + Ok(x) => x, + Err(e) => { + return 
parse_process_transaction_error(clarity_tx, tx, e); } + }; + let cost_after = clarity_tx.cost_so_far(); + let mut soft_limit_reached = false; + // We only attempt to apply the soft limit to non-boot code contract calls. + if non_boot_code_contract_call { + if let Some(soft_limit) = self.soft_limit.as_ref() { + soft_limit_reached = cost_after.exceeds(soft_limit); } - }; + } + info!("Include tx"; "tx" => %tx.txid(), "payload" => tx.payload.name(), - "origin" => %tx.origin_address()); + "origin" => %tx.origin_address(), + "soft_limit_reached" => soft_limit_reached, + "cost_after" => %cost_after, + "cost_before" => %cost_before, + ); // save self.txs.push(tx.clone()); - TransactionResult::success(&tx, fee, receipt) + TransactionResult::success_with_soft_limit(&tx, fee, receipt, soft_limit_reached) }; self.bytes_so_far += tx_len; result } } + +fn parse_process_transaction_error( + clarity_tx: &mut ClarityTx, + tx: &StacksTransaction, + e: Error, +) -> TransactionResult { + let (is_problematic, e) = TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); + if is_problematic { + TransactionResult::problematic(&tx, e) + } else { + match e { + Error::CostOverflowError(cost_before, cost_after, total_budget) => { + clarity_tx.reset_cost(cost_before.clone()); + if total_budget.proportion_largest_dimension(&cost_before) + < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC + { + warn!( + "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", + tx.txid(), + 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, + &total_budget + ); + let mut measured_cost = cost_after; + let measured_cost = if measured_cost.sub(&cost_before).is_ok() { + Some(measured_cost) + } else { + warn!("Failed to compute measured cost of a too big transaction"); + None + }; + TransactionResult::error(&tx, Error::TransactionTooBigError(measured_cost)) + } else { + warn!( + "Transaction {} reached block cost {}; budget was {}", + tx.txid(), + &cost_after, + &total_budget + ); + 
TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError) + } + } + _ => TransactionResult::error(&tx, e), + } + } +} diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 6f929e0031..0645ecd15b 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -729,6 +729,7 @@ impl TestStacksNode { None }, 1, + None, ) .unwrap() } else { diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index c3b60c5da8..7a72cc1652 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -287,6 +287,8 @@ pub struct TransactionSuccess { /// The fee that was charged to the user for doing this transaction. pub fee: u64, pub receipt: StacksTransactionReceipt, + /// Whether the soft limit was reached after this transaction was processed. + pub soft_limit_reached: bool, } /// Represents a failed transaction. Something went wrong when processing this transaction. @@ -319,6 +321,7 @@ pub struct TransactionSuccessEvent { pub fee: u64, pub execution_cost: ExecutionCost, pub result: Value, + pub soft_limit_reached: bool, } /// Represents an event for a failed transaction. Something went wrong when processing this transaction. @@ -448,6 +451,24 @@ impl TransactionResult { tx: transaction.clone(), fee, receipt, + soft_limit_reached: false, + }) + } + + /// Creates a `TransactionResult` backed by `TransactionSuccess` with a soft limit reached. + /// This method logs "transaction success" as a side effect. 
+ pub fn success_with_soft_limit( + transaction: &StacksTransaction, + fee: u64, + receipt: StacksTransactionReceipt, + soft_limit_reached: bool, + ) -> TransactionResult { + Self::log_transaction_success(transaction); + Self::Success(TransactionSuccess { + tx: transaction.clone(), + fee, + receipt, + soft_limit_reached, }) } @@ -499,14 +520,18 @@ impl TransactionResult { pub fn convert_to_event(&self) -> TransactionEvent { match &self { - TransactionResult::Success(TransactionSuccess { tx, fee, receipt }) => { - TransactionEvent::Success(TransactionSuccessEvent { - txid: tx.txid(), - fee: *fee, - execution_cost: receipt.execution_cost.clone(), - result: receipt.result.clone(), - }) - } + TransactionResult::Success(TransactionSuccess { + tx, + fee, + receipt, + soft_limit_reached, + }) => TransactionEvent::Success(TransactionSuccessEvent { + txid: tx.txid(), + fee: *fee, + execution_cost: receipt.execution_cost.clone(), + result: receipt.result.clone(), + soft_limit_reached: *soft_limit_reached, + }), TransactionResult::ProcessingError(TransactionError { tx, error }) => { TransactionEvent::ProcessingError(TransactionErrorEvent { txid: tx.txid(), @@ -540,11 +565,7 @@ impl TransactionResult { /// Otherwise crashes. pub fn unwrap(self) -> (u64, StacksTransactionReceipt) { match self { - TransactionResult::Success(TransactionSuccess { - tx: _, - fee, - receipt, - }) => (fee, receipt), + TransactionResult::Success(TransactionSuccess { fee, receipt, .. }) => (fee, receipt), _ => panic!("Tried to `unwrap` a non-success result."), } } @@ -2366,7 +2387,12 @@ impl StacksBlockBuilder { let result_event = tx_result.convert_to_event(); match tx_result { - TransactionResult::Success(TransactionSuccess { receipt, .. }) => { + TransactionResult::Success(TransactionSuccess { + tx: _, + fee: _, + receipt, + soft_limit_reached, + }) => { if txinfo.metadata.time_estimate_ms.is_none() { // use i64 to avoid running into issues when storing in // rusqlite. 
@@ -2404,6 +2430,18 @@ impl StacksBlockBuilder { { mined_sponsor_nonces.insert(sponsor_addr, sponsor_nonce); } + if soft_limit_reached { + // done mining -- our soft limit execution budget is exceeded. + // Make the block from the transactions we did manage to get + debug!( + "Soft block budget exceeded on tx {}", + &txinfo.tx.txid() + ); + if block_limit_hit != BlockLimitFunction::CONTRACT_LIMIT_HIT { + debug!("Switch to mining stx-transfers only"); + block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; + } + } } TransactionResult::Skipped(TransactionSkipped { error, .. }) | TransactionResult::ProcessingError(TransactionError { diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index bf2b5aff57..46ff54924b 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -537,10 +537,13 @@ pub struct MemPoolWalkSettings { pub txs_to_consider: HashSet, /// Origins for transactions that we'll consider pub filter_origins: HashSet, + /// What percentage of the remaining cost limit should we consume before stopping the walk + /// None means we consume the entire cost limit ASAP + pub tenure_cost_limit_per_block_percentage: Option, } -impl MemPoolWalkSettings { - pub fn default() -> MemPoolWalkSettings { +impl Default for MemPoolWalkSettings { + fn default() -> Self { MemPoolWalkSettings { max_walk_time_ms: u64::MAX, consider_no_estimate_tx_prob: 5, @@ -554,8 +557,11 @@ impl MemPoolWalkSettings { .into_iter() .collect(), filter_origins: HashSet::new(), + tenure_cost_limit_per_block_percentage: None, } } +} +impl MemPoolWalkSettings { pub fn zero() -> MemPoolWalkSettings { MemPoolWalkSettings { max_walk_time_ms: u64::MAX, @@ -570,6 +576,7 @@ impl MemPoolWalkSettings { .into_iter() .collect(), filter_origins: HashSet::new(), + tenure_cost_limit_per_block_percentage: None, } } } diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 517105515c..15516c9050 100644 --- 
a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -464,6 +464,7 @@ impl NakamotoBlockProposal { tenure_change, coinbase, self.block.header.pox_treatment.len(), + None, )?; let mut miner_tenure_info = diff --git a/stackslib/src/net/api/tests/postblock_proposal.rs b/stackslib/src/net/api/tests/postblock_proposal.rs index 4f553efd21..60edb63540 100644 --- a/stackslib/src/net/api/tests/postblock_proposal.rs +++ b/stackslib/src/net/api/tests/postblock_proposal.rs @@ -281,6 +281,7 @@ fn test_try_make_response() { None, None, 8, + None, ) .unwrap(); diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 47e3baafc5..18c07e8953 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -90,6 +90,7 @@ const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1_000; const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000; +const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; #[derive(Clone, Deserialize, Default, Debug)] #[serde(deny_unknown_fields)] @@ -1076,6 +1077,8 @@ impl Config { candidate_retry_cache_size: miner_config.candidate_retry_cache_size, txs_to_consider: miner_config.txs_to_consider, filter_origins: miner_config.filter_origins, + tenure_cost_limit_per_block_percentage: miner_config + .tenure_cost_limit_per_block_percentage, }, miner_status, confirm_microblocks: false, @@ -1116,6 +1119,8 @@ impl Config { candidate_retry_cache_size: miner_config.candidate_retry_cache_size, txs_to_consider: miner_config.txs_to_consider, filter_origins: miner_config.filter_origins, + tenure_cost_limit_per_block_percentage: miner_config + .tenure_cost_limit_per_block_percentage, }, miner_status, confirm_microblocks: true, @@ -2148,6 +2153,8 @@ pub struct MinerConfig { pub subsequent_rejection_pause_ms: u64, /// Duration to wait for a Nakamoto block after seeing a 
burnchain block before submitting a block commit. pub block_commit_delay: Duration, + /// The percentage of the remaining tenure cost limit to consume each block. + pub tenure_cost_limit_per_block_percentage: Option, } impl Default for MinerConfig { @@ -2181,6 +2188,9 @@ impl Default for MinerConfig { first_rejection_pause_ms: DEFAULT_FIRST_REJECTION_PAUSE_MS, subsequent_rejection_pause_ms: DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS, block_commit_delay: Duration::from_millis(DEFAULT_BLOCK_COMMIT_DELAY_MS), + tenure_cost_limit_per_block_percentage: Some( + DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE, + ), } } } @@ -2551,6 +2561,7 @@ pub struct MinerConfigFile { pub first_rejection_pause_ms: Option, pub subsequent_rejection_pause_ms: Option, pub block_commit_delay_ms: Option, + pub tenure_cost_limit_per_block_percentage: Option, } impl MinerConfigFile { @@ -2561,6 +2572,22 @@ impl MinerConfigFile { .map(|x| Secp256k1PrivateKey::from_hex(x)) .transpose()?; let pre_nakamoto_mock_signing = mining_key.is_some(); + + let tenure_cost_limit_per_block_percentage = + if let Some(percentage) = self.tenure_cost_limit_per_block_percentage { + if percentage == 100 { + None + } else if percentage > 0 && percentage < 100 { + Some(percentage) + } else { + return Err( + "miner.tenure_cost_limit_per_block_percentage must be between 1 and 100" + .to_string(), + ); + } + } else { + miner_default_config.tenure_cost_limit_per_block_percentage + }; Ok(MinerConfig { first_attempt_time_ms: self .first_attempt_time_ms @@ -2667,6 +2694,7 @@ impl MinerConfigFile { first_rejection_pause_ms: self.first_rejection_pause_ms.unwrap_or(miner_default_config.first_rejection_pause_ms), subsequent_rejection_pause_ms: self.subsequent_rejection_pause_ms.unwrap_or(miner_default_config.subsequent_rejection_pause_ms), block_commit_delay: self.block_commit_delay_ms.map(Duration::from_millis).unwrap_or(miner_default_config.block_commit_delay), + tenure_cost_limit_per_block_percentage, }) } } diff --git 
a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 6ae34fce42..ba2eca7d31 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -102,11 +102,11 @@ use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ call_read_only, get_account, get_account_result, get_chain_info_opt, get_chain_info_result, get_neighbors, get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, - test_observer, wait_for_runloop, + submit_tx_fallible, test_observer, wait_for_runloop, }; use crate::tests::{ - gen_random_port, get_chain_info, make_contract_publish, make_contract_publish_versioned, - make_stacks_transfer, to_addr, + gen_random_port, get_chain_info, make_contract_call, make_contract_publish, + make_contract_publish_versioned, make_stacks_transfer, to_addr, }; use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; @@ -2820,6 +2820,7 @@ fn block_proposal_api_endpoint() { tenure_change, coinbase, 1, + None, ) .expect("Failed to build Nakamoto block"); @@ -5241,6 +5242,7 @@ fn check_block_heights() { 3 * deploy_fee + (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, ); naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + naka_conf.miner.tenure_cost_limit_per_block_percentage = None; let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let stacker_sk = setup_stacker(&mut naka_conf); @@ -5983,6 +5985,7 @@ fn clarity_burn_state() { deploy_fee + tx_fee * tenure_count + tx_fee * tenure_count * inter_blocks_per_tenure, ); naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + naka_conf.miner.tenure_cost_limit_per_block_percentage = None; let stacker_sk = setup_stacker(&mut naka_conf); test_observer::spawn(); @@ -7737,6 +7740,7 @@ fn check_block_info() { 
naka_conf.burnchain.chain_id = CHAIN_ID_TESTNET + 1; let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + naka_conf.miner.tenure_cost_limit_per_block_percentage = None; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); @@ -8703,6 +8707,7 @@ fn mock_mining() { let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.node.pox_sync_sample_secs = 30; + naka_conf.miner.tenure_cost_limit_per_block_percentage = None; let sender_sk = Secp256k1PrivateKey::new(); let sender_signer_sk = Secp256k1PrivateKey::new(); let sender_signer_addr = tests::to_addr(&sender_signer_sk); @@ -9319,6 +9324,7 @@ fn skip_mining_long_tx() { naka_conf.node.prometheus_bind = Some(prom_bind.clone()); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); naka_conf.miner.nakamoto_attempt_time_ms = 5_000; + naka_conf.miner.tenure_cost_limit_per_block_percentage = None; let sender_1_sk = Secp256k1PrivateKey::from_seed(&[30]); let sender_2_sk = Secp256k1PrivateKey::from_seed(&[31]); // setup sender + recipient for a test stx transfer @@ -9495,3 +9501,303 @@ fn skip_mining_long_tx() { run_loop_thread.join().unwrap(); } + +#[test] +#[ignore] +/// This test is testing that the clarity cost spend down works as expected, +/// spreading clarity contract calls across the tenure instead of all in the first block. +/// It also ensures that the clarity cost resets at the start of each tenure. 
+fn clarity_cost_spend_down() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + let num_signers = 30; + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); + let sender_sks: Vec<_> = (0..num_signers) + .map(|_| Secp256k1PrivateKey::new()) + .collect(); + let sender_signer_sks: Vec<_> = (0..num_signers) + .map(|_| Secp256k1PrivateKey::new()) + .collect(); + let sender_signer_addrs: Vec<_> = sender_signer_sks.iter().map(tests::to_addr).collect(); + let sender_addrs: Vec<_> = sender_sks.iter().map(tests::to_addr).collect(); + let deployer_sk = sender_sks[0]; + let deployer_addr = sender_addrs[0]; + let mut sender_nonces: HashMap = HashMap::new(); + + let get_and_increment_nonce = + |sender_sk: &Secp256k1PrivateKey, sender_nonces: &mut HashMap| { + let nonce = sender_nonces.get(&sender_sk.to_hex()).unwrap_or(&0); + let result = *nonce; + sender_nonces.insert(sender_sk.to_hex(), result + 1); + result + }; + let tenure_count = 5; + let nmb_txs_per_signer = 2; + let mut signers = TestSigners::new(sender_signer_sks.clone()); + // setup sender + recipient for some test stx transfers + // these are necessary for the interim blocks to get mined at all + let tx_fee = 10000; + let small_deploy_fee = 190200; + let large_deploy_fee = 570200; + let amount = + (large_deploy_fee + small_deploy_fee) + tx_fee * nmb_txs_per_signer + 100 * tenure_count; + for sender_addr in sender_addrs { + naka_conf.add_initial_balance(PrincipalData::from(sender_addr).to_string(), amount); + } + for sender_signer_addr in sender_signer_addrs { + naka_conf.add_initial_balance( + PrincipalData::from(sender_signer_addr).to_string(), + amount * 2, + ); + } + naka_conf.miner.tenure_cost_limit_per_block_percentage = Some(5); + let stacker_sks: Vec<_> = (0..num_signers) + .map(|_| setup_stacker(&mut naka_conf)) + .collect(); + + 
test_observer::spawn(); + test_observer::register(&mut naka_conf, &[EventKeyType::MinedBlocks]); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + naka_mined_blocks: mined_blocks, + .. + } = run_loop.counters(); + + let coord_channel = run_loop.coordinator_channels(); + + let run_loop_thread = thread::Builder::new() + .name("run_loop".into()) + .spawn(move || run_loop.start(None, 0)) + .unwrap(); + wait_for_runloop(&blocks_processed); + + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &stacker_sks, + &sender_signer_sks, + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + info!("Nakamoto miner started..."); + blind_signer(&naka_conf, &signers, proposals_submitted); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + let small_contract = format!( + r#" +(define-data-var my-var uint u0) +(define-public (f) (begin {} (ok 1))) (begin (f)) + "#, + (0..250) + .map(|_| format!("(var-get my-var)")) + .collect::>() + .join(" ") + ); + + // Create an expensive contract that will be republished multiple times + let large_contract = format!( + "(define-public (f) (begin {} (ok 1))) (begin (f))", + (0..250) + .map(|_| format!( + "(unwrap! (contract-call? 
'{} submit-proposal '{} \"cost-old\" '{} \"cost-new\") (err 1))", + boot_code_id("cost-voting", false), + boot_code_id("costs", false), + boot_code_id("costs", false), + )) + .collect::>() + .join(" ") + ); + + // First, lets deploy the contract + let deployer_nonce = get_and_increment_nonce(&deployer_sk, &mut sender_nonces); + let small_contract_tx = make_contract_publish( + &deployer_sk, + deployer_nonce, + large_deploy_fee, + naka_conf.burnchain.chain_id, + "small-contract", + &small_contract, + ); + submit_tx(&http_origin, &small_contract_tx); + let deployer_nonce = get_and_increment_nonce(&deployer_sk, &mut sender_nonces); + let large_contract_tx = make_contract_publish( + &deployer_sk, + deployer_nonce, + large_deploy_fee, + naka_conf.burnchain.chain_id, + "big-contract", + &large_contract, + ); + submit_tx(&http_origin, &large_contract_tx); + + info!("----- Submitted deploy txs, mining BTC block -----"); + + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + next_block_and(&mut btc_regtest_controller, 60, || { + let blocks_count = mined_blocks.load(Ordering::SeqCst); + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_count > blocks_before && blocks_processed > blocks_processed_before) + }) + .unwrap(); + + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + let mined_before = test_observer::get_mined_nakamoto_blocks(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + info!("----- Waiting for deploy txs to be mined -----"); + wait_for(30, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed > blocks_processed_before + && test_observer::get_mined_nakamoto_blocks().len() > mined_before.len() + && 
commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .expect("Timed out waiting for interim blocks to be mined"); + + info!("----- Mining interim blocks -----"); + + // Mine `tenure_count` nakamoto tenures + for tenure_ix in 0..tenure_count { + info!("Mining tenure {tenure_ix}"); + // Wait for the tenure change payload to be mined + let blocks_before = mined_blocks.load(Ordering::SeqCst); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + let commits_before = commits_submitted.load(Ordering::SeqCst); + next_block_and(&mut btc_regtest_controller, 60, || { + let blocks_count = mined_blocks.load(Ordering::SeqCst); + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_count > blocks_before + && blocks_processed > blocks_processed_before + && commits_submitted.load(Ordering::SeqCst) > commits_before) + }) + .unwrap(); + + // mine the interim blocks + let mined_before = test_observer::get_mined_nakamoto_blocks(); + let blocks_processed_before = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + // Pause mining so we can add all our transactions to the mempool at once. + TEST_MINE_STALL.lock().unwrap().replace(true); + let mut submitted_txs = vec![]; + for _nmb_tx in 0..nmb_txs_per_signer { + for sender_sk in sender_sks.iter() { + let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces); + // Fill up the mempool with contract calls + let contract_tx = make_contract_call( + &sender_sk, + sender_nonce, + tx_fee, + naka_conf.burnchain.chain_id, + &deployer_addr, + "small-contract", + "f", + &[], + ); + match submit_tx_fallible(&http_origin, &contract_tx) { + Ok(txid) => { + submitted_txs.push(txid); + } + Err(_e) => { + // If we fail to submit a tx, we need to make sure we don't + // increment the nonce for this sender, so we don't end up + // skipping a tx. 
+ sender_nonces.insert(sender_sk.to_hex(), sender_nonce); + } + } + } + } + TEST_MINE_STALL.lock().unwrap().replace(false); + wait_for(120, || { + let blocks_processed = coord_channel + .lock() + .expect("Mutex poisoned") + .get_stacks_blocks_processed(); + Ok(blocks_processed >= blocks_processed_before + 7) + }) + .expect("Timed out waiting for interim blocks to be mined"); + + let mined_after = test_observer::get_mined_nakamoto_blocks(); + let mined_blocks: Vec<_> = mined_after.iter().skip(mined_before.len()).collect(); + let total_nmb_txs = mined_after.iter().map(|b| b.tx_events.len()).sum::(); + let nmb_mined_blocks = mined_blocks.len(); + debug!( + "Mined a total of {total_nmb_txs} transactions across {nmb_mined_blocks} mined blocks" + ); + let mut last_tx_count = None; + for (i, block) in mined_blocks.into_iter().enumerate() { + let tx_count = block.tx_events.len(); + if let Some(count) = last_tx_count { + assert!( + tx_count <= count, + "Expected fewer txs to be mined each block. Last block: {count}, Current block: {tx_count}" + ); + }; + last_tx_count = Some(tx_count); + + // All but the last transaction should hit the soft limit + for (j, tx_event) in block.tx_events.iter().enumerate() { + if let TransactionEvent::Success(TransactionSuccessEvent { + soft_limit_reached, + .. 
+ }) = tx_event + { + if i == nmb_mined_blocks - 1 || j != block.tx_events.len() - 1 { + assert!( + !soft_limit_reached, + "Expected tx to not hit the soft limit in the very last block or in any txs but the last in all other blocks" + ); + } else { + assert!(soft_limit_reached, "Expected tx to hit the soft limit."); + } + } + } + } + } + + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + + run_loop_thread.join().unwrap(); +} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 167a66f7db..a1625b45a7 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -371,8 +371,10 @@ pub mod test_observer { inner_obj } else if let Some(inner_obj) = txevent_obj.get("Skipped") { inner_obj + } else if let Some(inner_obj) = txevent_obj.get("Problematic") { + inner_obj } else { - panic!("TransactionEvent object should have one of Success, ProcessingError, or Skipped") + panic!("TransactionEvent object should have one of Success, ProcessingError, Skipped, or Problematic. 
Had keys: {:?}", txevent_obj.keys().map(|x| x.to_string()).collect::>()); }; inner_obj .as_object() diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 5ac25f97bb..747900ea08 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -920,7 +920,9 @@ fn forked_tenure_testing( // need) TEST_SKIP_BLOCK_BROADCAST.lock().unwrap().replace(true); }, - |_| {}, + |config| { + config.miner.tenure_cost_limit_per_block_percentage = None; + }, None, None, ); @@ -1760,6 +1762,7 @@ fn miner_forking() { config.node.pox_sync_sample_secs = 30; config.burnchain.pox_reward_length = Some(max_sortitions as u32); config.miner.block_commit_delay = Duration::from_secs(0); + config.miner.tenure_cost_limit_per_block_percentage = None; config.events_observers.retain(|listener| { let Ok(addr) = std::net::SocketAddr::from_str(&listener.endpoint) else {