diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 6e6fdfd8f7..9a6a84507e 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -81,7 +81,7 @@ pub fn path_join(dir: &str, path: &str) -> String { // copy src to dest pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { - eprintln!("Copy directory {} to {}", src_dir, dest_dir); + eprintln!("Copy directory {src_dir} to {dest_dir}"); let mut dir_queue = VecDeque::new(); dir_queue.push_back("/".to_string()); @@ -91,7 +91,7 @@ pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { let next_src_dir = path_join(&src_dir, &next_dir); let next_dest_dir = path_join(&dest_dir, &next_dir); - eprintln!("mkdir {}", &next_dest_dir); + eprintln!("mkdir {next_dest_dir}"); fs::create_dir_all(&next_dest_dir)?; for dirent_res in fs::read_dir(&next_src_dir)? { @@ -100,11 +100,11 @@ pub fn copy_dir(src_dir: &str, dest_dir: &str) -> Result<(), io::Error> { let md = fs::metadata(&path)?; if md.is_dir() { let frontier = path_join(&next_dir, &dirent.file_name().to_str().unwrap()); - eprintln!("push {}", &frontier); + eprintln!("push {frontier}"); dir_queue.push_back(frontier); } else { let dest_path = path_join(&next_dest_dir, &dirent.file_name().to_str().unwrap()); - eprintln!("copy {} to {}", &path.to_str().unwrap(), &dest_path); + eprintln!("copy {} to {dest_path}", &path.to_str().unwrap()); fs::copy(path, dest_path)?; } } @@ -583,11 +583,10 @@ impl TestStacksNode { ); test_debug!( - "Miner {}: Block commit transaction builds on {},{} (parent snapshot is {:?})", + "Miner {}: Block commit transaction builds on {},{} (parent snapshot is {parent_block_snapshot_opt:?})", miner.id, block_commit_op.parent_block_ptr, - block_commit_op.parent_vtxindex, - &parent_block_snapshot_opt + block_commit_op.parent_vtxindex ); self.commit_ops.insert( block_commit_op.block_header_hash.clone(), @@ -767,16 +766,15 @@ pub fn preprocess_stacks_block_data( { Some(sn) => sn, None => { - test_debug!("Block commit did not win sorition: {:?}", block_commit_op); + test_debug!("Block commit did not win sorition: {block_commit_op:?}"); return None; } }; // "discover" this stacks block test_debug!( - "\n\nPreprocess Stacks block {}/{} ({})", + "\n\nPreprocess Stacks block {}/{block_hash} ({})", &commit_snapshot.consensus_hash, - &block_hash, StacksBlockHeader::make_index_block_hash(&commit_snapshot.consensus_hash, &block_hash) ); let block_res = node @@ -793,8 +791,7 @@ pub fn preprocess_stacks_block_data( // "discover" this stacks microblock stream for mblock in stacks_microblocks.iter() { test_debug!( - "Preprocess Stacks microblock {}-{} (seq {})", - &block_hash, + "Preprocess Stacks microblock {block_hash}-{} (seq {})", mblock.block_hash(), mblock.header.sequence ); @@ -828,11 +825,9 @@ pub fn check_block_state_index_root( .read_block_root_hash(&index_block_hash) .unwrap(); test_debug!( - "checking {}/{} state root: expecting {}, got {}", - consensus_hash, + "checking {consensus_hash}/{} state root: expecting {}, got {state_root}", &stacks_header.block_hash(), - &stacks_header.state_index_root, - &state_root + &stacks_header.state_index_root ); state_root == stacks_header.state_index_root } @@ -888,9 +883,8 @@ pub fn check_mining_reward( let mut total: u128 = 10_000_000_000 - spent_total; test_debug!( - "Miner {} has spent {} in total so far", - &miner.origin_address().unwrap(), - spent_total + "Miner {} has spent {spent_total} in total 
so far", + &miner.origin_address().unwrap() ); if block_height >= MINER_REWARD_MATURITY { @@ -908,13 +902,10 @@ pub fn check_mining_reward( let reward = recipient.coinbase + anchored + (3 * streamed / 5); test_debug!( - "Miner {} received a reward {} = {} + {} + {} at block {}", + "Miner {} received a reward {reward} = {} + {anchored} + {} at block {i}", &recipient.address.to_string(), - reward, recipient.coinbase, - anchored, (3 * streamed / 5), - i ); total += reward; found = true; @@ -922,9 +913,8 @@ pub fn check_mining_reward( } if !found { test_debug!( - "Miner {} received no reward at block {}", - miner.origin_address().unwrap(), - i + "Miner {} received no reward at block {i}", + miner.origin_address().unwrap() ); } } @@ -945,11 +935,9 @@ pub fn check_mining_reward( &parent_reward.block_hash, ); test_debug!( - "Miner {} received a produced-stream reward {} from {} confirmed at {}", + "Miner {} received a produced-stream reward {parent_streamed} from {} confirmed at {confirmed_block_height}", miner.origin_address().unwrap().to_string(), - parent_streamed, - heights.get(&parent_ibh).unwrap(), - confirmed_block_height + heights.get(&parent_ibh).unwrap() ); total += parent_streamed; } @@ -967,7 +955,7 @@ pub fn check_mining_reward( return total == 0; } else { if amount != total { - test_debug!("Amount {} != {}", amount, total); + test_debug!("Amount {amount} != {total}"); return false; } return true; @@ -1091,16 +1079,14 @@ pub fn make_smart_contract_with_version( (begin (var-set bar (/ x y)) (ok (var-get bar))))"; test_debug!( - "Make smart contract block at hello-world-{}-{}", - burnchain_height, - stacks_block_height + "Make smart contract block at hello-world-{burnchain_height}-{stacks_block_height}" ); let mut tx_contract = StacksTransaction::new( TransactionVersion::Testnet, miner.as_transaction_auth().unwrap(), TransactionPayload::new_smart_contract( - &format!("hello-world-{}-{}", burnchain_height, stacks_block_height), + &format!("hello-world-{burnchain_height}-{stacks_block_height}"), &contract.to_string(), version, ) @@ -1140,7 +1126,7 @@ pub fn make_contract_call( miner.as_transaction_auth().unwrap(), TransactionPayload::new_contract_call( addr.clone(), - &format!("hello-world-{}-{}", burnchain_height, stacks_block_height), + &format!("hello-world-{burnchain_height}-{stacks_block_height}"), "set-bar", vec![Value::Int(arg1), Value::Int(arg2)], ) diff --git a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs index 06cc4799ff..582b46a2fd 100644 --- a/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs +++ b/testnet/stacks-node/src/burnchains/bitcoin_regtest_controller.rs @@ -135,16 +135,16 @@ pub fn addr2str(btc_addr: &BitcoinAddress) -> String { if let BitcoinAddress::Segwit(segwit_addr) = btc_addr { // regtest segwit addresses use a different hrp let s = segwit_addr.to_bech32_hrp("bcrt"); - warn!("Re-encoding {} to {}", &segwit_addr, &s); + warn!("Re-encoding {segwit_addr} to {s}"); s } else { - format!("{}", &btc_addr) + format!("{btc_addr}") } } #[cfg(not(test))] pub fn addr2str(btc_addr: &BitcoinAddress) -> String { - format!("{}", &btc_addr) + format!("{btc_addr}") } // TODO: add tests from mutation testing results #4862 @@ -317,15 +317,15 @@ impl BitcoinRegtestController { false, ); if let Err(err) = res { - error!("Unable to init block headers: {}", err); + error!("Unable to init block headers: {err}"); panic!() } let burnchain_params = 
burnchain_params_from_config(&config.burnchain); if network_id == BitcoinNetworkType::Mainnet && config.burnchain.epochs.is_some() { - panic!("It is an error to set custom epochs while running on Mainnet: network_id {:?} config.burnchain {:#?}", - &network_id, &config.burnchain); + panic!("It is an error to set custom epochs while running on Mainnet: network_id {network_id:?} config.burnchain {:#?}", + &config.burnchain); } let indexer_config = { @@ -462,7 +462,7 @@ impl BitcoinRegtestController { } Err(e) => { // keep trying - error!("Unable to sync with burnchain: {}", e); + error!("Unable to sync with burnchain: {e}"); match e { burnchain_error::TrySyncAgain => { // try again immediately @@ -573,7 +573,7 @@ impl BitcoinRegtestController { } Err(e) => { // keep trying - error!("Unable to sync with burnchain: {}", e); + error!("Unable to sync with burnchain: {e}"); match e { burnchain_error::CoordinatorClosed => { return Err(BurnchainControllerError::CoordinatorClosed) @@ -682,7 +682,7 @@ impl BitcoinRegtestController { let parsed_utxo: ParsedUTXO = match serde_json::from_value(entry) { Ok(utxo) => utxo, Err(err) => { - warn!("Failed parsing UTXO: {}", err); + warn!("Failed parsing UTXO: {err}"); continue; } }; @@ -783,7 +783,7 @@ impl BitcoinRegtestController { break utxos; } Err(e) => { - error!("Bitcoin RPC failure: error listing utxos {:?}", e); + error!("Bitcoin RPC failure: error listing utxos {e:?}"); sleep_ms(5000); continue; } @@ -814,13 +814,13 @@ impl BitcoinRegtestController { utxos = match result { Ok(utxos) => utxos, Err(e) => { - error!("Bitcoin RPC failure: error listing utxos {:?}", e); + error!("Bitcoin RPC failure: error listing utxos {e:?}"); sleep_ms(5000); continue; } }; - test_debug!("Unspent for {:?}: {:?}", &filter_addresses, &utxos); + test_debug!("Unspent for {filter_addresses:?}: {utxos:?}"); if utxos.is_empty() { return None; @@ -829,20 +829,14 @@ impl BitcoinRegtestController { } } } else { - debug!( - "Got {} UTXOs for {:?}", - utxos.utxos.len(), - &filter_addresses - ); + debug!("Got {} UTXOs for {filter_addresses:?}", utxos.utxos.len(),); utxos }; let total_unspent = utxos.total_available(); if total_unspent < total_required { warn!( - "Total unspent {} < {} for {:?}", - total_unspent, - total_required, + "Total unspent {total_unspent} < {total_required} for {:?}", &pubk.to_hex() ); return None; @@ -1495,7 +1489,7 @@ impl BitcoinRegtestController { let mut txid = tx.txid().as_bytes().to_vec(); txid.reverse(); - debug!("Transaction relying on UTXOs: {:?}", utxos); + debug!("Transaction relying on UTXOs: {utxos:?}"); let txid = Txid::from_bytes(&txid[..]).unwrap(); let mut txids = previous_txids.to_vec(); txids.push(txid); @@ -1507,12 +1501,11 @@ impl BitcoinRegtestController { }; info!( - "Miner node: submitting leader_block_commit (txid: {}, rbf: {}, total spent: {}, size: {}, fee_rate: {})", + "Miner node: submitting leader_block_commit (txid: {}, rbf: {}, total spent: {}, size: {}, fee_rate: {fee_rate})", txid.to_hex(), ongoing_block_commit.fees.is_rbf_enabled, ongoing_block_commit.fees.total_spent(), - ongoing_block_commit.fees.final_size, - fee_rate, + ongoing_block_commit.fees.final_size ); self.ongoing_block_commit = Some(ongoing_block_commit); @@ -1551,10 +1544,7 @@ impl BitcoinRegtestController { Ok(true) ); if ongoing_tx_confirmed { - debug!( - "Was able to retrieve confirmation of ongoing burnchain TXID - {}", - txid - ); + debug!("Was able to retrieve confirmation of ongoing burnchain TXID - {txid}"); let res = self.send_block_commit_operation( 
epoch_id, payload, @@ -1566,7 +1556,7 @@ impl BitcoinRegtestController { ); return res; } else { - debug!("Was unable to retrieve ongoing TXID - {}", txid); + debug!("Was unable to retrieve ongoing TXID - {txid}"); }; } @@ -1715,10 +1705,9 @@ impl BitcoinRegtestController { Some(utxos) => utxos, None => { warn!( - "No UTXOs for {} ({}) in epoch {}", + "No UTXOs for {} ({}) in epoch {epoch_id}", &public_key.to_hex(), - &addr2str(&addr), - epoch_id + &addr2str(&addr) ); return Err(BurnchainControllerError::NoUTXOs); } @@ -1835,18 +1824,14 @@ impl BitcoinRegtestController { } if total_consumed < total_target { - warn!( - "Consumed total {} is less than intended spend: {}", - total_consumed, total_target - ); + warn!("Consumed total {total_consumed} is less than intended spend: {total_target}"); return false; } // Append the change output let value = total_consumed - tx_cost; debug!( - "Payments value: {:?}, total_consumed: {:?}, total_spent: {:?}", - value, total_consumed, total_target + "Payments value: {value:?}, total_consumed: {total_consumed:?}, total_spent: {total_target:?}" ); if value >= DUST_UTXO_LIMIT { let change_output = if self.config.miner.segwit && epoch_id >= StacksEpochId::Epoch21 { @@ -1939,8 +1924,8 @@ impl BitcoinRegtestController { transaction.txid() }) .map_err(|e| { - error!("Bitcoin RPC error: transaction submission failed - {:?}", e); - BurnchainControllerError::TransactionSubmissionFailed(format!("{:?}", e)) + error!("Bitcoin RPC error: transaction submission failed - {e:?}"); + BurnchainControllerError::TransactionSubmissionFailed(format!("{e:?}")) }) } @@ -1958,8 +1943,8 @@ impl BitcoinRegtestController { if debug_ctr % 10 == 0 { debug!( - "Waiting until canonical sortition height reaches {} (currently {})", - height_to_wait, canonical_sortition_tip.block_height + "Waiting until canonical sortition height reaches {height_to_wait} (currently {})", + canonical_sortition_tip.block_height ); } debug_ctr += 1; @@ -1993,7 +1978,7 @@ impl BitcoinRegtestController { /// Instruct a regtest Bitcoin node to build the next block. 
pub fn build_next_block(&self, num_blocks: u64) { - debug!("Generate {} block(s)", num_blocks); + debug!("Generate {num_blocks} block(s)"); let public_key_bytes = match &self.config.burnchain.local_mining_public_key { Some(public_key) => hex_bytes(public_key).expect("Invalid byte sequence"), None => panic!("Unable to make new block, mining public key"), @@ -2009,7 +1994,7 @@ impl BitcoinRegtestController { match result { Ok(_) => {} Err(e) => { - error!("Bitcoin RPC failure: error generating block {:?}", e); + error!("Bitcoin RPC failure: error generating block {e:?}"); panic!(); } } @@ -2017,7 +2002,7 @@ impl BitcoinRegtestController { #[cfg(test)] pub fn invalidate_block(&self, block: &BurnchainHeaderHash) { - info!("Invalidating block {}", &block); + info!("Invalidating block {block}"); let request = BitcoinRPCRequest { method: "invalidateblock".into(), params: vec![json!(&block.to_string())], @@ -2025,7 +2010,7 @@ impl BitcoinRegtestController { jsonrpc: "2.0".into(), }; if let Err(e) = BitcoinRPCRequest::send(&self.config, request) { - error!("Bitcoin RPC failure: error invalidating block {:?}", e); + error!("Bitcoin RPC failure: error invalidating block {e:?}"); panic!(); } } @@ -2043,7 +2028,7 @@ impl BitcoinRegtestController { BurnchainHeaderHash::from_hex(v.get("result").unwrap().as_str().unwrap()).unwrap() } Err(e) => { - error!("Bitcoin RPC failure: error invalidating block {:?}", e); + error!("Bitcoin RPC failure: error invalidating block {e:?}"); panic!(); } } @@ -2138,7 +2123,7 @@ impl BitcoinRegtestController { num_blocks.try_into().unwrap(), addr2str(&address), ) { - error!("Bitcoin RPC failure: error generating block {:?}", e); + error!("Bitcoin RPC failure: error generating block {e:?}"); panic!(); } return; @@ -2158,7 +2143,7 @@ impl BitcoinRegtestController { if let Err(e) = BitcoinRPCRequest::generate_to_address(&self.config, 1, addr2str(&address)) { - error!("Bitcoin RPC failure: error generating block {:?}", e); + error!("Bitcoin RPC failure: error generating block {e:?}"); panic!(); } } @@ -2249,10 +2234,7 @@ impl BurnchainController for BitcoinRegtestController { // Evaluate process_exit_at_block_height setting if let Some(cap) = self.config.burnchain.process_exit_at_block_height { if burnchain_tip.block_snapshot.block_height >= cap { - info!( - "Node succesfully reached the end of the ongoing {} blocks epoch!", - cap - ); + info!("Node succesfully reached the end of the ongoing {cap} blocks epoch!"); info!("This process will automatically terminate in 30s, restart your node for participating in the next epoch."); sleep_ms(30000); std::process::exit(0); @@ -2333,8 +2315,7 @@ impl SerializedTx { } pub fn to_hex(&self) -> String { - let formatted_bytes: Vec = - self.bytes.iter().map(|b| format!("{:02x}", b)).collect(); + let formatted_bytes: Vec = self.bytes.iter().map(|b| format!("{b:02x}")).collect(); formatted_bytes.join("").to_string() } } @@ -2367,7 +2348,7 @@ impl ParsedUTXO { Some(Sha256dHash::from(&txid[..])) } Err(err) => { - warn!("Unable to get txid from UTXO {}", err); + warn!("Unable to get txid from UTXO {err}"); None } } @@ -2396,7 +2377,7 @@ impl ParsedUTXO { Some(amount) } (lhs, rhs) => { - warn!("Error while converting BTC to sat {:?} - {:?}", lhs, rhs); + warn!("Error while converting BTC to sat {lhs:?} - {rhs:?}"); None } } @@ -2409,7 +2390,7 @@ impl ParsedUTXO { let base: u64 = 10; let int_part = amount / base.pow(8); let frac_part = amount % base.pow(8); - let amount = format!("{}.{:08}", int_part, frac_part); + let amount = 
format!("{int_part}.{frac_part:08}"); amount } @@ -2447,13 +2428,13 @@ type RPCResult = Result; impl From for RPCError { fn from(ioe: io::Error) -> Self { - Self::Network(format!("IO Error: {:?}", &ioe)) + Self::Network(format!("IO Error: {ioe:?}")) } } impl From for RPCError { fn from(ne: NetError) -> Self { - Self::Network(format!("Net Error: {:?}", &ne)) + Self::Network(format!("Net Error: {ne:?}")) } } @@ -2466,11 +2447,11 @@ impl BitcoinRPCRequest { _ => None, }; let url = config.burnchain.get_rpc_url(wallet_id); - Url::parse(&url).unwrap_or_else(|_| panic!("Unable to parse {} as a URL", url)) + Url::parse(&url).unwrap_or_else(|_| panic!("Unable to parse {url} as a URL")) }; debug!( - "BitcoinRPC builder '{}': {:?}:{:?}@{}", - &payload.method, &config.burnchain.username, &config.burnchain.password, &url + "BitcoinRPC builder '{}': {:?}:{:?}@{url}", + &payload.method, &config.burnchain.username, &config.burnchain.password ); let host = url @@ -2497,7 +2478,7 @@ impl BitcoinRPCRequest { if let (Some(username), Some(password)) = (&config.burnchain.username, &config.burnchain.password) { - let auth_token = format!("Basic {}", encode(format!("{}:{}", username, password))); + let auth_token = format!("Basic {}", encode(format!("{username}:{password}"))); request.add_header("Authorization".into(), auth_token); } request @@ -2505,15 +2486,15 @@ impl BitcoinRPCRequest { #[cfg(test)] pub fn get_raw_transaction(config: &Config, txid: &Txid) -> RPCResult { - debug!("Get raw transaction {}", txid); + debug!("Get raw transaction {txid}"); let payload = BitcoinRPCRequest { method: "getrawtransaction".to_string(), - params: vec![format!("{}", txid).into()], + params: vec![format!("{txid}").into()], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; let res = BitcoinRPCRequest::send(config, payload)?; - debug!("Got raw transaction {}: {:?}", txid, &res); + debug!("Got raw transaction {txid}: {res:?}"); Ok(res.get("result").unwrap().as_str().unwrap().to_string()) } @@ -2521,7 +2502,7 @@ impl BitcoinRPCRequest { pub fn check_transaction_confirmed(config: &Config, txid: &Txid) -> RPCResult { let payload = BitcoinRPCRequest { method: "gettransaction".to_string(), - params: vec![format!("{}", txid).into()], + params: vec![format!("{txid}").into()], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), }; @@ -2544,7 +2525,7 @@ impl BitcoinRPCRequest { } pub fn generate_to_address(config: &Config, num_blocks: u64, address: String) -> RPCResult<()> { - debug!("Generate {} blocks to {}", num_blocks, &address); + debug!("Generate {num_blocks} blocks to {address}"); let payload = BitcoinRPCRequest { method: "generatetoaddress".to_string(), params: vec![num_blocks.into(), address.clone().into()], @@ -2553,10 +2534,7 @@ impl BitcoinRPCRequest { }; let res = BitcoinRPCRequest::send(config, payload)?; - debug!( - "Generated {} blocks to {}: {:?}", - num_blocks, &address, &res - ); + debug!("Generated {num_blocks} blocks to {address}: {res:?}"); Ok(()) } @@ -2623,7 +2601,7 @@ impl BitcoinRPCRequest { let parsed_utxo: ParsedUTXO = match serde_json::from_value(entry) { Ok(utxo) => utxo, Err(err) => { - warn!("Failed parsing UTXO: {}", err); + warn!("Failed parsing UTXO: {err}"); continue; } }; @@ -2687,7 +2665,7 @@ impl BitcoinRPCRequest { if let Some(e) = json_resp.get("error") { if !e.is_null() { - error!("Error submitting transaction: {}", json_resp); + error!("Error submitting transaction: {json_resp}"); return Err(RPCError::Bitcoind(json_resp.to_string())); } } @@ -2743,7 +2721,7 @@ impl BitcoinRPCRequest { 
let payload = BitcoinRPCRequest { method: "importdescriptors".to_string(), params: vec![ - json!([{ "desc": format!("addr({})#{}", &addr2str(&address), &checksum), "timestamp": 0, "internal": true }]), + json!([{ "desc": format!("addr({})#{checksum}", &addr2str(&address)), "timestamp": 0, "internal": true }]), ], id: "stacks".to_string(), jsonrpc: "2.0".to_string(), @@ -2772,7 +2750,7 @@ impl BitcoinRPCRequest { let parsed_wallet_name: String = match serde_json::from_value(entry) { Ok(wallet_name) => wallet_name, Err(err) => { - warn!("Failed parsing wallet name: {}", err); + warn!("Failed parsing wallet name: {err}"); continue; } }; @@ -3003,7 +2981,7 @@ mod tests { ) .unwrap(); - debug!("send_block_commit_operation:\n{:#?}", &block_commit); + debug!("send_block_commit_operation:\n{block_commit:#?}"); debug!("{}", &SerializedTx::new(block_commit.clone()).to_hex()); assert_eq!(block_commit.output[3].value, 323507); diff --git a/testnet/stacks-node/src/chain_data.rs b/testnet/stacks-node/src/chain_data.rs index c7fdaf6cee..cc60f964a3 100644 --- a/testnet/stacks-node/src/chain_data.rs +++ b/testnet/stacks-node/src/chain_data.rs @@ -114,8 +114,7 @@ impl MinerStats { } else { // PoX reward-phase is not active debug!( - "Block {} is in a prepare phase or post-PoX sunset, so no windowing will take place", - burn_block_height; + "Block {burn_block_height} is in a prepare phase or post-PoX sunset, so no windowing will take place" ); assert_eq!(windowed_block_commits.len(), 1); @@ -196,19 +195,19 @@ impl MinerStats { .stderr(Stdio::piped()) .args(args); - debug!("Run: `{:?}`", &cmd); + debug!("Run: `{cmd:?}`"); let output = cmd .spawn() - .map_err(|e| format!("Failed to run `{}`: {:?}", &full_args, &e))? + .map_err(|e| format!("Failed to run `{full_args}`: {e:?}"))? .wait_with_output() - .map_err(|ioe| format!("Failed to run `{}`: {:?}", &full_args, &ioe))?; + .map_err(|ioe| format!("Failed to run `{full_args}`: {ioe:?}"))?; let exit_code = match output.status.code() { Some(code) => code, None => { // failed due to signal - return Err(format!("Failed to run `{}`: killed by signal", &full_args)); + return Err(format!("Failed to run `{full_args}`: killed by signal")); } }; @@ -225,8 +224,8 @@ impl MinerStats { Self::run_subprocess(&self.unconfirmed_commits_helper, all_miners)?; if exit_code != 0 { return Err(format!( - "Failed to run `{}`: exit code {}", - &self.unconfirmed_commits_helper, exit_code + "Failed to run `{}`: exit code {exit_code}", + &self.unconfirmed_commits_helper )); } @@ -234,9 +233,8 @@ impl MinerStats { let unconfirmed_commits: Vec = serde_json::from_slice(&stdout) .map_err(|e| { format!( - "Failed to decode output from `{}`: {:?}. Output was `{}`", + "Failed to decode output from `{}`: {e:?}. 
Output was `{}`", &self.unconfirmed_commits_helper, - &e, String::from_utf8_lossy(&stdout) ) })?; @@ -255,21 +253,20 @@ impl MinerStats { let mut decoded_pox_addrs = vec![]; for pox_addr_hex in unconfirmed_commit.pox_addrs.iter() { let Ok(pox_addr_bytes) = hex_bytes(pox_addr_hex) else { - return Err(format!("Not a hex string: `{}`", &pox_addr_hex)); + return Err(format!("Not a hex string: `{pox_addr_hex}`")); }; let Some(bitcoin_addr) = BitcoinAddress::from_scriptpubkey(BitcoinNetworkType::Mainnet, &pox_addr_bytes) else { return Err(format!( - "Not a recognized Bitcoin scriptpubkey: {}", - &pox_addr_hex + "Not a recognized Bitcoin scriptpubkey: {pox_addr_hex}" )); }; let Some(pox_addr) = PoxAddress::try_from_bitcoin_output(&BitcoinTxOutput { address: bitcoin_addr.clone(), units: 1, }) else { - return Err(format!("Not a recognized PoX address: {}", &bitcoin_addr)); + return Err(format!("Not a recognized PoX address: {bitcoin_addr}")); }; decoded_pox_addrs.push(pox_addr); } @@ -1042,7 +1039,7 @@ EOF ] { let spend = *spend_dist .get(miner) - .unwrap_or_else(|| panic!("no spend for {}", &miner)); + .unwrap_or_else(|| panic!("no spend for {miner}")); match miner.as_str() { "miner-1" => { assert_eq!(spend, 2); @@ -1057,7 +1054,7 @@ EOF assert_eq!(spend, 10); } _ => { - panic!("unknown miner {}", &miner); + panic!("unknown miner {miner}"); } } } @@ -1075,7 +1072,7 @@ EOF ] { let prob = *win_probs .get(miner) - .unwrap_or_else(|| panic!("no probability for {}", &miner)); + .unwrap_or_else(|| panic!("no probability for {miner}")); match miner.as_str() { "miner-1" => { assert!((prob - (2.0 / 25.0)).abs() < 0.00001); @@ -1090,7 +1087,7 @@ EOF assert!((prob - (10.0 / 25.0)).abs() < 0.00001); } _ => { - panic!("unknown miner {}", &miner); + panic!("unknown miner {miner}"); } } } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 8d41d66f5c..927b71ef73 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -108,7 +108,7 @@ pub struct ConfigFile { impl ConfigFile { pub fn from_path(path: &str) -> Result { - let content = fs::read_to_string(path).map_err(|e| format!("Invalid path: {}", &e))?; + let content = fs::read_to_string(path).map_err(|e| format!("Invalid path: {e}"))?; let mut f = Self::from_str(&content)?; f.__path = Some(path.to_string()); Ok(f) @@ -117,7 +117,7 @@ impl ConfigFile { #[allow(clippy::should_implement_trait)] pub fn from_str(content: &str) -> Result { let mut config: ConfigFile = - toml::from_str(content).map_err(|e| format!("Invalid toml: {}", e))?; + toml::from_str(content).map_err(|e| format!("Invalid toml: {e}"))?; if let Some(mstx_balance) = config.mstx_balance.take() { warn!("'mstx_balance' in the config is deprecated; please use 'ustx_balance' instead."); match config.ustx_balance { @@ -393,24 +393,24 @@ impl Config { if let Some(first_burn_block_height) = self.burnchain.first_burn_block_height { debug!( - "Override first_block_height from {} to {}", - burnchain.first_block_height, first_burn_block_height + "Override first_block_height from {} to {first_burn_block_height}", + burnchain.first_block_height ); burnchain.first_block_height = first_burn_block_height; } if let Some(first_burn_block_timestamp) = self.burnchain.first_burn_block_timestamp { debug!( - "Override first_block_timestamp from {} to {}", - burnchain.first_block_timestamp, first_burn_block_timestamp + "Override first_block_timestamp from {} to {first_burn_block_timestamp}", + burnchain.first_block_timestamp ); 
burnchain.first_block_timestamp = first_burn_block_timestamp; } if let Some(first_burn_block_hash) = &self.burnchain.first_burn_block_hash { debug!( - "Override first_burn_block_hash from {} to {}", - burnchain.first_block_hash, first_burn_block_hash + "Override first_burn_block_hash from {} to {first_burn_block_hash}", + burnchain.first_block_hash ); burnchain.first_block_hash = BurnchainHeaderHash::from_hex(first_burn_block_hash) .expect("Invalid first_burn_block_hash"); @@ -428,8 +428,8 @@ impl Config { if let Some(v1_unlock_height) = self.burnchain.pox_2_activation { debug!( - "Override v1_unlock_height from {} to {}", - burnchain.pox_constants.v1_unlock_height, v1_unlock_height + "Override v1_unlock_height from {} to {v1_unlock_height}", + burnchain.pox_constants.v1_unlock_height ); burnchain.pox_constants.v1_unlock_height = v1_unlock_height; } @@ -511,16 +511,16 @@ impl Config { if let Some(sunset_start) = self.burnchain.sunset_start { debug!( - "Override sunset_start from {} to {}", - burnchain.pox_constants.sunset_start, sunset_start + "Override sunset_start from {} to {sunset_start}", + burnchain.pox_constants.sunset_start ); burnchain.pox_constants.sunset_start = sunset_start.into(); } if let Some(sunset_end) = self.burnchain.sunset_end { debug!( - "Override sunset_end from {} to {}", - burnchain.pox_constants.sunset_end, sunset_end + "Override sunset_end from {} to {sunset_end}", + burnchain.pox_constants.sunset_end ); burnchain.pox_constants.sunset_end = sunset_end.into(); } @@ -595,7 +595,7 @@ impl Config { match Burnchain::new(&working_dir, &self.burnchain.chain, &network_name) { Ok(burnchain) => burnchain, Err(e) => { - error!("Failed to instantiate burnchain: {}", e); + error!("Failed to instantiate burnchain: {e}"); panic!() } } @@ -621,7 +621,7 @@ impl Config { assert!( v1_unlock_height > epoch21.start_height, - "FATAL: v1 unlock height occurs at or before pox-2 activation: {} <= {}\nburnchain: {:?}", v1_unlock_height, epoch21.start_height, burnchain + "FATAL: v1 unlock height occurs at or before pox-2 activation: {v1_unlock_height} <= {}\nburnchain: {burnchain:?}", epoch21.start_height ); let epoch21_rc = burnchain @@ -636,8 +636,7 @@ impl Config { // the reward cycle boundary. assert!( !burnchain.is_reward_cycle_start(v1_unlock_height), - "FATAL: v1 unlock height is at a reward cycle boundary\nburnchain: {:?}", - burnchain + "FATAL: v1 unlock height is at a reward cycle boundary\nburnchain: {burnchain:?}" ); } } @@ -679,7 +678,7 @@ impl Config { } else if epoch_name == EPOCH_CONFIG_3_0_0 { Ok(StacksEpochId::Epoch30) } else { - Err(format!("Unknown epoch name specified: {}", epoch_name)) + Err(format!("Unknown epoch name specified: {epoch_name}")) }?; matched_epochs.push((epoch_id, configured_epoch.start_height)); } @@ -710,9 +709,7 @@ impl Config { .zip(matched_epochs.iter().map(|(epoch_id, _)| epoch_id)) { if expected_epoch != configured_epoch { - return Err(format!( - "Configured epochs may not skip an epoch. Expected epoch = {}, Found epoch = {}", - expected_epoch, configured_epoch)); + return Err(format!("Configured epochs may not skip an epoch. Expected epoch = {expected_epoch}, Found epoch = {configured_epoch}")); } } @@ -732,8 +729,8 @@ impl Config { for (i, (epoch_id, start_height)) in matched_epochs.iter().enumerate() { if epoch_id != &out_epochs[i].epoch_id { return Err( - format!("Unmatched epochs in configuration and node implementation. 
Implemented = {}, Configured = {}", - epoch_id, &out_epochs[i].epoch_id)); + format!("Unmatched epochs in configuration and node implementation. Implemented = {epoch_id}, Configured = {}", + &out_epochs[i].epoch_id)); } // end_height = next epoch's start height || i64::max if last epoch let end_height = if i + 1 < matched_epochs.len() { @@ -759,7 +756,7 @@ impl Config { .find(|&e| e.epoch_id == StacksEpochId::Epoch21) .ok_or("Cannot configure pox_2_activation if epoch 2.1 is not configured")?; if last_epoch.start_height > pox_2_activation as u64 { - Err(format!("Cannot configure pox_2_activation at a lower height than the Epoch 2.1 start height. pox_2_activation = {}, epoch 2.1 start height = {}", pox_2_activation, last_epoch.start_height))?; + Err(format!("Cannot configure pox_2_activation at a lower height than the Epoch 2.1 start height. pox_2_activation = {pox_2_activation}, epoch 2.1 start height = {}", last_epoch.start_height))?; } } @@ -1267,14 +1264,11 @@ impl BurnchainConfig { false => "http://", }; let wallet_path = if let Some(wallet_id) = wallet.as_ref() { - format!("/wallet/{}", wallet_id) + format!("/wallet/{wallet_id}") } else { "".to_string() }; - format!( - "{}{}:{}{}", - scheme, self.peer_host, self.rpc_port, wallet_path - ) + format!("{scheme}{}:{}{wallet_path}", self.peer_host, self.rpc_port) } pub fn get_rpc_socket_addr(&self) -> SocketAddr { @@ -1505,15 +1499,14 @@ impl BurnchainConfigFile { // Using std::net::LookupHost would be preferable, but it's // unfortunately unstable at this point. // https://doc.rust-lang.org/1.6.0/std/net/struct.LookupHost.html - let mut sock_addrs = format!("{}:1", &peer_host) + let mut sock_addrs = format!("{peer_host}:1") .to_socket_addrs() - .map_err(|e| format!("Invalid burnchain.peer_host: {}", &e))?; + .map_err(|e| format!("Invalid burnchain.peer_host: {e}"))?; let sock_addr = match sock_addrs.next() { Some(addr) => addr, None => { return Err(format!( - "No IP address could be queried for '{}'", - &peer_host + "No IP address could be queried for '{peer_host}'" )); } }; @@ -1710,10 +1703,7 @@ impl CostEstimatorName { if &s.to_lowercase() == "naive_pessimistic" { CostEstimatorName::NaivePessimistic } else { - panic!( - "Bad cost estimator name supplied in configuration file: {}", - s - ); + panic!("Bad cost estimator name supplied in configuration file: {s}"); } } } @@ -1725,10 +1715,7 @@ impl FeeEstimatorName { } else if &s.to_lowercase() == "fuzzed_weighted_median_fee_rate" { FeeEstimatorName::FuzzedWeightedMedianFeeRate } else { - panic!( - "Bad fee estimator name supplied in configuration file: {}", - s - ); + panic!("Bad fee estimator name supplied in configuration file: {s}"); } } } @@ -1738,7 +1725,7 @@ impl CostMetricName { if &s.to_lowercase() == "proportion_dot_product" { CostMetricName::ProportionDotProduct } else { - panic!("Bad cost metric name supplied in configuration file: {}", s); + panic!("Bad cost metric name supplied in configuration file: {s}"); } } } @@ -1908,7 +1895,7 @@ impl Default for NodeConfig { rng.fill_bytes(&mut buf); let now = get_epoch_time_ms(); - let testnet_id = format!("stacks-node-{}", now); + let testnet_id = format!("stacks-node-{now}"); let rpc_port = 20443; let p2p_port = 20444; @@ -1923,11 +1910,11 @@ impl Default for NodeConfig { NodeConfig { name: name.to_string(), seed: seed.to_vec(), - working_dir: format!("/tmp/{}", testnet_id), - rpc_bind: format!("0.0.0.0:{}", rpc_port), - p2p_bind: format!("0.0.0.0:{}", p2p_port), - data_url: format!("http://127.0.0.1:{}", rpc_port), - p2p_address: 
format!("127.0.0.1:{}", rpc_port), + working_dir: format!("/tmp/{testnet_id}"), + rpc_bind: format!("0.0.0.0:{rpc_port}"), + p2p_bind: format!("0.0.0.0:{p2p_port}"), + data_url: format!("http://127.0.0.1:{rpc_port}"), + p2p_address: format!("127.0.0.1:{rpc_port}"), bootstrap_node: vec![], deny_nodes: vec![], local_peer_seed: local_peer_seed.to_vec(), @@ -2015,15 +2002,12 @@ impl NodeConfig { pub fn add_bootstrap_node(&mut self, bootstrap_node: &str, chain_id: u32, peer_version: u32) { let parts: Vec<&str> = bootstrap_node.split('@').collect(); if parts.len() != 2 { - panic!( - "Invalid bootstrap node '{}': expected PUBKEY@IP:PORT", - bootstrap_node - ); + panic!("Invalid bootstrap node '{bootstrap_node}': expected PUBKEY@IP:PORT"); } let (pubkey_str, hostport) = (parts[0], parts[1]); let pubkey = Secp256k1PublicKey::from_hex(pubkey_str) .unwrap_or_else(|_| panic!("Invalid public key '{pubkey_str}'")); - debug!("Resolve '{}'", &hostport); + debug!("Resolve '{hostport}'"); let mut attempts = 0; let max_attempts = 5; @@ -2035,22 +2019,16 @@ impl NodeConfig { if let Some(addr) = addrs.next() { break addr; } else { - panic!("No addresses found for '{}'", hostport); + panic!("No addresses found for '{hostport}'"); } } Err(e) => { if attempts >= max_attempts { - panic!( - "Failed to resolve '{}' after {} attempts: {}", - hostport, max_attempts, e - ); + panic!("Failed to resolve '{hostport}' after {max_attempts} attempts: {e}"); } else { error!( - "Attempt {} - Failed to resolve '{}': {}. Retrying in {:?}...", + "Attempt {} - Failed to resolve '{hostport}': {e}. Retrying in {delay:?}...", attempts + 1, - hostport, - e, - delay ); thread::sleep(delay); attempts += 1; @@ -2261,7 +2239,7 @@ impl ConnectionOptionsFile { public_ip_address .parse::() .map(|addr| (PeerAddress::from_socketaddr(&addr), addr.port())) - .map_err(|e| format!("Invalid connection_option.public_ip_address: {}", e)) + .map_err(|e| format!("Invalid connection_option.public_ip_address: {e}")) }) .transpose()?; let mut read_only_call_limit = HELIUM_DEFAULT_CONNECTION_OPTIONS @@ -2645,7 +2623,7 @@ impl MinerConfigFile { |txs_to_consider_str| match str::parse(txs_to_consider_str) { Ok(txtype) => txtype, Err(e) => { - panic!("could not parse '{}': {}", &txs_to_consider_str, &e); + panic!("could not parse '{txs_to_consider_str}': {e}"); } }, ) @@ -2661,7 +2639,7 @@ impl MinerConfigFile { .map(|origin_str| match StacksAddress::from_string(origin_str) { Some(addr) => addr, None => { - panic!("could not parse '{}' into a Stacks address", origin_str); + panic!("could not parse '{origin_str}' into a Stacks address"); } }) .collect() @@ -2999,7 +2977,7 @@ mod tests { "#, ) .unwrap_err(); - println!("{}", err); + println!("{err}"); assert!(err.starts_with("Invalid toml: unknown field `unknown_field`")); } @@ -3020,7 +2998,7 @@ mod tests { fn test_example_confs() { // For each config file in the ../conf/ directory, we should be able to parse it let conf_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("conf"); - println!("Reading config files from: {:?}", conf_dir); + println!("Reading config files from: {conf_dir:?}"); let conf_files = fs::read_dir(conf_dir).unwrap(); for entry in conf_files { diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index dd587077a6..88bfc8dae7 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -380,7 +380,7 @@ impl EventObserver { } Err(err) => { // Log the error, then retry after a delay - warn!("Failed 
to insert payload into event observer database: {:?}", err; + warn!("Failed to insert payload into event observer database: {err:?}"; "backoff" => ?backoff, "attempts" => attempts ); @@ -463,7 +463,7 @@ impl EventObserver { ); let url = Url::parse(full_url) - .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {} as a URL", full_url)); + .unwrap_or_else(|_| panic!("Event dispatcher: unable to parse {full_url} as a URL")); let host = url.host_str().expect("Invalid URL: missing host"); let port = url.port_or_known_default().unwrap_or(80); @@ -500,8 +500,7 @@ impl EventObserver { } Err(err) => { warn!( - "Event dispatcher: connection or request failed to {}:{} - {:?}", - &host, &port, err; + "Event dispatcher: connection or request failed to {host}:{port} - {err:?}"; "backoff" => ?backoff, "attempts" => attempts ); @@ -555,11 +554,11 @@ impl EventObserver { pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { // Construct the full URL let url_str = if path.starts_with('/') { - format!("{}{}", &self.endpoint, path) + format!("{}{path}", &self.endpoint) } else { - format!("{}/{}", &self.endpoint, path) + format!("{}/{path}", &self.endpoint) }; - let full_url = format!("http://{}", url_str); + let full_url = format!("http://{url_str}"); if let Some(db_path) = &self.db_path { let conn = @@ -610,7 +609,7 @@ impl EventObserver { .collect(); json!({ - "burn_block_hash": format!("0x{}", burn_block), + "burn_block_hash": format!("0x{burn_block}"), "burn_block_height": burn_block_height, "reward_recipients": serde_json::Value::Array(reward_recipients), "reward_slot_holders": serde_json::Value::Array(reward_slot_holders), @@ -747,10 +746,10 @@ impl EventObserver { .collect(); let payload = json!({ - "parent_index_block_hash": format!("0x{}", parent_index_block_hash), + "parent_index_block_hash": format!("0x{parent_index_block_hash}"), "events": serialized_events, "transactions": serialized_txs, - "burn_block_hash": format!("0x{}", burn_block_hash), + "burn_block_hash": format!("0x{burn_block_hash}"), "burn_block_height": burn_block_height, "burn_block_timestamp": burn_block_timestamp, }); @@ -845,17 +844,17 @@ impl EventObserver { "block_time": block_timestamp, "burn_block_hash": format!("0x{}", metadata.burn_header_hash), "burn_block_height": metadata.burn_header_height, - "miner_txid": format!("0x{}", winner_txid), + "miner_txid": format!("0x{winner_txid}"), "burn_block_time": metadata.burn_header_timestamp, "index_block_hash": format!("0x{}", metadata.index_block_hash()), "parent_block_hash": format!("0x{}", block.parent_block_hash), - "parent_index_block_hash": format!("0x{}", parent_index_hash), + "parent_index_block_hash": format!("0x{parent_index_hash}"), "parent_microblock": format!("0x{}", block.parent_microblock_hash), "parent_microblock_sequence": block.parent_microblock_sequence, "matured_miner_rewards": mature_rewards.clone(), "events": serialized_events, "transactions": serialized_txs, - "parent_burn_block_hash": format!("0x{}", parent_burn_block_hash), + "parent_burn_block_hash": format!("0x{parent_burn_block_hash}"), "parent_burn_block_height": parent_burn_block_height, "parent_burn_block_timestamp": parent_burn_block_timestamp, "anchored_cost": anchored_consumed, @@ -1540,8 +1539,7 @@ impl EventDispatcher { modified_slots: Vec, ) { debug!( - "event_dispatcher: New StackerDB chunk events for {}: {:?}", - contract_id, modified_slots + "event_dispatcher: New StackerDB chunk events for {contract_id}: {modified_slots:?}" ); let interested_observers = 
self.filter_observers(&self.stackerdb_observers_lookup, false); @@ -1582,7 +1580,7 @@ impl EventDispatcher { let dropped_txids: Vec<_> = txs .into_iter() - .map(|tx| serde_json::Value::String(format!("0x{}", &tx))) + .map(|tx| serde_json::Value::String(format!("0x{tx}"))) .collect(); let payload = json!({ @@ -1875,8 +1873,7 @@ mod test { // Assert that the connection attempt timed out assert!( result.is_err(), - "Expected a timeout error, but got {:?}", - result + "Expected a timeout error, but got {result:?}" ); assert_eq!( result.unwrap_err().kind(), @@ -2134,7 +2131,7 @@ mod test { let (tx, rx) = channel(); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let request = server.recv().unwrap(); assert_eq!(request.url(), "/test"); @@ -2149,7 +2146,7 @@ mod test { }); let observer = - EventObserver::new(None, format!("127.0.0.1:{}", port), Duration::from_secs(3)); + EventObserver::new(None, format!("127.0.0.1:{port}"), Duration::from_secs(3)); let payload = json!({"key": "value"}); @@ -2168,7 +2165,7 @@ mod test { let (tx, rx) = channel(); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; while let Ok(request) = server.recv() { @@ -2198,7 +2195,7 @@ mod test { }); let observer = - EventObserver::new(None, format!("127.0.0.1:{}", port), Duration::from_secs(3)); + EventObserver::new(None, format!("127.0.0.1:{port}"), Duration::from_secs(3)); let payload = json!({"key": "value"}); @@ -2218,7 +2215,7 @@ mod test { let (tx, rx) = channel(); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; let mut _request_holder = None; @@ -2242,7 +2239,7 @@ mod test { } }); - let observer = EventObserver::new(None, format!("127.0.0.1:{}", port), timeout); + let observer = EventObserver::new(None, format!("127.0.0.1:{port}"), timeout); let payload = json!({"key": "value"}); @@ -2255,7 +2252,7 @@ mod test { // Record the time after the function returns let elapsed_time = start_time.elapsed(); - println!("Elapsed time: {:?}", elapsed_time); + println!("Elapsed time: {elapsed_time:?}"); assert!( elapsed_time >= timeout, "Expected a timeout, but the function returned too quickly" @@ -2281,9 +2278,9 @@ mod test { // Set up a channel to notify when the server has processed the request let (tx, rx) = channel(); - info!("Starting mock server on port {}", port); + info!("Starting mock server on port {port}"); // Start a mock server in a separate thread - let server = Server::http(format!("127.0.0.1:{}", port)).unwrap(); + let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; let mut _request_holder = None; @@ -2334,7 +2331,7 @@ mod test { let observer = EventObserver::new( Some(working_dir.clone()), - format!("127.0.0.1:{}", port), + format!("127.0.0.1:{port}"), timeout, ); diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index 3e527e76e4..c285c6a168 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -284,8 +284,7 @@ impl Globals { **leader_key_registration_state { info!( - "Received burnchain 
block #{} including key_register_op - {}", - burn_block_height, txid + "Received burnchain block #{burn_block_height} including key_register_op - {txid}" ); if txid == op.txid { let active_key = RegisteredKey { @@ -302,8 +301,8 @@ impl Globals { activated_key = Some(active_key); } else { debug!( - "key_register_op {} does not match our pending op {}", - txid, &op.txid + "key_register_op {txid} does not match our pending op {}", + &op.txid ); } } diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index 9402ebbad5..4e85750880 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -361,7 +361,7 @@ mod tests { let vrf_sk = match self.vrf_map.get(vrf_pk) { Some(vrf_pk) => vrf_pk, None => { - warn!("No VRF secret key on file for {:?}", vrf_pk); + warn!("No VRF secret key on file for {vrf_pk:?}"); return None; } }; diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index e795101c94..4fa1c5e5a7 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -63,11 +63,11 @@ static GLOBAL: Jemalloc = Jemalloc; /// Implmentation of `pick_best_tip` CLI option fn cli_pick_best_tip(config_path: &str, at_stacks_height: Option) -> TipCandidate { - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); let config = match ConfigFile::from_path(config_path) { Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } }; @@ -103,11 +103,11 @@ fn cli_get_miner_spend( mine_start: Option, at_burnchain_height: Option, ) -> u64 { - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); let config = match ConfigFile::from_path(config_path) { Ok(config_file) => Config::from_config_file(config_file, true).unwrap(), Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } }; @@ -181,7 +181,7 @@ fn cli_get_miner_spend( .map(|(miner, _cmt)| miner.as_str()) .collect(); - info!("Active miners: {:?}", &active_miners); + info!("Active miners: {active_miners:?}"); let Ok(unconfirmed_block_commits) = miner_stats .get_unconfirmed_commits(burn_block_height + 1, &active_miners) @@ -195,10 +195,7 @@ fn cli_get_miner_spend( .map(|cmt| (format!("{}", &cmt.apparent_sender), cmt.burn_fee)) .collect(); - info!( - "Found unconfirmed block-commits: {:?}", - &unconfirmed_miners_and_amounts - ); + info!("Found unconfirmed block-commits: {unconfirmed_miners_and_amounts:?}"); let (spend_dist, _total_spend) = MinerStats::get_spend_distribution( &active_miners_and_commits, @@ -231,10 +228,10 @@ fn cli_get_miner_spend( MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist) }; - info!("Unconfirmed spend distribution: {:?}", &spend_dist); + info!("Unconfirmed spend distribution: {spend_dist:?}"); info!( - "Unconfirmed win probabilities (fast_rampup={}): {:?}", - config.miner.fast_rampup, &win_probs + "Unconfirmed win probabilities (fast_rampup={}): {win_probs:?}", + config.miner.fast_rampup ); let miner_addrs = BlockMinerThread::get_miner_addrs(&config, &keychain); @@ -245,8 +242,8 @@ fn cli_get_miner_spend( .unwrap_or(0.0); info!( - "This miner's win probability at {} is {}", - tip.block_height, &win_prob + "This miner's win probability at {} is {win_prob}", + tip.block_height ); win_prob }, @@ -257,9 +254,9 @@ fn cli_get_miner_spend( fn main() { 
panic::set_hook(Box::new(|panic_info| { - error!("Process abort due to thread panic: {}", panic_info); + error!("Process abort due to thread panic: {panic_info}"); let bt = Backtrace::new(); - error!("Panic backtrace: {:?}", &bt); + error!("Panic backtrace: {bt:?}"); // force a core dump #[cfg(unix)] @@ -287,10 +284,7 @@ fn main() { .expect("Failed to parse --mine-at-height argument"); if let Some(mine_start) = mine_start { - info!( - "Will begin mining once Stacks chain has synced to height >= {}", - mine_start - ); + info!("Will begin mining once Stacks chain has synced to height >= {mine_start}"); } let config_file = match subcommand.as_str() { @@ -313,14 +307,14 @@ fn main() { "check-config" => { let config_path: String = args.value_from_str("--config").unwrap(); args.finish(); - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); let config_file = match ConfigFile::from_path(&config_path) { Ok(config_file) => { - debug!("Loaded config file: {:?}", config_file); + debug!("Loaded config file: {config_file:?}"); config_file } Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } }; @@ -330,7 +324,7 @@ fn main() { process::exit(0); } Err(e) => { - warn!("Invalid config: {}", e); + warn!("Invalid config: {e}"); process::exit(1); } }; @@ -338,11 +332,11 @@ fn main() { "start" => { let config_path: String = args.value_from_str("--config").unwrap(); args.finish(); - info!("Loading config at path {}", config_path); + info!("Loading config at path {config_path}"); match ConfigFile::from_path(&config_path) { Ok(config_file) => config_file, Err(e) => { - warn!("Invalid config file: {}", e); + warn!("Invalid config file: {e}"); process::exit(1); } } @@ -389,7 +383,7 @@ fn main() { args.finish(); let best_tip = cli_pick_best_tip(&config_path, at_stacks_height); - println!("Best tip is {:?}", &best_tip); + println!("Best tip is {best_tip:?}"); process::exit(0); } "get-spend-amount" => { @@ -399,7 +393,7 @@ fn main() { args.finish(); let spend_amount = cli_get_miner_spend(&config_path, mine_start, at_burnchain_height); - println!("Will spend {}", spend_amount); + println!("Will spend {spend_amount}"); process::exit(0); } _ => { @@ -411,7 +405,7 @@ fn main() { let conf = match Config::from_config_file(config_file, true) { Ok(conf) => conf, Err(e) => { - warn!("Invalid config: {}", e); + warn!("Invalid config: {e}"); process::exit(1); } }; @@ -425,7 +419,7 @@ fn main() { if conf.burnchain.mode == "helium" || conf.burnchain.mode == "mocknet" { let mut run_loop = helium::RunLoop::new(conf); if let Err(e) = run_loop.start(num_round) { - warn!("Helium runloop exited: {}", e); + warn!("Helium runloop exited: {e}"); } } else if conf.burnchain.mode == "neon" || conf.burnchain.mode == "nakamoto-neon" diff --git a/testnet/stacks-node/src/monitoring/prometheus.rs b/testnet/stacks-node/src/monitoring/prometheus.rs index e9705142d0..f91ac53bb4 100644 --- a/testnet/stacks-node/src/monitoring/prometheus.rs +++ b/testnet/stacks-node/src/monitoring/prometheus.rs @@ -20,10 +20,7 @@ pub fn start_serving_prometheus_metrics(bind_address: String) -> Result<(), Moni warn!("Prometheus monitoring: unable to get local bind address, will not spawn prometheus endpoint service."); MonitoringError::UnableToGetAddress })?; - info!( - "Prometheus monitoring: server listening on http://{}", - local_addr - ); + info!("Prometheus monitoring: server listening on http://{local_addr}"); let mut incoming = listener.incoming(); while let 
Some(stream) = incoming.next().await { diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 56156f4c20..edaf12e98b 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -131,7 +131,7 @@ impl StacksNode { .get_miner_address(StacksEpochId::Epoch21, &public_key); let miner_addr_str = addr2str(&miner_addr); let _ = monitoring::set_burnchain_signer(BurnchainSigner(miner_addr_str)).map_err(|e| { - warn!("Failed to set global burnchain signer: {:?}", &e); + warn!("Failed to set global burnchain signer: {e:?}"); e }); } @@ -308,13 +308,13 @@ impl StacksNode { for op in block_commits.into_iter() { if op.txid == block_snapshot.winning_block_txid { info!( - "Received burnchain block #{} including block_commit_op (winning) - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op (winning) - {} ({})", + op.apparent_sender, &op.block_header_hash ); } else if self.is_miner { info!( - "Received burnchain block #{} including block_commit_op - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op - {} ({})", + op.apparent_sender, &op.block_header_hash ); } } @@ -359,7 +359,7 @@ impl StacksNode { } pub(crate) fn save_activated_vrf_key(path: &str, activated_key: &RegisteredKey) { - info!("Activated VRF key; saving to {}", path); + info!("Activated VRF key; saving to {path}"); let Ok(key_json) = serde_json::to_string(&activated_key) else { warn!("Failed to serialize VRF key"); @@ -369,15 +369,15 @@ pub(crate) fn save_activated_vrf_key(path: &str, activated_key: &RegisteredKey) let mut f = match fs::File::create(path) { Ok(f) => f, Err(e) => { - warn!("Failed to create {}: {:?}", &path, &e); + warn!("Failed to create {path}: {e:?}"); return; } }; if let Err(e) = f.write_all(key_json.as_bytes()) { - warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); + warn!("Failed to write activated VRF key to {path}: {e:?}"); return; } - info!("Saved activated VRF key to {}", &path); + info!("Saved activated VRF key to {path}"); } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 042df70be1..1ab9e77f97 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -356,7 +356,7 @@ impl BlockMinerThread { // try again, in case a new sortition is pending self.globals - .raise_initiative(format!("MiningFailure: {:?}", &e)); + .raise_initiative(format!("MiningFailure: {e:?}")); return Err(NakamotoNodeError::MiningFailure( ChainstateError::MinerAborted, )); @@ -648,14 +648,14 @@ impl BlockMinerThread { } let block_id = block.block_id(); - debug!("Broadcasting block {}", &block_id); + debug!("Broadcasting block {block_id}"); if let Err(e) = self.p2p_handle.broadcast_message( vec![], StacksMessageType::NakamotoBlocks(NakamotoBlocksData { blocks: vec![block.clone()], }), ) { - warn!("Failed to broadcast block {}: {:?}", &block_id, &e); + warn!("Failed to broadcast block {block_id}: {e:?}"); } Ok(()) } @@ -801,7 +801,7 @@ impl BlockMinerThread { // load up stacks chain tip let (stacks_tip_ch, stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(burn_db.conn()).map_err(|e| { - error!("Failed to load canonical Stacks tip: {:?}", &e); + error!("Failed to load canonical Stacks tip: {e:?}"); NakamotoNodeError::ParentNotFound })?; @@ 
-813,8 +813,8 @@ impl BlockMinerThread { ) .map_err(|e| { error!( - "Could not query header info for tenure tip {} off of {}: {:?}", - &self.burn_election_block.consensus_hash, &stacks_tip_block_id, &e + "Could not query header info for tenure tip {} off of {stacks_tip_block_id}: {e:?}", + &self.burn_election_block.consensus_hash ); NakamotoNodeError::ParentNotFound })?; @@ -842,8 +842,8 @@ impl BlockMinerThread { NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) .map_err(|e| { error!( - "Could not query header for parent tenure ID {}: {:?}", - &self.parent_tenure_id, &e + "Could not query header for parent tenure ID {}: {e:?}", + &self.parent_tenure_id ); NakamotoNodeError::ParentNotFound })? @@ -858,7 +858,7 @@ impl BlockMinerThread { &parent_tenure_header.consensus_hash, ) .map_err(|e| { - error!("Could not query parent tenure finish block: {:?}", &e); + error!("Could not query parent tenure finish block: {e:?}"); NakamotoNodeError::ParentNotFound })?; if let Some(header) = header_opt { @@ -872,8 +872,8 @@ impl BlockMinerThread { NakamotoChainState::get_block_header(chain_state.db(), &self.parent_tenure_id) .map_err(|e| { error!( - "Could not query header info for epoch2x tenure block ID {}: {:?}", - &self.parent_tenure_id, &e + "Could not query header info for epoch2x tenure block ID {}: {e:?}", + &self.parent_tenure_id ); NakamotoNodeError::ParentNotFound })? @@ -888,9 +888,8 @@ impl BlockMinerThread { }; debug!( - "Miner: stacks tip parent header is {} {:?}", - &stacks_tip_header.index_block_hash(), - &stacks_tip_header + "Miner: stacks tip parent header is {} {stacks_tip_header:?}", + &stacks_tip_header.index_block_hash() ); let miner_address = self .keychain @@ -974,8 +973,8 @@ impl BlockMinerThread { NakamotoChainState::get_block_header(chain_state.db(), &x.header.parent_block_id) .map_err(|e| { error!( - "Could not query header info for parent block ID {}: {:?}", - &x.header.parent_block_id, &e + "Could not query header info for parent block ID {}: {e:?}", + &x.header.parent_block_id ); NakamotoNodeError::ParentNotFound })? diff --git a/testnet/stacks-node/src/nakamoto_node/peer.rs b/testnet/stacks-node/src/nakamoto_node/peer.rs index f01618a14b..3c4e6a98f4 100644 --- a/testnet/stacks-node/src/nakamoto_node/peer.rs +++ b/testnet/stacks-node/src/nakamoto_node/peer.rs @@ -251,8 +251,7 @@ impl PeerThread { let poll_ms = if !download_backpressure && self.net.has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( - "P2P: backpressure: {}, more downloads: {}", - download_backpressure, + "P2P: backpressure: {download_backpressure}, more downloads: {}", self.net.has_more_downloads() ); 1 @@ -326,7 +325,7 @@ impl PeerThread { Err(e) => { // this is only reachable if the network is not instantiated correctly -- // i.e. 
you didn't connect it - panic!("P2P: Failed to process network dispatch: {:?}", &e); + panic!("P2P: Failed to process network dispatch: {e:?}"); } }; diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index a42f033b20..a1ca16daa4 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -471,7 +471,7 @@ impl RelayerThread { .expect("FATAL: failed to query sortition DB"); if cur_sn.consensus_hash != consensus_hash { - info!("Relayer: Current sortition {} is ahead of processed sortition {}; taking no action", &cur_sn.consensus_hash, consensus_hash); + info!("Relayer: Current sortition {} is ahead of processed sortition {consensus_hash}; taking no action", &cur_sn.consensus_hash); self.globals .raise_initiative("process_sortition".to_string()); return Ok(None); @@ -567,15 +567,13 @@ impl RelayerThread { ) .map_err(|e| { error!( - "Relayer: Failed to get tenure-start block header for stacks tip {}: {:?}", - &stacks_tip, &e + "Relayer: Failed to get tenure-start block header for stacks tip {stacks_tip}: {e:?}" ); NakamotoNodeError::ParentNotFound })? .ok_or_else(|| { error!( - "Relayer: Failed to find tenure-start block header for stacks tip {}", - &stacks_tip + "Relayer: Failed to find tenure-start block header for stacks tip {stacks_tip}" ); NakamotoNodeError::ParentNotFound })?; @@ -588,17 +586,11 @@ impl RelayerThread { tip_block_ch, ) .map_err(|e| { - error!( - "Failed to load VRF proof for {} off of {}: {:?}", - tip_block_ch, &stacks_tip, &e - ); + error!("Failed to load VRF proof for {tip_block_ch} off of {stacks_tip}: {e:?}"); NakamotoNodeError::ParentNotFound })? .ok_or_else(|| { - error!( - "No block VRF proof for {} off of {}", - tip_block_ch, &stacks_tip - ); + error!("No block VRF proof for {tip_block_ch} off of {stacks_tip}"); NakamotoNodeError::ParentNotFound })?; @@ -611,7 +603,7 @@ impl RelayerThread { &self.burnchain, ) .map_err(|e| { - error!("Relayer: Failure fetching recipient set: {:?}", e); + error!("Relayer: Failure fetching recipient set: {e:?}"); NakamotoNodeError::SnapshotNotFoundForChainTip })?; @@ -755,8 +747,7 @@ impl RelayerThread { if burn_chain_tip != burn_header_hash { debug!( - "Relayer: Drop stale RunTenure for {}: current sortition is for {}", - &burn_header_hash, &burn_chain_tip + "Relayer: Drop stale RunTenure for {burn_header_hash}: current sortition is for {burn_chain_tip}" ); self.globals.counters.bump_missed_tenures(); return Err(NakamotoNodeError::MissedMiningOpportunity); @@ -816,14 +807,14 @@ impl RelayerThread { .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { if let Err(e) = new_miner_state.run_miner(prior_tenure_thread) { - info!("Miner thread failed: {:?}", &e); + info!("Miner thread failed: {e:?}"); Err(e) } else { Ok(()) } }) .map_err(|e| { - error!("Relayer: Failed to start tenure thread: {:?}", &e); + error!("Relayer: Failed to start tenure thread: {e:?}"); NakamotoNodeError::SpawnError(e) })?; debug!( @@ -849,7 +840,7 @@ impl RelayerThread { .name(format!("tenure-stop-{}", self.local_peer.data_url)) .spawn(move || BlockMinerThread::stop_miner(&globals, prior_tenure_thread)) .map_err(|e| { - error!("Relayer: Failed to spawn a stop-tenure thread: {:?}", &e); + error!("Relayer: Failed to spawn a stop-tenure thread: {e:?}"); NakamotoNodeError::SpawnError(e) })?; @@ -952,7 +943,7 @@ impl RelayerThread { return true; } Err(e) => { - warn!("Relayer: process_sortition returned {:?}", &e); + warn!("Relayer: 
process_sortition returned {e:?}"); return false; } }; @@ -1030,14 +1021,13 @@ impl RelayerThread { let (cur_stacks_tip_ch, cur_stacks_tip_bh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).unwrap_or_else( |e| { - panic!("Failed to load canonical stacks tip: {:?}", &e); + panic!("Failed to load canonical stacks tip: {e:?}"); }, ); if cur_stacks_tip_ch != tip_block_ch || cur_stacks_tip_bh != tip_block_bh { info!( - "Stacks tip changed prior to commit: {}/{} != {}/{}", - &cur_stacks_tip_ch, &cur_stacks_tip_bh, &tip_block_ch, &tip_block_bh + "Stacks tip changed prior to commit: {cur_stacks_tip_ch}/{cur_stacks_tip_bh} != {tip_block_ch}/{tip_block_bh}" ); return Err(NakamotoNodeError::StacksTipChanged); } @@ -1047,16 +1037,12 @@ &StacksBlockId::new(&tip_block_ch, &tip_block_bh), ) .map_err(|e| { - warn!( - "Relayer: failed to load tip {}/{}: {:?}", - &tip_block_ch, &tip_block_bh, &e - ); + warn!("Relayer: failed to load tip {tip_block_ch}/{tip_block_bh}: {e:?}"); NakamotoNodeError::ParentNotFound })? .map(|header| header.stacks_block_height) else { warn!( - "Relayer: failed to load height for tip {}/{} (got None)", - &tip_block_ch, &tip_block_bh + "Relayer: failed to load height for tip {tip_block_ch}/{tip_block_bh} (got None)" ); return Err(NakamotoNodeError::ParentNotFound); }; @@ -1128,7 +1114,7 @@ impl RelayerThread { // load up canonical sortition and stacks tips let Ok(sort_tip) = SortitionDB::get_canonical_burn_chain_tip(self.sortdb.conn()).map_err(|e| { - error!("Failed to load canonical sortition tip: {:?}", &e); + error!("Failed to load canonical sortition tip: {e:?}"); e }) else { @@ -1138,7 +1124,7 @@ // NOTE: this may be an epoch2x tip let Ok((stacks_tip_ch, stacks_tip_bh)) = SortitionDB::get_canonical_stacks_chain_tip_hash(self.sortdb.conn()).map_err(|e| { - error!("Failed to load canonical stacks tip: {:?}", &e); + error!("Failed to load canonical stacks tip: {e:?}"); e }) else { @@ -1247,25 +1233,19 @@ impl RelayerThread { let mut f = match fs::File::open(path) { Ok(f) => f, Err(e) => { - warn!("Could not open {}: {:?}", &path, &e); + warn!("Could not open {path}: {e:?}"); return None; } }; let mut registered_key_bytes = vec![]; if let Err(e) = f.read_to_end(&mut registered_key_bytes) { - warn!( - "Failed to read registered key bytes from {}: {:?}", - path, &e - ); + warn!("Failed to read registered key bytes from {path}: {e:?}"); return None; } let Ok(registered_key) = serde_json::from_slice::<RegisteredKey>(&registered_key_bytes) else { - warn!( - "Did not load registered key from {}: could not decode JSON", - &path - ); + warn!("Did not load registered key from {path}: could not decode JSON"); return None; }; @@ -1275,7 +1255,7 @@ impl RelayerThread { return None; } - info!("Loaded registered key from {}", &path); + info!("Loaded registered key from {path}"); Some(registered_key) } diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs index b2f892e1f1..14eeef20b9 100644 --- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs @@ -91,7 +91,7 @@ impl SignCoordinator { let is_mainnet = config.is_mainnet(); let Some(ref reward_set_signers) = reward_set.signers else { error!("Could not initialize signing coordinator for reward set without signer"); - debug!("reward set: {:?}", &reward_set); + debug!("reward set: {reward_set:?}"); return Err(ChainstateError::NoRegisteredSigners(0)); }; @@
-357,9 +357,8 @@ impl SignCoordinator { .get_nakamoto_block(&block.block_id()) .map_err(|e| { warn!( - "Failed to query chainstate for block {}: {:?}", - &block.block_id(), - &e + "Failed to query chainstate for block {}: {e:?}", + &block.block_id() ); e }) @@ -551,8 +550,7 @@ impl SignCoordinator { }; responded_signers.insert(rejected_pubkey); debug!( - "Signer {} rejected our block {}/{}", - slot_id, + "Signer {slot_id} rejected our block {}/{}", &block.header.consensus_hash, &block.header.block_hash() ); @@ -564,8 +562,7 @@ impl SignCoordinator { > self.total_weight { debug!( - "{}/{} signers vote to reject our block {}/{}", - total_reject_weight, + "{total_reject_weight}/{} signers vote to reject our block {}/{}", self.total_weight, &block.header.consensus_hash, &block.header.block_hash() diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 791af3d254..63315557a1 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -310,10 +310,7 @@ pub(crate) fn fault_injection_long_tenure() { error!("Parse error for STX_TEST_SLOW_TENURE"); panic!(); }; - info!( - "Fault injection: sleeping for {} milliseconds to simulate a long tenure", - tenure_time - ); + info!("Fault injection: sleeping for {tenure_time} milliseconds to simulate a long tenure"); stacks_common::util::sleep_ms(tenure_time); } @@ -578,10 +575,7 @@ impl MicroblockMinerThread { // This is an artifact of the way the MARF is built (see #1449) let sortdb = SortitionDB::open(&burn_db_path, true, burnchain.pox_constants) .map_err(|e| { - error!( - "Relayer: Could not open sortdb '{}' ({:?}); skipping tenure", - &burn_db_path, &e - ); + error!("Relayer: Could not open sortdb '{burn_db_path}' ({e:?}); skipping tenure"); e }) .ok()?; @@ -589,8 +583,7 @@ impl MicroblockMinerThread { let mut chainstate = open_chainstate_with_faults(&config) .map_err(|e| { error!( - "Relayer: Could not open chainstate '{}' ({:?}); skipping microblock tenure", - &stacks_chainstate_path, &e + "Relayer: Could not open chainstate '{stacks_chainstate_path}' ({e:?}); skipping microblock tenure" ); e }) @@ -612,10 +605,7 @@ impl MicroblockMinerThread { .. } = miner_tip; - debug!( - "Relayer: Instantiate microblock mining state off of {}/{}", - &ch, &bhh - ); + debug!("Relayer: Instantiate microblock mining state off of {ch}/{bhh}"); // we won a block! proceed to build a microblock tail if we've stored it match StacksChainState::get_anchored_block_header_info(chainstate.db(), &ch, &bhh) { @@ -664,17 +654,11 @@ impl MicroblockMinerThread { }) } Ok(None) => { - warn!( - "Relayer: No such anchored block: {}/{}. Cannot mine microblocks", - ch, bhh - ); + warn!("Relayer: No such anchored block: {ch}/{bhh}. Cannot mine microblocks"); None } Err(e) => { - warn!( - "Relayer: Failed to get anchored block cost for {}/{}: {:?}", - ch, bhh, &e - ); + warn!("Relayer: Failed to get anchored block cost for {ch}/{bhh}: {e:?}"); None } } @@ -726,7 +710,7 @@ impl MicroblockMinerThread { let block_snapshot = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &self.parent_consensus_hash) .map_err(|e| { - error!("Failed to find block snapshot for mined block: {}", e); + error!("Failed to find block snapshot for mined block: {e}"); e })? 
.ok_or_else(|| { @@ -736,13 +720,13 @@ impl MicroblockMinerThread { let burn_height = block_snapshot.block_height; let ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), burn_height).map_err(|e| { - error!("Failed to get AST rules for microblock: {}", e); + error!("Failed to get AST rules for microblock: {e}"); e })?; let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), burn_height) .map_err(|e| { - error!("Failed to get epoch for microblock: {}", e); + error!("Failed to get epoch for microblock: {e}"); e })? .expect("FATAL: no epoch defined") @@ -762,10 +746,10 @@ impl MicroblockMinerThread { Ok(x) => x, Err(e) => { let msg = format!( - "Failed to create a microblock miner at chaintip {}/{}: {:?}", - &self.parent_consensus_hash, &self.parent_block_hash, &e + "Failed to create a microblock miner at chaintip {}/{}: {e:?}", + &self.parent_consensus_hash, &self.parent_block_hash ); - error!("{}", msg); + error!("{msg}"); return Err(e); } }; @@ -794,7 +778,7 @@ impl MicroblockMinerThread { let (mined_microblock, new_cost) = match mint_result { Ok(x) => x, Err(e) => { - warn!("Failed to mine microblock: {}", e); + warn!("Failed to mine microblock: {e}"); return Err(e); } }; @@ -819,23 +803,23 @@ impl MicroblockMinerThread { // record this microblock somewhere if fs::metadata(&path).is_err() { fs::create_dir_all(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path}'")); } let path = Path::new(&path); let path = path.join(Path::new(&format!("{}", &mined_microblock.block_hash()))); let mut file = fs::File::create(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{:?}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path:?}'")); let mblock_bits = mined_microblock.serialize_to_vec(); let mblock_bits_hex = to_hex(&mblock_bits); let mblock_json = format!( - r#"{{"microblock":"{}","parent_consensus":"{}","parent_block":"{}"}}"#, - &mblock_bits_hex, &self.parent_consensus_hash, &self.parent_block_hash + r#"{{"microblock":"{mblock_bits_hex}","parent_consensus":"{}","parent_block":"{}"}}"#, + &self.parent_consensus_hash, &self.parent_block_hash ); file.write_all(mblock_json.as_bytes()).unwrap_or_else(|_| { - panic!("FATAL: failed to write microblock bits to '{:?}'", &path) + panic!("FATAL: failed to write microblock bits to '{path:?}'") }); info!( "Fault injection: bad microblock {} saved to {}", @@ -933,11 +917,11 @@ impl MicroblockMinerThread { info!("Will keep polling mempool for transactions to include in a microblock"); } Err(e) => { - warn!("Failed to mine one microblock: {:?}", &e); + warn!("Failed to mine one microblock: {e:?}"); } } } else { - debug!("Will not mine microblocks yet -- have {} attachable blocks that arrived in the last 10 minutes", num_attachable); + debug!("Will not mine microblocks yet -- have {num_attachable} attachable blocks that arrived in the last 10 minutes"); } self.last_mined = get_epoch_time_ms(); @@ -1435,8 +1419,7 @@ impl BlockMinerThread { { // This leaf does not confirm a previous-best-tip, so assign it the // worst-possible score. 
- info!("Tip #{} {}/{} at {}:{} conflicts with a previous best-tip {}/{} at {}:{}", - i, + info!("Tip #{i} {}/{} at {}:{} conflicts with a previous best-tip {}/{} at {}:{}", &leaf_tip.consensus_hash, &leaf_tip.anchored_block_hash, leaf_tip.burn_height, @@ -1496,13 +1479,11 @@ impl BlockMinerThread { } info!( - "Tip #{} {}/{} at {}:{} has score {} ({})", - i, + "Tip #{i} {}/{} at {}:{} has score {score} ({})", &leaf_tip.consensus_hash, &leaf_tip.anchored_block_hash, leaf_tip.burn_height, leaf_tip.stacks_height, - score, score_summaries.join(" + ").to_string() ); if score < u64::MAX { @@ -1527,8 +1508,8 @@ impl BlockMinerThread { .expect("FATAL: candidates should not be empty"); info!( - "Best tip is #{} {}/{}", - best_tip_idx, &best_tip.consensus_hash, &best_tip.anchored_block_hash + "Best tip is #{best_tip_idx} {}/{}", + &best_tip.consensus_hash, &best_tip.anchored_block_hash ); Some((*best_tip).clone()) } @@ -1690,9 +1671,9 @@ impl BlockMinerThread { if !force { // the chain tip hasn't changed since we attempted to build a block. Use what we // already have. - info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no new microblocks ({} <= {} + 1)", + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {parent_block_burn_height}, and no new microblocks ({} <= {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); return None; } @@ -1701,24 +1682,24 @@ impl BlockMinerThread { // TODO: only consider rebuilding our anchored block if we (a) have // time, and (b) the new microblocks are worth more than the new BTC // fee minus the old BTC fee - info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, but there are new microblocks ({} > {} + 1)", + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {parent_block_burn_height}, but there are new microblocks ({} > {} + 1)", &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); + prev_block.anchored_block.txs.len(), prev_block.burn_hash, stream.len(), prev_block.anchored_block.header.parent_microblock_sequence); best_attempt = cmp::max(best_attempt, prev_block.attempt); } } else if !force { // no microblock stream to confirm, and the stacks tip hasn't changed - info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {}, and no microblocks present", + info!("Relayer: Stacks tip is unchanged since we last tried to mine a block off of {}/{} at height {} with {} txs, in {} at burn height {parent_block_burn_height}, and no microblocks present", &prev_block.parent_consensus_hash, 
&prev_block.anchored_block.header.parent_block, prev_block.anchored_block.header.total_work.work, - prev_block.anchored_block.txs.len(), prev_block.burn_hash, parent_block_burn_height); + prev_block.anchored_block.txs.len(), prev_block.burn_hash); return None; } } else if self.burn_block.burn_header_hash == prev_block.burn_hash { // only try and re-mine if there was no sortition since the last chain tip - info!("Relayer: Stacks tip has changed to {}/{} since we last tried to mine a block in {} at burn height {}; attempt was {} (for Stacks tip {}/{})", - parent_consensus_hash, stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, parent_block_burn_height, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); + info!("Relayer: Stacks tip has changed to {parent_consensus_hash}/{} since we last tried to mine a block in {} at burn height {parent_block_burn_height}; attempt was {} (for Stacks tip {}/{})", + stacks_parent_header.anchored_header.block_hash(), prev_block.burn_hash, prev_block.attempt, &prev_block.parent_consensus_hash, &prev_block.anchored_block.header.parent_block); best_attempt = cmp::max(best_attempt, prev_block.attempt); // Since the chain tip has changed, we should try to mine a new block, even // if it has less transactions than the previous block we mined, since that @@ -1726,7 +1707,7 @@ impl BlockMinerThread { max_txs = 0; } else { info!("Relayer: Burn tip has changed to {} ({}) since we last tried to mine a block in {}", - &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); + &self.burn_block.burn_header_hash, self.burn_block.block_height, &prev_block.burn_hash); } } (best_attempt + 1, max_txs) @@ -1822,9 +1803,7 @@ impl BlockMinerThread { Ok(x) => { let num_mblocks = x.as_ref().map(|(mblocks, ..)| mblocks.len()).unwrap_or(0); debug!( - "Loaded {} microblocks descending from {}/{} (data: {})", - num_mblocks, - parent_consensus_hash, + "Loaded {num_mblocks} microblocks descending from {parent_consensus_hash}/{} (data: {})", &stacks_parent_header.anchored_header.block_hash(), x.is_some() ); @@ -1832,10 +1811,8 @@ impl BlockMinerThread { } Err(e) => { warn!( - "Failed to load descendant microblock stream from {}/{}: {:?}", - parent_consensus_hash, - &stacks_parent_header.anchored_header.block_hash(), - &e + "Failed to load descendant microblock stream from {parent_consensus_hash}/{}: {e:?}", + &stacks_parent_header.anchored_header.block_hash() ); None } @@ -1855,7 +1832,7 @@ impl BlockMinerThread { stacks_parent_header.microblock_tail = microblocks.last().map(|blk| blk.header.clone()); if let Some(poison_payload) = poison_opt { - debug!("Detected poisoned microblock fork: {:?}", &poison_payload); + debug!("Detected poisoned microblock fork: {poison_payload:?}"); // submit it multiple times with different nonces, so it'll have a good chance of // eventually getting picked up (even if the miner sends other transactions from @@ -1877,15 +1854,9 @@ impl BlockMinerThread { Some(&self.event_dispatcher), 1_000_000_000.0, // prioritize this for inclusion ) { - warn!( - "Detected but failed to mine poison-microblock transaction: {:?}", - &e - ); + warn!("Detected but failed to mine poison-microblock transaction: {e:?}"); } else { - debug!( - "Submit poison-microblock transaction {:?}", - &poison_microblock_tx - ); + debug!("Submit poison-microblock transaction {poison_microblock_tx:?}"); } } } @@ -1918,7 +1889,7 @@ impl BlockMinerThread { } btc_addrs .into_iter() - .map(|addr| 
format!("{}", &addr)) + .map(|addr| format!("{addr}")) .collect() } @@ -1951,7 +1922,7 @@ impl BlockMinerThread { }; let Ok(tip) = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).map_err(|e| { - warn!("Failed to load canonical burn chain tip: {:?}", &e); + warn!("Failed to load canonical burn chain tip: {e:?}"); e }) else { return config_file_burn_fee_cap; @@ -1959,10 +1930,7 @@ impl BlockMinerThread { let tip = if let Some(at_burn_block) = at_burn_block.as_ref() { let ih = sortdb.index_handle(&tip.sortition_id); let Ok(Some(ancestor_tip)) = ih.get_block_snapshot_by_height(*at_burn_block) else { - warn!( - "Failed to load ancestor tip at burn height {}", - at_burn_block - ); + warn!("Failed to load ancestor tip at burn height {at_burn_block}"); return config_file_burn_fee_cap; }; ancestor_tip @@ -1972,7 +1940,7 @@ impl BlockMinerThread { let Ok(active_miners_and_commits) = MinerStats::get_active_miners(sortdb, at_burn_block) .map_err(|e| { - warn!("Failed to get active miners: {:?}", &e); + warn!("Failed to get active miners: {e:?}"); e }) else { @@ -1988,12 +1956,12 @@ impl BlockMinerThread { .map(|(miner, _cmt)| miner.as_str()) .collect(); - info!("Active miners: {:?}", &active_miners); + info!("Active miners: {active_miners:?}"); let Ok(unconfirmed_block_commits) = miner_stats .get_unconfirmed_commits(tip.block_height + 1, &active_miners) .map_err(|e| { - warn!("Failed to find unconfirmed block-commits: {}", &e); + warn!("Failed to find unconfirmed block-commits: {e}"); e }) else { @@ -2005,10 +1973,7 @@ impl BlockMinerThread { .map(|cmt| (cmt.apparent_sender.to_string(), cmt.burn_fee)) .collect(); - info!( - "Found unconfirmed block-commits: {:?}", - &unconfirmed_miners_and_amounts - ); + info!("Found unconfirmed block-commits: {unconfirmed_miners_and_amounts:?}"); let (spend_dist, _total_spend) = MinerStats::get_spend_distribution( &active_miners_and_commits, @@ -2034,7 +1999,7 @@ impl BlockMinerThread { at_burn_block, ) .map_err(|e| { - warn!("Failed to get unconfirmed burn distribution: {:?}", &e); + warn!("Failed to get unconfirmed burn distribution: {e:?}"); e }) else { @@ -2044,10 +2009,10 @@ impl BlockMinerThread { MinerStats::burn_dist_to_prob_dist(&unconfirmed_burn_dist) }; - info!("Unconfirmed spend distribution: {:?}", &spend_dist); + info!("Unconfirmed spend distribution: {spend_dist:?}"); info!( - "Unconfirmed win probabilities (fast_rampup={}): {:?}", - miner_config.fast_rampup, &win_probs + "Unconfirmed win probabilities (fast_rampup={}): {win_probs:?}", + miner_config.fast_rampup ); let miner_addrs = Self::get_miner_addrs(config, keychain); @@ -2058,8 +2023,8 @@ impl BlockMinerThread { .unwrap_or(0.0); info!( - "This miner's win probability at {} is {}", - tip.block_height, &win_prob + "This miner's win probability at {} is {win_prob}", + tip.block_height ); set_prior_winning_prob(tip.block_height, win_prob); @@ -2082,8 +2047,7 @@ impl BlockMinerThread { let prior_win_prob = get_prior_winning_prob(prior_burn_height); if prior_win_prob < config.miner.target_win_probability { info!( - "Miner underperformed in block {} ({}/{})", - prior_burn_height, underperformed_count, underperform_stop_threshold + "Miner underperformed in block {prior_burn_height} ({underperformed_count}/{underperform_stop_threshold})" ); underperformed_count += 1; } @@ -2126,7 +2090,7 @@ impl BlockMinerThread { ) { Ok(x) => x, Err(e) => { - error!("Relayer: Failure fetching recipient set: {:?}", e); + error!("Relayer: Failure fetching recipient set: {e:?}"); return None; } }; @@ -2533,10 
+2497,7 @@ impl BlockMinerThread { if cfg!(test) { if let Ok(mblock_pubkey_hash_str) = std::env::var("STACKS_MICROBLOCK_PUBKEY_HASH") { if let Ok(bad_pubkh) = Hash160::from_hex(&mblock_pubkey_hash_str) { - debug!( - "Fault injection: set microblock public key hash to {}", - &bad_pubkh - ); + debug!("Fault injection: set microblock public key hash to {bad_pubkh}"); pubkh = bad_pubkh } } @@ -2621,13 +2582,13 @@ impl BlockMinerThread { ) { Ok(block) => block, Err(e) => { - error!("Relayer: Failure mining anchor block even after removing offending microblock {}: {}", &mblock_header_hash, &e); + error!("Relayer: Failure mining anchor block even after removing offending microblock {mblock_header_hash}: {e}"); return None; } } } Err(e) => { - error!("Relayer: Failure mining anchored block: {}", e); + error!("Relayer: Failure mining anchored block: {e}"); return None; } }; @@ -2646,12 +2607,12 @@ impl BlockMinerThread { if miner_config.only_increase_tx_count && max_txs > u64::try_from(anchored_block.txs.len()).expect("too many txs") { - info!("Relayer: Succeeded assembling subsequent block with {} txs, but had previously produced a block with {} txs", anchored_block.txs.len(), max_txs); + info!("Relayer: Succeeded assembling subsequent block with {} txs, but had previously produced a block with {max_txs} txs", anchored_block.txs.len()); return None; } info!( - "Relayer: Succeeded assembling {} block #{}: {}, with {} txs, attempt {}", + "Relayer: Succeeded assembling {} block #{}: {}, with {} txs, attempt {attempt}", if parent_block_info.parent_block_total_burn == 0 { "Genesis" } else { @@ -2659,8 +2620,7 @@ impl BlockMinerThread { }, anchored_block.header.total_work.work, anchored_block.block_hash(), - anchored_block.txs.len(), - attempt + anchored_block.txs.len() ); // let's commit @@ -2777,7 +2737,7 @@ impl BlockMinerThread { return None; } Err(e) => { - warn!("Relayer: Failed to submit Bitcoin transaction: {:?}", e); + warn!("Relayer: Failed to submit Bitcoin transaction: {e:?}"); self.failed_to_submit_last_attempt = true; return None; } @@ -3071,7 +3031,7 @@ impl RelayerThread { let burn_height = SortitionDB::get_block_snapshot_consensus(self.sortdb_ref().conn(), consensus_hash) .map_err(|e| { - error!("Failed to find block snapshot for mined block: {}", e); + error!("Failed to find block snapshot for mined block: {e}"); e })? 
.ok_or_else(|| { @@ -3104,22 +3064,20 @@ impl RelayerThread { // record this block somewhere if fs::metadata(&path).is_err() { fs::create_dir_all(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path}'")); } let path = Path::new(&path); let path = path.join(Path::new(&format!("{}", &anchored_block.block_hash()))); let mut file = fs::File::create(&path) - .unwrap_or_else(|_| panic!("FATAL: could not create '{:?}'", &path)); + .unwrap_or_else(|_| panic!("FATAL: could not create '{path:?}'")); let block_bits = anchored_block.serialize_to_vec(); let block_bits_hex = to_hex(&block_bits); - let block_json = format!( - r#"{{"block":"{}","consensus":"{}"}}"#, - &block_bits_hex, &consensus_hash - ); + let block_json = + format!(r#"{{"block":"{block_bits_hex}","consensus":"{consensus_hash}"}}"#); file.write_all(block_json.as_bytes()).unwrap_or_else(|_| { - panic!("FATAL: failed to write block bits to '{:?}'", &path) + panic!("FATAL: failed to write block bits to '{path:?}'") }); info!( "Fault injection: bad block {} saved to {}", @@ -3229,8 +3187,8 @@ impl RelayerThread { .expect("FATAL: unknown consensus hash"); debug!( - "Relayer: Process tenure {}/{} in {} burn height {}", - &consensus_hash, &block_header_hash, &burn_hash, sn.block_height + "Relayer: Process tenure {consensus_hash}/{block_header_hash} in {burn_hash} burn height {}", + sn.block_height ); if let Some((last_mined_block_data, microblock_privkey)) = @@ -3247,8 +3205,7 @@ impl RelayerThread { let reward_block_height = mined_block.header.total_work.work + MINER_REWARD_MATURITY; info!( - "Relayer: Won sortition! Mining reward will be received in {} blocks (block #{})", - MINER_REWARD_MATURITY, reward_block_height + "Relayer: Won sortition! 
Mining reward will be received in {MINER_REWARD_MATURITY} blocks (block #{reward_block_height})" ); debug!("Relayer: Won sortition!"; "stacks_header" => %block_header_hash, @@ -3267,7 +3224,7 @@ impl RelayerThread { return (false, None); } Err(e) => { - warn!("Error processing my tenure, bad block produced: {}", e); + warn!("Error processing my tenure, bad block produced: {e}"); warn!( "Bad block"; "stacks_header" => %block_header_hash, @@ -3289,7 +3246,7 @@ impl RelayerThread { }; if let Err(e) = self.relayer.advertize_blocks(blocks_available, block_data) { - warn!("Failed to advertise new block: {}", e); + warn!("Failed to advertise new block: {e}"); } let snapshot = SortitionDB::get_block_snapshot_consensus( @@ -3301,8 +3258,7 @@ impl RelayerThread { if !snapshot.pox_valid { warn!( - "Snapshot for {} is no longer valid; discarding {}...", - &consensus_hash, + "Snapshot for {consensus_hash} is no longer valid; discarding {}...", &mined_block.block_hash() ); miner_tip = Self::pick_higher_tip(miner_tip, None); @@ -3325,7 +3281,7 @@ impl RelayerThread { .relayer .broadcast_block(snapshot.consensus_hash, mined_block) { - warn!("Failed to push new block: {}", e); + warn!("Failed to push new block: {e}"); } } @@ -3348,8 +3304,7 @@ impl RelayerThread { } } else { debug!( - "Relayer: Did not win sortition in {}, winning block was {}/{}", - &burn_hash, &consensus_hash, &block_header_hash + "Relayer: Did not win sortition in {burn_hash}, winning block was {consensus_hash}/{block_header_hash}" ); miner_tip = None; } @@ -3484,11 +3439,9 @@ impl RelayerThread { || mtip.block_hash != stacks_tip_block_hash { debug!( - "Relayer: miner tip {}/{} is NOT canonical ({}/{})", + "Relayer: miner tip {}/{} is NOT canonical ({stacks_tip_consensus_hash}/{stacks_tip_block_hash})", &mtip.consensus_hash, &mtip.block_hash, - &stacks_tip_consensus_hash, - &stacks_tip_block_hash ); miner_tip = None; } else { @@ -3549,10 +3502,7 @@ impl RelayerThread { let best_tip = Self::pick_higher_tip(my_miner_tip.clone(), new_miner_tip.clone()); if best_tip == new_miner_tip && best_tip != my_miner_tip { // tip has changed - debug!( - "Relayer: Best miner tip went from {:?} to {:?}", - &my_miner_tip, &new_miner_tip - ); + debug!("Relayer: Best miner tip went from {my_miner_tip:?} to {new_miner_tip:?}"); self.microblock_stream_cost = ExecutionCost::zero(); } self.miner_tip = best_tip; @@ -3652,14 +3602,14 @@ impl RelayerThread { for (stacks_bhh, (assembled_block, microblock_privkey)) in last_mined_blocks.into_iter() { if assembled_block.burn_block_height < burn_height { debug!( - "Stale mined block: {} (as of {},{})", - &stacks_bhh, &assembled_block.burn_hash, assembled_block.burn_block_height + "Stale mined block: {stacks_bhh} (as of {},{})", + &assembled_block.burn_hash, assembled_block.burn_block_height ); continue; } debug!( - "Mined block in-flight: {} (as of {},{})", - &stacks_bhh, &assembled_block.burn_hash, assembled_block.burn_block_height + "Mined block in-flight: {stacks_bhh} (as of {},{})", + &assembled_block.burn_hash, assembled_block.burn_block_height ); ret.insert(stacks_bhh, (assembled_block, microblock_privkey)); } @@ -3724,8 +3674,7 @@ impl RelayerThread { if burn_chain_tip != burn_header_hash { debug!( - "Relayer: Drop stale RunTenure for {}: current sortition is for {}", - &burn_header_hash, &burn_chain_tip + "Relayer: Drop stale RunTenure for {burn_header_hash}: current sortition is for {burn_chain_tip}" ); self.globals.counters.bump_missed_tenures(); return None; @@ -3741,8 +3690,7 @@ impl RelayerThread { ); if 
has_unprocessed { debug!( - "Relayer: Drop RunTenure for {} because there are fewer than {} pending blocks", - &burn_header_hash, + "Relayer: Drop RunTenure for {burn_header_hash} because there are fewer than {} pending blocks", self.burnchain.pox_constants.prepare_length - 1 ); return None; } @@ -3772,7 +3720,7 @@ impl RelayerThread { // if we're still mining on this burn block, then do nothing if self.miner_thread.is_some() { - debug!("Relayer: will NOT run tenure since miner thread is already running for burn tip {}", &burn_chain_tip); + debug!("Relayer: will NOT run tenure since miner thread is already running for burn tip {burn_chain_tip}"); return None; } @@ -3820,7 +3768,7 @@ impl RelayerThread { .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || { if let Err(e) = miner_thread_state.send_mock_miner_messages() { - warn!("Failed to send mock miner messages: {}", e); + warn!("Failed to send mock miner messages: {e}"); } miner_thread_state.run_tenure() }) @@ -3943,10 +3891,7 @@ impl RelayerThread { let parent_consensus_hash = &miner_tip.consensus_hash; let parent_block_hash = &miner_tip.block_hash; - debug!( - "Relayer: Run microblock tenure for {}/{}", - parent_consensus_hash, parent_block_hash - ); + debug!("Relayer: Run microblock tenure for {parent_consensus_hash}/{parent_block_hash}"); let Some(mut microblock_thread_state) = MicroblockMinerThread::from_relayer_thread(self) else { @@ -4027,11 +3972,9 @@ impl RelayerThread { .set_ongoing_commit(ongoing_commit_opt); debug!( - "Relayer: RunTenure finished at {} (in {}ms) targeting {} (originally {})", + "Relayer: RunTenure finished at {} (in {}ms) targeting {bhh} (originally {orig_bhh})", self.last_tenure_issue_time, - self.last_tenure_issue_time.saturating_sub(tenure_begin), - &bhh, - &orig_bhh + self.last_tenure_issue_time.saturating_sub(tenure_begin) ); // this stacks block confirms all in-flight microblocks we know about, @@ -4060,11 +4003,9 @@ impl RelayerThread { ); info!( - "Mined one microblock: {} seq {} txs {} (total processed: {})", - &microblock_hash, + "Mined one microblock: {microblock_hash} seq {} txs {} (total processed: {num_mblocks})", next_microblock.header.sequence, - next_microblock.txs.len(), - num_mblocks + next_microblock.txs.len() ); self.globals.counters.set_microblocks_processed(num_mblocks); @@ -4084,8 +4025,7 @@ impl RelayerThread { next_microblock, ) { error!( - "Failure trying to broadcast microblock {}: {}", - microblock_hash, e + "Failure trying to broadcast microblock {microblock_hash}: {e}" ); } @@ -4110,7 +4050,7 @@ impl RelayerThread { self.mined_stacks_block = false; } Err(e) => { - warn!("Relayer: Failed to mine next microblock: {:?}", &e); + warn!("Relayer: Failed to mine next microblock: {e:?}"); // switch back to block mining self.mined_stacks_block = false; @@ -4151,28 +4091,22 @@ impl RelayerThread { let mut f = match fs::File::open(path) { Ok(f) => f, Err(e) => { - warn!("Could not open {}: {:?}", &path, &e); + warn!("Could not open {path}: {e:?}"); return None; } }; let mut registered_key_bytes = vec![]; if let Err(e) = f.read_to_end(&mut registered_key_bytes) { - warn!( - "Failed to read registered key bytes from {}: {:?}", - path, &e - ); + warn!("Failed to read registered key bytes from {path}: {e:?}"); return None; } let Ok(registered_key) = serde_json::from_slice(&registered_key_bytes) else { - warn!( - "Did not load registered key from {}: could not decode JSON", - &path - ); + warn!("Did not load registered key from {path}: could not decode JSON"); return None; }; - info!("Loaded
registered key from {}", &path); + info!("Loaded registered key from {path}"); Some(registered_key) } @@ -4331,9 +4265,9 @@ impl ParentStacksBlockInfo { return Err(Error::BurnchainTipChanged); } - debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {} (height {} hash {})", + debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {mine_tip_ch} (height {} hash {})", &check_burn_block.consensus_hash, check_burn_block.block_height, &check_burn_block.burn_header_hash, - mine_tip_ch, parent_snapshot.block_height, &parent_snapshot.burn_header_hash); + parent_snapshot.block_height, &parent_snapshot.burn_header_hash); let coinbase_nonce = { let principal = miner_address.into(); @@ -4345,8 +4279,7 @@ impl ParentStacksBlockInfo { ) .unwrap_or_else(|| { panic!( - "BUG: stacks tip block {}/{} no longer exists after we queried it", - mine_tip_ch, mine_tip_bh + "BUG: stacks tip block {mine_tip_ch}/{mine_tip_bh} no longer exists after we queried it" ) }); account.nonce @@ -4541,8 +4474,7 @@ impl PeerThread { let poll_ms = if !download_backpressure && self.get_network().has_more_downloads() { // keep getting those blocks -- drive the downloader state-machine debug!( - "P2P: backpressure: {}, more downloads: {}", - download_backpressure, + "P2P: backpressure: {download_backpressure}, more downloads: {}", self.get_network().has_more_downloads() ); 1 @@ -4626,7 +4558,7 @@ impl PeerThread { Err(e) => { // this is only reachable if the network is not instantiated correctly -- // i.e. you didn't connect it - panic!("P2P: Failed to process network dispatch: {:?}", &e); + panic!("P2P: Failed to process network dispatch: {e:?}"); } }; @@ -4688,9 +4620,8 @@ impl StacksNode { pub(crate) fn setup_ast_size_precheck(config: &Config, sortdb: &mut SortitionDB) { if let Some(ast_precheck_size_height) = config.burnchain.ast_precheck_size_height { info!( - "Override burnchain height of {:?} to {}", - ASTRules::PrecheckSize, - ast_precheck_size_height + "Override burnchain height of {:?} to {ast_precheck_size_height}", + ASTRules::PrecheckSize ); let mut tx = sortdb .tx_begin() @@ -4778,11 +4709,7 @@ impl StacksNode { stackerdb_contract_ids, ) .map_err(|e| { - eprintln!( - "Failed to open {}: {:?}", - &config.get_peer_db_file_path(), - &e - ); + eprintln!("Failed to open {}: {e:?}", &config.get_peer_db_file_path()); panic!(); }) .unwrap(); @@ -5035,7 +4962,7 @@ impl StacksNode { .get_miner_address(StacksEpochId::Epoch21, &public_key); let miner_addr_str = addr2str(&miner_addr); let _ = monitoring::set_burnchain_signer(BurnchainSigner(miner_addr_str)).map_err(|e| { - warn!("Failed to set global burnchain signer: {:?}", &e); + warn!("Failed to set global burnchain signer: {e:?}"); e }); } @@ -5259,14 +5186,14 @@ impl StacksNode { for op in block_commits.into_iter() { if op.txid == block_snapshot.winning_block_txid { info!( - "Received burnchain block #{} including block_commit_op (winning) - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op (winning) - {} ({})", + op.apparent_sender, &op.block_header_hash ); last_sortitioned_block = Some((block_snapshot.clone(), op.vtxindex)); } else if self.is_miner { info!( - "Received burnchain block #{} including block_commit_op - {} ({})", - block_height, op.apparent_sender, &op.block_header_hash + "Received burnchain block #{block_height} including block_commit_op - {} ({})", + op.apparent_sender, &op.block_header_hash 
); } } @@ -5280,8 +5207,7 @@ impl StacksNode { let num_key_registers = key_registers.len(); debug!( - "Processed burnchain state at height {}: {} leader keys, {} block-commits (ibd = {})", - block_height, num_key_registers, num_block_commits, ibd + "Processed burnchain state at height {block_height}: {num_key_registers} leader keys, {num_block_commits} block-commits (ibd = {ibd})" ); // save the registered VRF key @@ -5297,7 +5223,7 @@ impl StacksNode { return ret; }; - info!("Activated VRF key; saving to {}", &path); + info!("Activated VRF key; saving to {path}"); let Ok(key_json) = serde_json::to_string(&activated_key) else { warn!("Failed to serialize VRF key"); @@ -5307,17 +5233,17 @@ impl StacksNode { let mut f = match fs::File::create(path) { Ok(f) => f, Err(e) => { - warn!("Failed to create {}: {:?}", &path, &e); + warn!("Failed to create {path}: {e:?}"); return ret; } }; if let Err(e) = f.write_all(key_json.as_bytes()) { - warn!("Failed to write activated VRF key to {}: {:?}", &path, &e); + warn!("Failed to write activated VRF key to {path}: {e:?}"); return ret; } - info!("Saved activated VRF key to {}", &path); + info!("Saved activated VRF key to {path}"); ret } diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 62b6b094aa..3636223b3f 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -191,7 +191,7 @@ fn spawn_peer( let sortdb = match SortitionDB::open(&burn_db_path, false, pox_consts.clone()) { Ok(x) => x, Err(e) => { - warn!("Error while connecting burnchain db in peer loop: {}", e); + warn!("Error while connecting burnchain db in peer loop: {e}"); thread::sleep(time::Duration::from_secs(1)); continue; } @@ -204,7 +204,7 @@ fn spawn_peer( ) { Ok(x) => x, Err(e) => { - warn!("Error while connecting chainstate db in peer loop: {}", e); + warn!("Error while connecting chainstate db in peer loop: {e}"); thread::sleep(time::Duration::from_secs(1)); continue; } @@ -222,7 +222,7 @@ fn spawn_peer( ) { Ok(x) => x, Err(e) => { - warn!("Error while connecting to mempool db in peer loop: {}", e); + warn!("Error while connecting to mempool db in peer loop: {e}"); thread::sleep(time::Duration::from_secs(1)); continue; } @@ -319,9 +319,8 @@ impl Node { let (chain_state, receipts) = match chain_state_result { Ok(res) => res, Err(err) => panic!( - "Error while opening chain state at path {}: {:?}", - config.get_chainstate_path_str(), - err + "Error while opening chain state at path {}: {err:?}", + config.get_chainstate_path_str() ), }; @@ -419,7 +418,7 @@ impl Node { let initial_neighbors = self.config.node.bootstrap_node.clone(); - println!("BOOTSTRAP WITH {:?}", initial_neighbors); + println!("BOOTSTRAP WITH {initial_neighbors:?}"); let rpc_sock: SocketAddr = self.config.node.rpc_bind.parse().unwrap_or_else(|_| { @@ -789,15 +788,13 @@ impl Node { ) .unwrap_or_else(|_| { panic!( - "BUG: could not query chainstate to find parent consensus hash of {}/{}", - consensus_hash, + "BUG: could not query chainstate to find parent consensus hash of {consensus_hash}/{}", &anchored_block.block_hash() ) }) .unwrap_or_else(|| { panic!( - "BUG: no such parent of block {}/{}", - consensus_hash, + "BUG: no such parent of block {consensus_hash}/{}", &anchored_block.block_hash() ) }); @@ -852,7 +849,7 @@ impl Node { ) }; match process_blocks_at_tip { - Err(e) => panic!("Error while processing block - {:?}", e), + Err(e) => panic!("Error while processing block - {e:?}"), Ok(ref mut blocks) => { if blocks.is_empty() { break; diff --git 
a/testnet/stacks-node/src/operations.rs b/testnet/stacks-node/src/operations.rs index 0109077a5f..7e26fb42e2 100644 --- a/testnet/stacks-node/src/operations.rs +++ b/testnet/stacks-node/src/operations.rs @@ -43,7 +43,7 @@ impl BurnchainOpSigner { let signature = match self.secret_key.sign(hash) { Ok(r) => r, Err(e) => { - debug!("Secret key error: {:?}", &e); + debug!("Secret key error: {e:?}"); return None; } }; diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index ce4c06a16c..7990c04332 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -132,7 +132,7 @@ impl RunLoopCallbacks { match &tx.payload { TransactionPayload::Coinbase(..) => println!(" Coinbase"), TransactionPayload::SmartContract(contract, ..) => println!(" Publish smart contract\n**************************\n{:?}\n**************************", contract.code_body), - TransactionPayload::TokenTransfer(recipent, amount, _) => println!(" Transfering {} µSTX to {}", amount, recipent), + TransactionPayload::TokenTransfer(recipent, amount, _) => println!(" Transfering {amount} µSTX to {recipent}"), _ => println!(" {:?}", tx.payload) } } diff --git a/testnet/stacks-node/src/run_loop/nakamoto.rs b/testnet/stacks-node/src/run_loop/nakamoto.rs index de836568d2..16f5a12b2d 100644 --- a/testnet/stacks-node/src/run_loop/nakamoto.rs +++ b/testnet/stacks-node/src/run_loop/nakamoto.rs @@ -168,7 +168,7 @@ impl RunLoop { let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); if let Err(e) = burnchain.create_wallet_if_dne() { - warn!("Error when creating wallet: {:?}", e); + warn!("Error when creating wallet: {e:?}"); } let mut btc_addrs = vec![( StacksEpochId::Epoch2_05, @@ -429,7 +429,7 @@ impl RunLoop { return; } Err(e) => { - error!("Error initializing burnchain: {}", e); + error!("Error initializing burnchain: {e}"); info!("Exiting stacks-node"); return; } @@ -522,10 +522,7 @@ impl RunLoop { burnchain.get_headers_height() - 1, ); - debug!( - "Runloop: Begin main runloop starting a burnchain block {}", - sortition_db_height - ); + debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; let mut poll_deadline = 0; @@ -573,11 +570,10 @@ impl RunLoop { // runloop will cause the PoX sync watchdog to wait until it believes that the node has // obtained all the Stacks blocks it can. 
debug!( - "Runloop: Download burnchain blocks up to reward cycle #{} (height {})", + "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})", burnchain_config .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: target burnchain block height does not have a reward cycle"), - target_burnchain_block_height; + .expect("FATAL: target burnchain block height does not have a reward cycle"); "total_burn_sync_percent" => %percent, "local_burn_height" => burnchain_tip.block_snapshot.block_height, "remote_tip_height" => remote_chain_height @@ -598,7 +594,7 @@ impl RunLoop { match burnchain.sync(Some(target_burnchain_block_height)) { Ok(x) => x, Err(e) => { - warn!("Runloop: Burnchain controller stopped: {}", e); + warn!("Runloop: Burnchain controller stopped: {e}"); continue; } }; @@ -612,15 +608,13 @@ impl RunLoop { if next_sortition_height != last_tenure_sortition_height { info!( - "Runloop: Downloaded burnchain blocks up to height {}; target height is {}; remote_chain_height = {} next_sortition_height = {}, sortition_db_height = {}", - burnchain_height, target_burnchain_block_height, remote_chain_height, next_sortition_height, sortition_db_height + "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}" ); } if next_sortition_height > sortition_db_height { debug!( - "Runloop: New burnchain block height {} > {}", - next_sortition_height, sortition_db_height + "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); let mut sort_count = 0; @@ -666,8 +660,7 @@ impl RunLoop { num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {} from {} (chain tip height is {}); {} sortitions", - next_sortition_height, sortition_db_height, burnchain_height, num_sortitions_in_last_cycle; + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" ); sortition_db_height = next_sortition_height; @@ -699,7 +692,7 @@ impl RunLoop { remote_chain_height, ); - debug!("Runloop: Advance target burnchain block height from {} to {} (sortition height {})", target_burnchain_block_height, next_target_burnchain_block_height, sortition_db_height); + debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); target_burnchain_block_height = next_target_burnchain_block_height; if sortition_db_height >= burnchain_height && !ibd { @@ -709,9 +702,7 @@ impl RunLoop { .unwrap_or(0); if canonical_stacks_tip_height < mine_start { info!( - "Runloop: Synchronized full burnchain, but stacks tip height is {}, and we are trying to boot to {}, not mining until reaching chain tip", - canonical_stacks_tip_height, - mine_start + "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip" ); } else { // once we've synced to the chain tip once, don't apply this check again. @@ -722,13 +713,11 @@ impl RunLoop { if last_tenure_sortition_height != sortition_db_height { if is_miner { info!( - "Runloop: Synchronized full burnchain up to height {}. 
Proceeding to mine blocks", - sortition_db_height + "Runloop: Synchronized full burnchain up to height {sortition_db_height}. Proceeding to mine blocks" ); } else { info!( - "Runloop: Synchronized full burnchain up to height {}.", - sortition_db_height + "Runloop: Synchronized full burnchain up to height {sortition_db_height}." ); } last_tenure_sortition_height = sortition_db_height; diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 7be8939d9e..dd64fb5685 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -342,7 +342,7 @@ impl RunLoop { } } _ => { - let msg = format!("Graceful termination request received (signal `{}`), will complete the ongoing runloop cycles and terminate\n", sig_id); + let msg = format!("Graceful termination request received (signal `{sig_id}`), will complete the ongoing runloop cycles and terminate\n"); async_safe_write_stderr(&msg); keep_running_writer.store(false, Ordering::SeqCst); } @@ -353,7 +353,7 @@ impl RunLoop { if cfg!(test) || allow_err { info!("Error setting up signal handler, may have already been set"); } else { - panic!("FATAL: error setting termination handler - {}", e); + panic!("FATAL: error setting termination handler - {e}"); } } } @@ -370,7 +370,7 @@ impl RunLoop { let keychain = Keychain::default(self.config.node.seed.clone()); let mut op_signer = keychain.generate_op_signer(); if let Err(e) = burnchain.create_wallet_if_dne() { - warn!("Error when creating wallet: {:?}", e); + warn!("Error when creating wallet: {e:?}"); } let mut btc_addrs = vec![( StacksEpochId::Epoch2_05, @@ -461,7 +461,7 @@ impl RunLoop { panic!(); } Err(e) => { - panic!("FATAL: unable to query filesystem or databases: {:?}", &e); + panic!("FATAL: unable to query filesystem or databases: {e:?}"); } } @@ -475,13 +475,13 @@ impl RunLoop { Some(burnchain_tip) => { // database exists already, and has blocks -- just sync to its tip. 
let target_height = burnchain_tip.block_height + 1; - debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {}", burnchain_tip.block_height, target_height); + debug!("Burnchain DB exists and has blocks up to {}; synchronizing from where it left off up to {target_height}", burnchain_tip.block_height); target_height } None => { // database does not exist yet let target_height = 1.max(burnchain_config.first_block_height + 1); - debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {}", target_height); + debug!("Burnchain DB does not exist or does not have blocks; synchronizing to first burnchain block height {target_height}"); target_height } }; @@ -492,16 +492,16 @@ impl RunLoop { if matches!(e, Error::CoordinatorClosed) && !should_keep_running.load(Ordering::SeqCst) { - info!("Shutdown initiated during burnchain initialization: {}", e); + info!("Shutdown initiated during burnchain initialization: {e}"); return burnchain_error::ShutdownInitiated; } - error!("Burnchain controller stopped: {}", e); + error!("Burnchain controller stopped: {e}"); panic!(); })?; // if the chainstate DBs don't exist, this will instantiate them if let Err(e) = burnchain_controller.connect_dbs() { - error!("Failed to connect to burnchain databases: {}", e); + error!("Failed to connect to burnchain databases: {e}"); panic!(); }; @@ -739,7 +739,7 @@ impl RunLoop { ) { Ok(am) => am, Err(e) => { - warn!("Failed to find heaviest affirmation map: {:?}", &e); + warn!("Failed to find heaviest affirmation map: {e:?}"); return; } }; @@ -755,7 +755,7 @@ impl RunLoop { match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { Ok(am) => am, Err(e) => { - warn!("Failed to find sortition affirmation map: {:?}", &e); + warn!("Failed to find sortition affirmation map: {e:?}"); return; } }; @@ -781,26 +781,24 @@ impl RunLoop { .find_divergence(&heaviest_affirmation_map) .is_some() { - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map); + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {sortition_tip_affirmation_map}, heaviest: {heaviest_affirmation_map})"); globals.coord().announce_new_burn_block(); } else if highest_sn.block_height == sn.block_height && sn.block_height == canonical_burnchain_tip.block_height { // need to force an affirmation reorg because there will be no more burn block // announcements. 
- debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, burn height {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, sn.block_height); + debug!("Drive burn block processing: possible PoX reorg (sortition tip: {sortition_tip_affirmation_map}, heaviest: {heaviest_affirmation_map}, burn height {})", sn.block_height); globals.coord().announce_new_burn_block(); } debug!( - "Drive stacks block processing: possible PoX reorg (stacks tip: {}, heaviest: {})", - &stacks_tip_affirmation_map, &heaviest_affirmation_map + "Drive stacks block processing: possible PoX reorg (stacks tip: {stacks_tip_affirmation_map}, heaviest: {heaviest_affirmation_map})" ); globals.coord().announce_new_stacks_block(); } else { debug!( - "Drive stacks block processing: no need (stacks tip: {}, heaviest: {})", - &stacks_tip_affirmation_map, &heaviest_affirmation_map + "Drive stacks block processing: no need (stacks tip: {stacks_tip_affirmation_map}, heaviest: {heaviest_affirmation_map})" ); // announce a new stacks block to force the chains coordinator @@ -871,7 +869,7 @@ impl RunLoop { match SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id) { Ok(am) => am, Err(e) => { - warn!("Failed to find sortition affirmation map: {:?}", &e); + warn!("Failed to find sortition affirmation map: {e:?}"); return; } }; @@ -887,7 +885,7 @@ impl RunLoop { ) { Ok(am) => am, Err(e) => { - warn!("Failed to find heaviest affirmation map: {:?}", &e); + warn!("Failed to find heaviest affirmation map: {e:?}"); return; } }; @@ -902,7 +900,7 @@ impl RunLoop { ) { Ok(am) => am, Err(e) => { - warn!("Failed to find canonical affirmation map: {:?}", &e); + warn!("Failed to find canonical affirmation map: {e:?}"); return; } }; @@ -913,7 +911,7 @@ impl RunLoop { .is_some() || sn.block_height < highest_sn.block_height { - debug!("Drive burn block processing: possible PoX reorg (sortition tip: {}, heaviest: {}, {} = (heaviest_affirmation_map.len() as u64) { // we have unaffirmed PoX anchor blocks that are not yet processed in the sortition history - debug!("Drive burnchain processing: possible PoX reorg from unprocessed anchor block(s) (sortition tip: {}, heaviest: {}, canonical: {})", &sortition_tip_affirmation_map, &heaviest_affirmation_map, &canonical_affirmation_map); + debug!("Drive burnchain processing: possible PoX reorg from unprocessed anchor block(s) (sortition tip: {sortition_tip_affirmation_map}, heaviest: {heaviest_affirmation_map}, canonical: {canonical_affirmation_map})"); globals.coord().announce_new_burn_block(); globals.coord().announce_new_stacks_block(); *last_announce_time = get_epoch_time_secs().into(); @@ -933,9 +931,7 @@ impl RunLoop { } } else { debug!( - "Drive burn block processing: no need (sortition tip: {}, heaviest: {}, {} { - error!("Error initializing burnchain: {}", e); + error!("Error initializing burnchain: {e}"); info!("Exiting stacks-node"); return None; } @@ -1160,10 +1156,7 @@ impl RunLoop { burnchain.get_headers_height() - 1, ); - debug!( - "Runloop: Begin main runloop starting a burnchain block {}", - sortition_db_height - ); + debug!("Runloop: Begin main runloop starting a burnchain block {sortition_db_height}"); let mut last_tenure_sortition_height = 0; @@ -1202,7 +1195,7 @@ impl RunLoop { ) { Ok(ibd) => ibd, Err(e) => { - debug!("Runloop: PoX sync wait routine aborted: {:?}", e); + debug!("Runloop: PoX sync wait routine aborted: {e:?}"); continue; } }; @@ -1223,11 +1216,10 @@ impl RunLoop { // runloop will cause the PoX sync watchdog to 
wait until it believes that the node has // obtained all the Stacks blocks it can. debug!( - "Runloop: Download burnchain blocks up to reward cycle #{} (height {})", + "Runloop: Download burnchain blocks up to reward cycle #{} (height {target_burnchain_block_height})", burnchain_config .block_height_to_reward_cycle(target_burnchain_block_height) - .expect("FATAL: target burnchain block height does not have a reward cycle"), - target_burnchain_block_height; + .expect("FATAL: target burnchain block height does not have a reward cycle"); "total_burn_sync_percent" => %percent, "local_burn_height" => burnchain_tip.block_snapshot.block_height, "remote_tip_height" => remote_chain_height @@ -1242,7 +1234,7 @@ impl RunLoop { match burnchain.sync(Some(target_burnchain_block_height)) { Ok(x) => x, Err(e) => { - warn!("Runloop: Burnchain controller stopped: {}", e); + warn!("Runloop: Burnchain controller stopped: {e}"); continue; } }; @@ -1256,15 +1248,13 @@ impl RunLoop { if next_sortition_height != last_tenure_sortition_height { info!( - "Runloop: Downloaded burnchain blocks up to height {}; target height is {}; remote_chain_height = {} next_sortition_height = {}, sortition_db_height = {}", - burnchain_height, target_burnchain_block_height, remote_chain_height, next_sortition_height, sortition_db_height + "Runloop: Downloaded burnchain blocks up to height {burnchain_height}; target height is {target_burnchain_block_height}; remote_chain_height = {remote_chain_height} next_sortition_height = {next_sortition_height}, sortition_db_height = {sortition_db_height}" ); } if next_sortition_height > sortition_db_height { debug!( - "Runloop: New burnchain block height {} > {}", - next_sortition_height, sortition_db_height + "Runloop: New burnchain block height {next_sortition_height} > {sortition_db_height}" ); let mut sort_count = 0; @@ -1337,8 +1327,7 @@ impl RunLoop { num_sortitions_in_last_cycle = sort_count; debug!( - "Runloop: Synchronized sortitions up to block height {} from {} (chain tip height is {}); {} sortitions", - next_sortition_height, sortition_db_height, burnchain_height, num_sortitions_in_last_cycle; + "Runloop: Synchronized sortitions up to block height {next_sortition_height} from {sortition_db_height} (chain tip height is {burnchain_height}); {num_sortitions_in_last_cycle} sortitions" ); sortition_db_height = next_sortition_height; @@ -1370,7 +1359,7 @@ impl RunLoop { remote_chain_height, ); - debug!("Runloop: Advance target burnchain block height from {} to {} (sortition height {})", target_burnchain_block_height, next_target_burnchain_block_height, sortition_db_height); + debug!("Runloop: Advance target burnchain block height from {target_burnchain_block_height} to {next_target_burnchain_block_height} (sortition height {sortition_db_height})"); target_burnchain_block_height = next_target_burnchain_block_height; if sortition_db_height >= burnchain_height && !ibd { @@ -1380,9 +1369,7 @@ impl RunLoop { .unwrap_or(0); if canonical_stacks_tip_height < mine_start { info!( - "Runloop: Synchronized full burnchain, but stacks tip height is {}, and we are trying to boot to {}, not mining until reaching chain tip", - canonical_stacks_tip_height, - mine_start + "Runloop: Synchronized full burnchain, but stacks tip height is {canonical_stacks_tip_height}, and we are trying to boot to {mine_start}, not mining until reaching chain tip" ); } else { // once we've synced to the chain tip once, don't apply this check again. @@ -1393,8 +1380,7 @@ impl RunLoop { // at tip, and not downloading. 
proceed to mine. if last_tenure_sortition_height != sortition_db_height { info!( - "Runloop: Synchronized full burnchain up to height {}. Proceeding to mine blocks", - sortition_db_height + "Runloop: Synchronized full burnchain up to height {sortition_db_height}. Proceeding to mine blocks" ); last_tenure_sortition_height = sortition_db_height; } diff --git a/testnet/stacks-node/src/stacks_events.rs b/testnet/stacks-node/src/stacks_events.rs index 2f96bbfe66..d7ec349466 100644 --- a/testnet/stacks-node/src/stacks_events.rs +++ b/testnet/stacks-node/src/stacks_events.rs @@ -22,10 +22,7 @@ fn main() { if help { println!("Usage: stacks-events [--addr=]"); - println!( - " --addr= Address to listen on (default: {})", - DEFAULT_ADDR - ); + println!(" --addr= Address to listen on (default: {DEFAULT_ADDR})",); return; } @@ -34,7 +31,7 @@ fn main() { fn serve_for_events(addr: &String) { let listener = TcpListener::bind(addr).unwrap(); - eprintln!("Listening on {}", addr); + eprintln!("Listening on {addr}"); for stream in listener.incoming() { let stream = stream.unwrap(); handle_connection(stream); @@ -82,14 +79,13 @@ fn handle_connection(mut stream: TcpStream) { "path": path.unwrap(), "payload": payload_json, }); - println!("{}", record); + println!("{record}"); { let contents = "Thanks!"; let response = format!( - "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{}", - contents.len(), - contents + "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{contents}", + contents.len() ); let _nmb_bytes = stream.write(response.as_bytes()).unwrap(); diff --git a/testnet/stacks-node/src/syncctl.rs b/testnet/stacks-node/src/syncctl.rs index d4c05ec7fe..395d829c8f 100644 --- a/testnet/stacks-node/src/syncctl.rs +++ b/testnet/stacks-node/src/syncctl.rs @@ -180,8 +180,7 @@ impl PoxSyncWatchdog { Ok(cs) => cs, Err(e) => { return Err(format!( - "Failed to open chainstate at '{}': {:?}", - &chainstate_path, &e + "Failed to open chainstate at '{chainstate_path}': {e:?}" )); } }; @@ -217,7 +216,7 @@ impl PoxSyncWatchdog { self.max_staging, self.last_attachable_query, ) - .map_err(|e| format!("Failed to count attachable staging blocks: {:?}", &e))?; + .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; self.last_attachable_query = get_epoch_time_secs(); Ok(cnt) @@ -233,7 +232,7 @@ impl PoxSyncWatchdog { self.max_staging, self.last_processed_query, ) - .map_err(|e| format!("Failed to count attachable staging blocks: {:?}", &e))?; + .map_err(|e| format!("Failed to count attachable staging blocks: {e:?}"))?; self.last_processed_query = get_epoch_time_secs(); Ok(cnt) @@ -250,13 +249,13 @@ impl PoxSyncWatchdog { last_processed_height + (burnchain.stable_confirmations as u64) < burnchain_height; if ibd { debug!( - "PoX watchdog: {} + {} < {}, so initial block download", - last_processed_height, burnchain.stable_confirmations, burnchain_height + "PoX watchdog: {last_processed_height} + {} < {burnchain_height}, so initial block download", + burnchain.stable_confirmations ); } else { debug!( - "PoX watchdog: {} + {} >= {}, so steady-state", - last_processed_height, burnchain.stable_confirmations, burnchain_height + "PoX watchdog: {last_processed_height} + {} >= {burnchain_height}, so steady-state", + burnchain.stable_confirmations ); } ibd @@ -344,7 +343,7 @@ impl PoxSyncWatchdog { ) -> f64 { let this_reward_cycle = burnchain .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {}", tip_height)); + .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); 
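Aside: the pattern throughout this patch is Rust's inline format-argument capture (stable since Rust 1.58). Only a bare identifier can be named inside the format string, optionally with a spec such as `{e:?}` or `{version:02x}`; field accesses, method calls, and other expressions cannot be captured, which is why `contents.len()`, `burnchain.stable_confirmations`, and `self.estimated_block_download_time` remain trailing positional arguments in these hunks. The structured-field tail after the `;` in the node's logging macros (e.g. `"total_burn_sync_percent" => %percent`) is separate from the format string and is left untouched. A minimal, self-contained sketch of the rule:

    fn main() {
        let sortition_db_height = 1234u64;
        let err = std::io::Error::from(std::io::ErrorKind::Other);
        let contents = "Thanks!";

        // Bare identifiers can be captured, with or without a format spec.
        println!("height {sortition_db_height}, error {err:?}");

        // Method calls are not identifiers, so they remain positional.
        let response = format!(
            "HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{contents}",
            contents.len()
        );
        assert!(response.ends_with("Thanks!"));
    }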
let prev_reward_cycle = this_reward_cycle.saturating_sub(1); let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); @@ -372,7 +371,7 @@ impl PoxSyncWatchdog { ) -> f64 { let this_reward_cycle = burnchain .block_height_to_reward_cycle(tip_height) - .unwrap_or_else(|| panic!("BUG: no reward cycle for {}", tip_height)); + .unwrap_or_else(|| panic!("BUG: no reward cycle for {tip_height}")); let prev_reward_cycle = this_reward_cycle.saturating_sub(1); let start_height = burnchain.reward_cycle_to_block_height(prev_reward_cycle); @@ -459,10 +458,7 @@ impl PoxSyncWatchdog { } if self.unconditionally_download { - debug!( - "PoX watchdog set to unconditionally download (ibd={})", - ibbd - ); + debug!("PoX watchdog set to unconditionally download (ibd={ibbd})"); self.relayer_comms.set_ibd(ibbd); return Ok(ibbd); } @@ -561,7 +557,7 @@ impl PoxSyncWatchdog { && get_epoch_time_secs() < expected_first_block_deadline { // still waiting for that first block in this reward cycle - debug!("PoX watchdog: Still warming up: waiting until {}s for first Stacks block download (estimated download time: {}s)...", expected_first_block_deadline, self.estimated_block_download_time); + debug!("PoX watchdog: Still warming up: waiting until {expected_first_block_deadline}s for first Stacks block download (estimated download time: {}s)...", self.estimated_block_download_time); sleep_ms(PER_SAMPLE_WAIT_MS); continue; } @@ -596,8 +592,8 @@ impl PoxSyncWatchdog { let (flat_processed, processed_deviants) = PoxSyncWatchdog::is_mostly_flat(&processed_delta, 0); - debug!("PoX watchdog: flat-attachable?: {}, flat-processed?: {}, estimated block-download time: {}s, estimated block-processing time: {}s", - flat_attachable, flat_processed, self.estimated_block_download_time, self.estimated_block_process_time); + debug!("PoX watchdog: flat-attachable?: {flat_attachable}, flat-processed?: {flat_processed}, estimated block-download time: {}s, estimated block-processing time: {}s", + self.estimated_block_download_time, self.estimated_block_process_time); if flat_attachable && flat_processed && self.last_block_processed_ts == 0 { // we're flat-lining -- this may be the end of this cycle @@ -607,8 +603,8 @@ impl PoxSyncWatchdog { if self.last_block_processed_ts > 0 && get_epoch_time_secs() < expected_last_block_deadline { - debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{})s before burnchain synchronization (estimated block-processing time: {}s)", - get_epoch_time_secs() + 1, expected_last_block_deadline, self.estimated_block_process_time); + debug!("PoX watchdog: Still processing blocks; waiting until at least min({},{expected_last_block_deadline})s before burnchain synchronization (estimated block-processing time: {}s)", + get_epoch_time_secs() + 1, self.estimated_block_process_time); sleep_ms(PER_SAMPLE_WAIT_MS); continue; } @@ -617,8 +613,7 @@ impl PoxSyncWatchdog { // doing initial burnchain block download right now. 
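Aside: the watchdog's initial-block-download decision logged above reduces to a single comparison: the node is still in IBD while its last processed burn height plus the burnchain's stable-confirmation depth has not reached the burnchain tip. A hypothetical standalone version of that predicate (the helper name is illustrative, not part of the node):

    /// True while `last_processed_height + stable_confirmations < burnchain_height`,
    /// i.e. the node has not yet processed up to the stable burnchain tip.
    fn is_initial_block_download(
        last_processed_height: u64,
        stable_confirmations: u64,
        burnchain_height: u64,
    ) -> bool {
        last_processed_height + stable_confirmations < burnchain_height
    }

    fn main() {
        // 100 processed + 7 confirmations < 200 tip: still syncing.
        assert!(is_initial_block_download(100, 7, 200));
        // 195 processed + 7 confirmations >= 200 tip: steady state.
        assert!(!is_initial_block_download(195, 7, 200));
    }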
// only proceed to fetch the next reward cycle's burnchain blocks if we're neither downloading nor // attaching blocks recently - debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {}, flat-processed = {}, min-attachable: {}, min-processed: {}", - flat_attachable, flat_processed, &attachable_deviants, &processed_deviants); + debug!("PoX watchdog: In initial burnchain block download: flat-attachable = {flat_attachable}, flat-processed = {flat_processed}, min-attachable: {attachable_deviants}, min-processed: {processed_deviants}"); if !flat_attachable || !flat_processed { sleep_ms(PER_SAMPLE_WAIT_MS); @@ -645,7 +640,7 @@ impl PoxSyncWatchdog { } (err_attach, err_processed) => { // can only happen on DB query failure - error!("PoX watchdog: Failed to count recently attached ('{:?}') and/or processed ('{:?}') staging blocks", &err_attach, &err_processed); + error!("PoX watchdog: Failed to count recently attached ('{err_attach:?}') and/or processed ('{err_processed:?}') staging blocks"); panic!(); } }; diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 702f6d5953..3e69ac18cc 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -118,8 +118,7 @@ impl BitcoinCoreController { } } else { return Err(BitcoinCoreError::StopFailed(format!( - "Invalid response: {:?}", - res + "Invalid response: {res:?}" ))); } } diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index ebe14bae16..1ad23db5e1 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -128,7 +128,7 @@ fn advance_to_2_1( btc_regtest_controller.bootstrap_chain(1); let mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); - debug!("Mining pubkey is {}", &mining_pubkey); + debug!("Mining pubkey is {mining_pubkey}"); btc_regtest_controller.set_mining_pubkey(MINER_BURN_PUBLIC_KEY.to_string()); mining_pubkey @@ -136,7 +136,7 @@ fn advance_to_2_1( btc_regtest_controller.bootstrap_chain(1); let mining_pubkey = btc_regtest_controller.get_mining_pubkey().unwrap(); - debug!("Mining pubkey is {}", &mining_pubkey); + debug!("Mining pubkey is {mining_pubkey}"); btc_regtest_controller.set_mining_pubkey(MINER_BURN_PUBLIC_KEY.to_string()); btc_regtest_controller.bootstrap_chain(1); @@ -154,8 +154,8 @@ fn advance_to_2_1( .get_all_utxos(&Secp256k1PublicKey::from_hex(&mining_pubkey).unwrap()); eprintln!( - "UTXOs for {} (segwit={}): {:?}", - &mining_pubkey, conf.miner.segwit, &utxos + "UTXOs for {mining_pubkey} (segwit={}): {utxos:?}", + conf.miner.segwit ); assert_eq!(utxos.len(), 1); @@ -198,8 +198,8 @@ fn advance_to_2_1( let pox_info = get_pox_info(&http_origin).unwrap(); eprintln!( - "\nPoX info at {}\n{:?}\n\n", - tip_info.burn_block_height, &pox_info + "\nPoX info at {}\n{pox_info:?}\n\n", + tip_info.burn_block_height ); // this block is the epoch transition? 
@@ -217,7 +217,7 @@ fn advance_to_2_1( ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip @@ -225,7 +225,6 @@ fn advance_to_2_1( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, tip_info.burn_block_height, - res ); if tip_info.burn_block_height >= epoch_2_1 { @@ -252,7 +251,7 @@ fn advance_to_2_1( true, ) .unwrap_err(); - eprintln!("No pox-2: {}", &e); + eprintln!("No pox-2: {e}"); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -410,7 +409,7 @@ fn transition_adds_burn_block_height() { for event in events.iter() { if let Some(cev) = event.get("contract_event") { // strip leading `0x` - eprintln!("{:#?}", &cev); + eprintln!("{cev:#?}"); let clarity_serialized_value = hex_bytes( str::from_utf8( &cev.get("raw_value").unwrap().as_str().unwrap().as_bytes()[2..], @@ -734,15 +733,14 @@ fn transition_fixes_bitcoin_rigidity() { ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip ), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, - tip_info.burn_block_height, - res + tip_info.burn_block_height ); if tip_info.burn_block_height >= epoch_2_1 { @@ -778,7 +776,7 @@ fn transition_fixes_bitcoin_rigidity() { true, ) .unwrap_err(); - eprintln!("No pox-2: {}", &e); + eprintln!("No pox-2: {e}"); // costs-3 should NOT be initialized let e = get_contract_src( @@ -788,7 +786,7 @@ fn transition_fixes_bitcoin_rigidity() { true, ) .unwrap_err(); - eprintln!("No costs-3: {}", &e); + eprintln!("No costs-3: {e}"); } next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -1093,8 +1091,7 @@ fn transition_adds_get_pox_addr_recipients() { let spender_sk = spender_sks[i]; let pox_addr_tuple = execute( &format!( - "{{ hashbytes: 0x{}, version: 0x{:02x} }}", - pox_pubkey_hash, + "{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x{:02x} }}", &(*addr_variant as u8) ), ClarityVersion::Clarity2, @@ -1136,7 +1133,7 @@ fn transition_adds_get_pox_addr_recipients() { } }; let pox_addr_tuple = execute( - &format!("{{ hashbytes: 0x{}, version: 0x{:02x} }}", &bytes, &version), + &format!("{{ hashbytes: 0x{bytes}, version: 0x{version:02x} }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -1192,7 +1189,7 @@ fn transition_adds_get_pox_addr_recipients() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); test_observer::clear(); // mine through two reward cycles @@ -1200,7 +1197,7 @@ fn transition_adds_get_pox_addr_recipients() { while sort_height < stack_sort_height + (((2 * pox_constants.reward_cycle_length) + 1) as u64) { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = coord_channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); + eprintln!("Sort height: {sort_height}"); } let cc_tx = make_contract_call( @@ -1285,16 +1282,16 @@ fn transition_adds_get_pox_addr_recipients() { .unwrap(); // NOTE: there's an even number of payouts here, so this works - eprintln!("payout at {} = {}", burn_block_height, &payout); + eprintln!("payout at {burn_block_height} = {payout}"); if pox_constants.is_in_prepare_phase(0, 
burn_block_height) { // in prepare phase - eprintln!("{} in prepare phase", burn_block_height); + eprintln!("{burn_block_height} in prepare phase"); assert_eq!(payout, conf.burnchain.burn_fee_cap as u128); assert_eq!(pox_addr_tuples.len(), 1); } else { // in reward phase - eprintln!("{} in reward phase", burn_block_height); + eprintln!("{burn_block_height} in reward phase"); assert_eq!( payout, (conf.burnchain.burn_fee_cap / (OUTPUTS_PER_COMMIT as u64)) @@ -1309,7 +1306,7 @@ fn transition_adds_get_pox_addr_recipients() { .unwrap_or_else(|| { panic!("FATAL: invalid PoX tuple {pox_addr_value:?}") }); - eprintln!("at {}: {:?}", burn_block_height, &pox_addr); + eprintln!("at {burn_block_height}: {pox_addr:?}"); if !pox_addr.is_burn() { found_pox_addrs.insert(pox_addr); } @@ -1321,14 +1318,14 @@ fn transition_adds_get_pox_addr_recipients() { } } - eprintln!("found pox addrs: {:?}", &found_pox_addrs); + eprintln!("found pox addrs: {found_pox_addrs:?}"); assert_eq!(found_pox_addrs.len(), 7); for addr in found_pox_addrs .into_iter() .map(|addr| Value::Tuple(addr.as_clarity_tuple().unwrap())) { - eprintln!("Contains: {:?}", &addr); + eprintln!("Contains: {addr:?}"); assert!(expected_pox_addrs.contains(&addr.to_string())); } } @@ -1391,7 +1388,7 @@ fn transition_adds_mining_from_segwit() { ); if let Some(BitcoinAddress::Segwit(SegwitBitcoinAddress::P2WPKH(..))) = &utxo_addr { } else { - panic!("UTXO address was {:?}", &utxo_addr); + panic!("UTXO address was {utxo_addr:?}"); } } @@ -1424,7 +1421,7 @@ fn transition_adds_mining_from_segwit() { let txid = commits[0].txid; let tx = btc_regtest_controller.get_raw_transaction(&txid); - eprintln!("tx = {:?}", &tx); + eprintln!("tx = {tx:?}"); assert_eq!(tx.input[0].witness.len(), 2); let addr = BitcoinAddress::try_from_segwit( false, @@ -1577,7 +1574,7 @@ fn transition_removes_pox_sunset() { &[ Value::UInt(first_bal as u128 - 260 * 3), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -1591,18 +1588,18 @@ fn transition_removes_pox_sunset() { submit_tx(&http_origin, &tx); let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-1: {}", sort_height); + eprintln!("Sort height pox-1: {sort_height}"); // advance to next reward cycle for _i in 0..(reward_cycle_len * 2 + 2) { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-1: {} <= {}", sort_height, epoch_21); + eprintln!("Sort height pox-1: {sort_height} <= {epoch_21}"); } // pox must activate let pox_info = get_pox_info(&http_origin).unwrap(); - eprintln!("pox_info in pox-1 = {:?}", &pox_info); + eprintln!("pox_info in pox-1 = {pox_info:?}"); assert!(pox_info.current_cycle.is_pox_active); assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox"); @@ -1610,7 +1607,7 @@ fn transition_removes_pox_sunset() { while sort_height <= epoch_21 + 1 { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-1: {} <= {}", sort_height, epoch_21); + eprintln!("Sort height pox-1: {sort_height} <= {epoch_21}"); } let pox_info = get_pox_info(&http_origin).unwrap(); @@ -1618,7 +1615,7 @@ fn transition_removes_pox_sunset() { // pox is still "active" despite unlock, because there's enough participation, and also even // though the v1 block height has passed, the pox-2 
contract won't be managing reward sets // until the next reward cycle - eprintln!("pox_info in pox-2 = {:?}", &pox_info); + eprintln!("pox_info in pox-2 = {pox_info:?}"); assert!(pox_info.current_cycle.is_pox_active); assert_eq!(&pox_info.contract_id, "ST000000000000000000002AMW42H.pox-2"); @@ -1634,7 +1631,7 @@ fn transition_removes_pox_sunset() { &[ Value::UInt(first_bal as u128 - 260 * 3), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -1651,10 +1648,7 @@ fn transition_removes_pox_sunset() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!( - "Sort height pox-1 to pox-2 with stack-stx to pox-2: {}", - sort_height - ); + eprintln!("Sort height pox-1 to pox-2 with stack-stx to pox-2: {sort_height}"); let pox_info = get_pox_info(&http_origin).unwrap(); assert!(pox_info.current_cycle.is_pox_active); @@ -1663,11 +1657,11 @@ fn transition_removes_pox_sunset() { while sort_height <= epoch_21 + reward_cycle_len { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height pox-2: {}", sort_height); + eprintln!("Sort height pox-2: {sort_height}"); } let pox_info = get_pox_info(&http_origin).unwrap(); - eprintln!("pox_info = {:?}", &pox_info); + eprintln!("pox_info = {pox_info:?}"); assert!(pox_info.current_cycle.is_pox_active); // first full reward cycle with pox-2 @@ -1839,8 +1833,8 @@ fn transition_empty_blocks() { let pox_info = get_pox_info(&http_origin).unwrap(); eprintln!( - "\nPoX info at {}\n{:?}\n\n", - tip_info.burn_block_height, &pox_info + "\nPoX info at {}\n{pox_info:?}\n\n", + tip_info.burn_block_height ); // this block is the epoch transition? 
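Aside: several of these tests assemble a Clarity PoX-address tuple as a string and hand it to `execute`. The format strings rely on `{{`/`}}` being the escapes for literal braces, and on `0x{version:02x}` zero-padding the captured version byte to two hex digits. A small standalone sketch (the helper name is illustrative, not defined in the patch):

    /// Mirrors the tuple literals built in these tests,
    /// e.g. "{ hashbytes: 0xdeadbeef, version: 0x00 }".
    fn pox_addr_tuple_literal(hashbytes_hex: &str, version: u8) -> String {
        // `{{`/`}}` emit literal braces; `:02x` prints the byte as two hex digits.
        format!("{{ hashbytes: 0x{hashbytes_hex}, version: 0x{version:02x} }}")
    }

    fn main() {
        assert_eq!(
            pox_addr_tuple_literal("deadbeef", 0),
            "{ hashbytes: 0xdeadbeef, version: 0x00 }"
        );
    }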
@@ -1858,15 +1852,14 @@ fn transition_empty_blocks() { ) .unwrap(); debug!( - "Epoch transition at {} ({}/{}) height {}: {}", + "Epoch transition at {} ({}/{}) height {}: {res}", &StacksBlockHeader::make_index_block_hash( &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip ), &tip_info.stacks_tip_consensus_hash, &tip_info.stacks_tip, - tip_info.burn_block_height, - res + tip_info.burn_block_height ); if tip_info.burn_block_height == epoch_2_05 || tip_info.burn_block_height == epoch_2_1 { @@ -1965,7 +1958,7 @@ pub fn wait_pox_stragglers(confs: &[Config], max_stacks_tip: u64, block_time_ms: for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.stacks_tip_height < max_stacks_tip { straggler = true; @@ -2095,9 +2088,9 @@ fn test_pox_reorgs_three_flaps() { let rpc_port = 41043 + 10 * i; let p2p_port = 41043 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); // conf.connection_options.inv_reward_cycles = 10; @@ -2111,9 +2104,8 @@ fn test_pox_reorgs_three_flaps() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -2194,7 +2186,7 @@ fn test_pox_reorgs_three_flaps() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -2209,16 +2201,16 @@ fn test_pox_reorgs_three_flaps() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -2249,7 +2241,7 @@ fn test_pox_reorgs_three_flaps() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -2273,7 +2265,7 @@ fn test_pox_reorgs_three_flaps() { // everyone locks up for (cnt, tx) in stacking_txs.iter().enumerate() { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); submit_tx(&http_origin, tx); } @@ -2285,7 +2277,7 @@ fn test_pox_reorgs_three_flaps() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -2296,7 +2288,7 @@ fn test_pox_reorgs_three_flaps() { let mut cnt = 0; for tx_chain in all_txs { for tx in 
tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -2304,7 +2296,7 @@ fn test_pox_reorgs_three_flaps() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -2313,7 +2305,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); //assert_eq!(tip_info.affirmations.heaviest, AffirmationMap::decode("nnnnnnnnnnnnnnnnnnnnp").unwrap()); } @@ -2328,13 +2320,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2346,20 +2338,20 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2371,7 +2363,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -2382,13 +2374,13 @@ fn test_pox_reorgs_three_flaps() { // miner 1 mines a prepare phase and confirms a hidden anchor block. 
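Aside: the multi-miner tests in this file keep their nodes apart by deriving every bind address from the miner index (RPC at `41043 + 10 * i` and p2p on the next port in the config setup above). A minimal sketch of that scheme (the function name is illustrative; the base port matches the first of these tests):

    /// Derives (rpc_bind, data_url, p2p_bind) for miner `i` from a base port,
    /// so parallel test nodes never collide.
    fn miner_bindings(base: u16, i: u16) -> (String, String, String) {
        let rpc_port = base + 10 * i;
        let p2p_port = rpc_port + 1;
        (
            format!("127.0.0.1:{rpc_port}"),
            format!("http://127.0.0.1:{rpc_port}"),
            format!("127.0.0.1:{p2p_port}"),
        )
    }

    fn main() {
        let (rpc_bind, data_url, p2p_bind) = miner_bindings(41043, 1);
        assert_eq!(rpc_bind, "127.0.0.1:41053");
        assert_eq!(data_url, "http://127.0.0.1:41053");
        assert_eq!(p2p_bind, "127.0.0.1:41054");
    }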
// miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2400,7 +2392,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history continues to overtake miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -2411,13 +2403,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2429,7 +2421,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 0 may have won here, but its affirmation map isn't yet the heaviest. } @@ -2438,13 +2430,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2456,7 +2448,7 @@ fn test_pox_reorgs_three_flaps() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 0's affirmation map now becomes the heaviest. } @@ -2465,13 +2457,13 @@ fn test_pox_reorgs_three_flaps() { // miner 0 mines a prepare phase and confirms a hidden anchor block. 
// miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2484,7 +2476,7 @@ fn test_pox_reorgs_three_flaps() { let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 0's affirmation map is now the heaviest, and there's no longer a tie. max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); @@ -2498,23 +2490,20 @@ fn test_pox_reorgs_three_flaps() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate - eprintln!( - "Wait for all blocks to propagate; max tip is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; max tip is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on affirmation maps for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -2619,9 +2608,9 @@ fn test_pox_reorg_one_flap() { let rpc_port = 41063 + 10 * i; let p2p_port = 41063 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } @@ -2633,9 +2622,8 @@ fn test_pox_reorg_one_flap() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -2716,7 +2704,7 @@ fn test_pox_reorg_one_flap() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -2731,16 +2719,16 @@ fn test_pox_reorg_one_flap() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -2771,7 +2759,7 @@ fn test_pox_reorg_one_flap() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + 
&format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -2795,7 +2783,7 @@ fn test_pox_reorg_one_flap() { // everyone locks up for (cnt, tx) in stacking_txs.iter().enumerate() { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); submit_tx(&http_origin, tx); } @@ -2807,7 +2795,7 @@ fn test_pox_reorg_one_flap() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -2818,7 +2806,7 @@ fn test_pox_reorg_one_flap() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -2826,7 +2814,7 @@ fn test_pox_reorg_one_flap() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -2835,7 +2823,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -2848,13 +2836,13 @@ fn test_pox_reorg_one_flap() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2866,20 +2854,20 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -2892,7 +2880,7 @@ fn test_pox_reorg_one_flap() { let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. 
// Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -2908,23 +2896,20 @@ fn test_pox_reorg_one_flap() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate - eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -3029,9 +3014,9 @@ fn test_pox_reorg_flap_duel() { let rpc_port = 41083 + 10 * i; let p2p_port = 41083 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } @@ -3044,9 +3029,8 @@ fn test_pox_reorg_flap_duel() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -3127,7 +3111,7 @@ fn test_pox_reorg_flap_duel() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -3142,16 +3126,16 @@ fn test_pox_reorg_flap_duel() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -3182,7 +3166,7 @@ fn test_pox_reorg_flap_duel() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -3206,7 +3190,7 @@ fn test_pox_reorg_flap_duel() { // everyone locks up for (cnt, tx) in stacking_txs.iter().enumerate() { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); submit_tx(&http_origin, tx); } @@ -3218,7 +3202,7 @@ fn test_pox_reorg_flap_duel() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -3229,7 +3213,7 @@ fn test_pox_reorg_flap_duel() { let mut cnt = 0; for tx_chain in all_txs { 
for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -3237,7 +3221,7 @@ fn test_pox_reorg_flap_duel() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -3246,7 +3230,7 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); //assert_eq!(tip_info.affirmations.heaviest, AffirmationMap::decode("nnnnnnnnnnnnnnnnnnnnp").unwrap()); } @@ -3268,13 +3252,13 @@ fn test_pox_reorg_flap_duel() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } @@ -3287,20 +3271,20 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -3312,7 +3296,7 @@ fn test_pox_reorg_flap_duel() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -3329,7 +3313,7 @@ fn test_pox_reorg_flap_duel() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation @@ -3339,16 +3323,13 @@ fn test_pox_reorg_flap_duel() { // NOTE: the stacks affirmation maps will differ from the heaviest affirmation map, because the // act of flapping back and forth so much will have caused these nodes to forget about some of // their anchor blocks. This is an artifact of the test. 
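Aside: every one of these reorg tests ends the same way: block propagation is resumed and then `wait_pox_stragglers` polls until every node's reported Stacks tip has caught up to the highest tip seen. A self-contained sketch of that polling shape, with `get_tip` standing in for `get_chain_info(..).stacks_tip_height` (an assumption made for illustration):

    use std::{thread::sleep, time::Duration};

    /// Poll each node's tip until all of them reach `target`,
    /// giving up after `max_polls` rounds.
    fn wait_for_stragglers<F>(node_count: usize, target: u64, max_polls: u32, mut get_tip: F) -> bool
    where
        F: FnMut(usize) -> u64,
    {
        for _ in 0..max_polls {
            if (0..node_count).all(|i| get_tip(i) >= target) {
                return true;
            }
            sleep(Duration::from_millis(100));
        }
        false
    }

    fn main() {
        // Toy usage: both "nodes" already report tips at or past the target.
        let tips = [105u64, 103];
        assert!(wait_for_stragglers(tips.len(), 100, 10, |i| tips[i]));
    }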
- eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -3450,9 +3431,9 @@ fn test_pox_reorg_flap_reward_cycles() { let rpc_port = 41123 + 10 * i; let p2p_port = 41123 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } @@ -3464,9 +3445,8 @@ fn test_pox_reorg_flap_reward_cycles() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -3547,7 +3527,7 @@ fn test_pox_reorg_flap_reward_cycles() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -3562,16 +3542,16 @@ fn test_pox_reorg_flap_reward_cycles() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -3602,7 +3582,7 @@ fn test_pox_reorg_flap_reward_cycles() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -3626,7 +3606,7 @@ fn test_pox_reorg_flap_reward_cycles() { // everyone locks up for (cnt, tx) in stacking_txs.iter().enumerate() { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); submit_tx(&http_origin, tx); } @@ -3638,7 +3618,7 @@ fn test_pox_reorg_flap_reward_cycles() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -3649,7 +3629,7 @@ fn test_pox_reorg_flap_reward_cycles() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -3657,7 +3637,7 @@ fn test_pox_reorg_flap_reward_cycles() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: 
{tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -3666,7 +3646,7 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -3686,13 +3666,13 @@ fn test_pox_reorg_flap_reward_cycles() { // miner 1 is disabled for this reward cycle signal_mining_blocked(miner_status[1].clone()); for i in 0..20 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } } @@ -3701,7 +3681,7 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -3709,13 +3689,13 @@ fn test_pox_reorg_flap_reward_cycles() { // miner 0 is disabled for this reward cycle signal_mining_blocked(miner_status[0].clone()); for i in 0..20 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } } signal_mining_ready(miner_status[0].clone()); @@ -3723,7 +3703,7 @@ fn test_pox_reorg_flap_reward_cycles() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -3740,7 +3720,7 @@ fn test_pox_reorg_flap_reward_cycles() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation @@ -3750,16 +3730,13 @@ fn test_pox_reorg_flap_reward_cycles() { // NOTE: the stacks affirmation maps will differ from the heaviest affirmation map, because the // act of flapping back and forth so much will have caused these nodes to forget about some of // their anchor blocks. This is an artifact of the test. 
- eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -3864,9 +3841,9 @@ fn test_pox_missing_five_anchor_blocks() { let rpc_port = 41103 + 10 * i; let p2p_port = 41103 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } @@ -3878,9 +3855,8 @@ fn test_pox_missing_five_anchor_blocks() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -3961,7 +3937,7 @@ fn test_pox_missing_five_anchor_blocks() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -3976,16 +3952,16 @@ fn test_pox_missing_five_anchor_blocks() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -4016,7 +3992,7 @@ fn test_pox_missing_five_anchor_blocks() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -4040,7 +4016,7 @@ fn test_pox_missing_five_anchor_blocks() { // everyone locks up for (cnt, tx) in stacking_txs.iter().enumerate() { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); submit_tx(&http_origin, tx); } @@ -4052,7 +4028,7 @@ fn test_pox_missing_five_anchor_blocks() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -4063,7 +4039,7 @@ fn test_pox_missing_five_anchor_blocks() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -4071,7 +4047,7 @@ fn test_pox_missing_five_anchor_blocks() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + 
info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -4080,7 +4056,7 @@ fn test_pox_missing_five_anchor_blocks() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -4095,13 +4071,13 @@ fn test_pox_missing_five_anchor_blocks() { // miner 0 mines a prepare phase and confirms a hidden anchor block. // miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {} cycle {}\n\n", i, c); + eprintln!("\n\nBuild block {i} cycle {c}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -4112,7 +4088,7 @@ fn test_pox_missing_five_anchor_blocks() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } info!("####################### end of cycle ##############################"); @@ -4125,7 +4101,7 @@ fn test_pox_missing_five_anchor_blocks() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation @@ -4133,16 +4109,13 @@ fn test_pox_missing_five_anchor_blocks() { // wait for all blocks to propagate. 
// miner 1 should learn about all of miner 0's blocks - info!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + info!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}",); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -4250,9 +4223,9 @@ fn test_sortition_divergence_pre_21() { let rpc_port = 41113 + 10 * i; let p2p_port = 41113 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } @@ -4264,9 +4237,8 @@ fn test_sortition_divergence_pre_21() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -4347,7 +4319,7 @@ fn test_sortition_divergence_pre_21() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -4362,16 +4334,16 @@ fn test_sortition_divergence_pre_21() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 2: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -4402,7 +4374,7 @@ fn test_sortition_divergence_pre_21() { &[ Value::UInt(2_000_000_000_000_000 - 30_000_000), execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), + &format!("{{ hashbytes: 0x{pox_pubkey_hash}, version: 0x00 }}"), ClarityVersion::Clarity1, ) .unwrap() @@ -4426,7 +4398,7 @@ fn test_sortition_divergence_pre_21() { // everyone locks up for (cnt, tx) in stacking_txs.iter().enumerate() { - eprintln!("\n\nSubmit stacking tx {}\n\n", &cnt); + eprintln!("\n\nSubmit stacking tx {cnt}\n\n"); submit_tx(&http_origin, tx); } @@ -4438,7 +4410,7 @@ fn test_sortition_divergence_pre_21() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -4449,7 +4421,7 @@ fn test_sortition_divergence_pre_21() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -4457,7 +4429,7 @@ fn test_sortition_divergence_pre_21() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, 
&tip_info); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -4466,7 +4438,7 @@ fn test_sortition_divergence_pre_21() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -4481,13 +4453,13 @@ fn test_sortition_divergence_pre_21() { // mine a reward cycle in which the 2.05 rules choose a PoX anchor block, but the 2.1 rules do // not. for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len && i < reward_cycle_len - prepare_phase_len + 3 @@ -4516,26 +4488,26 @@ fn test_sortition_divergence_pre_21() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } info!("####################### end of cycle ##############################"); for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } } info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); max_stacks_tip = std::cmp::max(tip_info.stacks_tip_height, max_stacks_tip); } info!("####################### end of cycle ##############################"); @@ -4543,13 +4515,13 @@ fn test_sortition_divergence_pre_21() { // run some cycles in 2.1 for _ in 0..2 { for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } } } @@ -4561,23 +4533,20 @@ fn test_sortition_divergence_pre_21() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate. 
// miner 1 should learn about all of miner 0's blocks - info!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + info!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } @@ -4767,8 +4736,7 @@ fn trait_invocation_cross_epoch() { "invoke-simple", "invocation-2", &[Value::Principal(PrincipalData::Contract( - QualifiedContractIdentifier::parse(&format!("{}.{}", &spender_addr_c32, "impl-simple")) - .unwrap(), + QualifiedContractIdentifier::parse(&format!("{spender_addr_c32}.impl-simple")).unwrap(), ))], ); let invoke_2_txid = submit_tx(&http_origin, &tx); @@ -4951,7 +4919,7 @@ fn test_v1_unlock_height_with_current_stackers() { // stack right away let sort_height = channel.get_sortitions_processed() + 1; let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -4972,7 +4940,7 @@ fn test_v1_unlock_height_with_current_stackers() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until epoch 2.1 @@ -4988,7 +4956,7 @@ fn test_v1_unlock_height_with_current_stackers() { let sort_height = channel.get_sortitions_processed() + 1; let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5009,7 +4977,7 @@ fn test_v1_unlock_height_with_current_stackers() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -5043,7 +5011,7 @@ fn test_v1_unlock_height_with_current_stackers() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? 
pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -5207,7 +5175,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5228,7 +5196,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -5256,7 +5224,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -5277,7 +5245,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -5311,7 +5279,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -5323,7 +5291,7 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); + debug!("Test burnchain height {height}"); if !burnchain_config.is_in_prepare_phase(height) { let mut have_expected_payout = false; if height < epoch_2_1 + (reward_cycle_len as u64) { diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 774a83f712..dabd3ee9ed 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -198,14 +198,14 @@ fn disable_pox() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -227,7 +227,7 @@ fn disable_pox() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -255,7 +255,7 @@ fn disable_pox() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -276,7 +276,7 @@ fn disable_pox() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); let tx = make_contract_call( @@ -295,7 +295,7 @@ fn disable_pox() { ], ); - info!("Submit 2.1 stacking tx to {:?}", 
&http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -322,7 +322,7 @@ fn disable_pox() { &[Value::UInt(increase_by.into())], ); - info!("Submit 2.1 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.1 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); for _i in 0..15 { @@ -349,7 +349,7 @@ fn disable_pox() { &[Value::UInt(5000)], ); - info!("Submit 2.1 stack-increase tx to {:?}", &http_origin); + info!("Submit 2.1 stack-increase tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // finish the cycle after the 2.2 transition, @@ -395,7 +395,7 @@ fn disable_pox() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -407,7 +407,7 @@ fn disable_pox() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); + debug!("Test burnchain height {height}"); if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { assert_eq!(pox_addrs.len(), 2); let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); @@ -504,14 +504,12 @@ fn disable_pox() { for reward_cycle in reward_cycle_min..(reward_cycle_max + 1) { let cycle_counts = &reward_cycle_pox_addrs[&reward_cycle]; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, expected_slots[&reward_cycle][pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." 
); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } @@ -722,14 +720,14 @@ fn pox_2_unlock_all() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -751,7 +749,7 @@ fn pox_2_unlock_all() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -780,7 +778,7 @@ fn pox_2_unlock_all() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -812,7 +810,7 @@ fn pox_2_unlock_all() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); @@ -832,7 +830,7 @@ fn pox_2_unlock_all() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // that it can mine _at all_ is a success criterion @@ -970,7 +968,7 @@ fn pox_2_unlock_all() { 1_000_000, ); - info!("Submit stack transfer tx to {:?}", &http_origin); + info!("Submit stack transfer tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // this wakes up the node to mine the transaction @@ -1063,7 +1061,7 @@ fn pox_2_unlock_all() { &iconn, &tip, &boot_code_id("pox-2", false), - &format!("(get-burn-block-info? pox-addrs u{})", height), + &format!("(get-burn-block-info? pox-addrs u{height})"), ) .expect_optional() .unwrap() @@ -1075,7 +1073,7 @@ fn pox_2_unlock_all() { .expect_list() .unwrap(); - debug!("Test burnchain height {}", height); + debug!("Test burnchain height {height}"); if !burnchain_config.is_in_prepare_phase(height) && !pox_addrs.is_empty() { assert_eq!(pox_addrs.len(), 2); let pox_addr_0 = PoxAddress::try_from_pox_tuple(false, &pox_addrs[0]).unwrap(); @@ -1154,18 +1152,16 @@ fn pox_2_unlock_all() { let cycle_counts = match reward_cycle_pox_addrs.get(&reward_cycle) { Some(x) => x, None => { - info!("No reward cycle entry = {}", reward_cycle); + info!("No reward cycle entry = {reward_cycle}"); continue; } }; - assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {} is mismatched with the actual count.", reward_cycle); + assert_eq!(cycle_counts.len(), expected_slots[&reward_cycle].len(), "The number of expected PoX addresses in reward cycle {reward_cycle} is mismatched with the actual count."); for (pox_addr, slots) in cycle_counts.iter() { assert_eq!( *slots, expected_slots[&reward_cycle][pox_addr], - "The number of expected slots for PoX address {} in reward cycle {} is mismatched with the actual count.", - &pox_addr, - reward_cycle, + "The number of expected slots for PoX address {pox_addr} in reward cycle {reward_cycle} is mismatched with the actual count." 
); info!("PoX payment received"; "cycle" => reward_cycle, "pox_addr" => %pox_addr, "slots" => slots); } @@ -1335,9 +1331,9 @@ fn test_pox_reorg_one_flap() { let rpc_port = 41063 + 10 * i; let p2p_port = 41063 + 10 * i + 1; - conf.node.rpc_bind = format!("127.0.0.1:{}", rpc_port); - conf.node.data_url = format!("http://127.0.0.1:{}", rpc_port); - conf.node.p2p_bind = format!("127.0.0.1:{}", p2p_port); + conf.node.rpc_bind = format!("127.0.0.1:{rpc_port}"); + conf.node.data_url = format!("http://127.0.0.1:{rpc_port}"); + conf.node.p2p_bind = format!("127.0.0.1:{p2p_port}"); confs.push(conf); } @@ -1350,9 +1346,8 @@ fn test_pox_reorg_one_flap() { for conf in confs.iter_mut().skip(1) { conf.node.set_bootstrap_nodes( format!( - "{}@{}", - &StacksPublicKey::from_private(&node_privkey_1).to_hex(), - p2p_bind + "{}@{p2p_bind}", + &StacksPublicKey::from_private(&node_privkey_1).to_hex() ), chain_id, peer_version, @@ -1433,7 +1428,7 @@ fn test_pox_reorg_one_flap() { loop { let tip_info_opt = get_chain_info_opt(&confs[0]); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner 0: {:?}\n\n", &tip_info); + eprintln!("\n\nMiner 0: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } @@ -1448,16 +1443,16 @@ fn test_pox_reorg_one_flap() { } for (i, conf) in confs.iter().enumerate().skip(1) { - eprintln!("\n\nBoot miner {}\n\n", i); + eprintln!("\n\nBoot miner {i}\n\n"); loop { let tip_info_opt = get_chain_info_opt(conf); if let Some(tip_info) = tip_info_opt { - eprintln!("\n\nMiner {}: {:?}\n\n", i, &tip_info); + eprintln!("\n\nMiner {i}: {tip_info:?}\n\n"); if tip_info.stacks_tip_height > 0 { break; } } else { - eprintln!("\n\nWaiting for miner {}...\n\n", i); + eprintln!("\n\nWaiting for miner {i}...\n\n"); } next_block_and_iterate(&mut btc_regtest_controller, &blocks_processed[i], 5_000); } @@ -1524,7 +1519,7 @@ fn test_pox_reorg_one_flap() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); if tip_info.burn_block_height == 220 { at_220 = true; } @@ -1535,7 +1530,7 @@ fn test_pox_reorg_one_flap() { let mut cnt = 0; for tx_chain in all_txs { for tx in tx_chain { - eprintln!("\n\nSubmit tx {}\n\n", &cnt); + eprintln!("\n\nSubmit tx {cnt}\n\n"); submit_tx(&http_origin, &tx); cnt += 1; } @@ -1543,7 +1538,7 @@ fn test_pox_reorg_one_flap() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); assert!(tip_info.burn_block_height <= 220); } @@ -1552,7 +1547,7 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); @@ -1565,13 +1560,13 @@ fn test_pox_reorg_one_flap() { // miner 0 mines a prepare phase and confirms a hidden anchor block. 
// miner 1 is disabled for these prepare phases for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -1583,20 +1578,20 @@ fn test_pox_reorg_one_flap() { info!("####################### end of cycle ##############################"); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } info!("####################### end of cycle ##############################"); // miner 1 mines a prepare phase and confirms a hidden anchor block. // miner 0 is disabled for this prepare phase for i in 0..10 { - eprintln!("\n\nBuild block {}\n\n", i); + eprintln!("\n\nBuild block {i}\n\n"); btc_regtest_controller.build_next_block(1); sleep_ms(block_time_ms); for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } if i >= reward_cycle_len - prepare_phase_len - 2 { @@ -1609,7 +1604,7 @@ fn test_pox_reorg_one_flap() { let mut max_stacks_tip = 0; for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); // miner 1's history overtakes miner 0's. // Miner 1 didn't see cycle 22's anchor block, but it just mined an anchor block for cycle @@ -1625,22 +1620,19 @@ fn test_pox_reorg_one_flap() { for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Tip for miner {}: {:?}", i, &tip_info); + info!("Tip for miner {i}: {tip_info:?}"); } // resume block propagation env::set_var("STACKS_HIDE_BLOCKS_AT_HEIGHT", "[]"); // wait for all blocks to propagate - eprintln!( - "Wait for all blocks to propagate; stacks tip height is {}", - max_stacks_tip - ); + eprintln!("Wait for all blocks to propagate; stacks tip height is {max_stacks_tip}"); wait_pox_stragglers(&confs, max_stacks_tip, block_time_ms); // nodes now agree on stacks affirmation map for (i, c) in confs.iter().enumerate() { let tip_info = get_chain_info(c); - info!("Final tip for miner {}: {:?}", i, &tip_info); + info!("Final tip for miner {i}: {tip_info:?}"); } } diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index a0cbbfe876..12ae11945d 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -227,9 +227,8 @@ fn trait_invocation_behavior() { submit_tx(&http_origin, &publish_invoke); info!( - "At height = {}, epoch-2.1 = {}", - get_chain_info(&conf).burn_block_height, - epoch_2_1 + "At height = {}, epoch-2.1 = {epoch_2_1}", + get_chain_info(&conf).burn_block_height ); // wait until just before epoch 2.1 loop { @@ -509,7 +508,7 @@ fn trait_invocation_behavior() { next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - info!("Total spender txs = {}", spender_nonce); + info!("Total spender txs = {spender_nonce}"); let blocks = test_observer::get_blocks(); @@ -630,7 +629,7 @@ fn trait_invocation_behavior() { } for (key, value) in transaction_receipts.iter() { - eprintln!("{} => {} of {}", key, 
value.0, value.1); + eprintln!("{key} => {} of {}", value.0, value.1); } test_observer::clear(); diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index e39255678d..cfcc8d0d52 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -220,14 +220,14 @@ fn fix_to_pox_contract() { // stack right away let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_1 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_1,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_1}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() .unwrap(); let pox_addr_tuple_3 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_3,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_3}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -249,7 +249,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit 2.05 stacking tx to {:?}", &http_origin); + info!("Submit 2.05 stacking tx to {http_origin:?}"); submit_tx(&http_origin, &tx); // wait until just before epoch 2.1 @@ -278,7 +278,7 @@ fn fix_to_pox_contract() { let sort_height = channel.get_sortitions_processed(); let pox_addr_tuple_2 = execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash_2,), + &format!("{{ hashbytes: 0x{pox_pubkey_hash_2}, version: 0x00 }}"), ClarityVersion::Clarity2, ) .unwrap() @@ -299,7 +299,7 @@ fn fix_to_pox_contract() { ], ); - info!("Submit 2.1 stacking tx to {:?}", &http_origin); + info!("Submit 2.1 stacking tx to {http_origin:?}"); sleep_ms(5_000); submit_tx(&http_origin, &tx); diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 5a8de4d3bd..574b18e964 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -872,7 +872,7 @@ fn integration_test_get_info() { // explicit trait compliance let path = format!("{http_origin}/v2/traits/{contract_addr}/impl-trait-contract/{contract_addr}/get-info/trait-1"); let res = client.get(&path).send().unwrap().json::().unwrap(); - eprintln!("Test: GET {}", path); + eprintln!("Test: GET {path}"); assert!(res.is_implemented); // No trait found diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 1f7252ec5f..167a66f7db 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -663,9 +663,8 @@ pub fn next_block_and_wait_with_timeout( ) -> bool { let current = blocks_processed.load(Ordering::SeqCst); info!( - "Issuing block at {}, waiting for bump ({})", - get_epoch_time_secs(), - current + "Issuing block at {}, waiting for bump ({current})", + get_epoch_time_secs() ); btc_controller.build_next_block(1); let start = Instant::now(); @@ -692,9 +691,8 @@ pub fn next_block_and_iterate( ) -> bool { let current = blocks_processed.load(Ordering::SeqCst); eprintln!( - "Issuing block at {}, waiting for bump ({})", - get_epoch_time_secs(), - current + "Issuing block at {}, waiting for bump ({current})", + get_epoch_time_secs() ); btc_controller.build_next_block(1); let start = Instant::now(); @@ -1064,7 +1062,7 @@ fn bitcoind_integration_test() { // let's query the miner's account nonce: - eprintln!("Miner account: {}", miner_account); + eprintln!("Miner account: {miner_account}"); let account = get_account(&http_origin, &miner_account); assert_eq!(account.balance, 0); @@ -1708,7 +1706,7 @@ 
fn liquid_ustx_integration() { let dropped_txs = test_observer::get_memtx_drops(); assert_eq!(dropped_txs.len(), 1); assert_eq!(&dropped_txs[0].1, "ReplaceByFee"); - assert_eq!(&dropped_txs[0].0, &format!("0x{}", replaced_txid)); + assert_eq!(&dropped_txs[0].0, &format!("0x{replaced_txid}")); // mine 1 burn block for the miner to issue the next block next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); @@ -3555,9 +3553,8 @@ fn microblock_fork_poison_integration_test() { ); eprintln!( - "Created first microblock: {}: {:?}", - &first_microblock.block_hash(), - &first_microblock + "Created first microblock: {}: {first_microblock:?}", + &first_microblock.block_hash() ); // NOTE: this microblock conflicts because it has the same parent as the first microblock, @@ -4015,7 +4012,7 @@ fn microblock_integration_test() { burn_blocks_with_burns.len() ); for burn_block in burn_blocks_with_burns { - eprintln!("{}", burn_block); + eprintln!("{burn_block}"); } let mut prior = None; @@ -5270,9 +5267,9 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { ) ) (begin - (crash-me \"{}\")) + (crash-me \"large-contract-{}-{ix}\")) ", - &format!("large-contract-{}-{ix}", &spender_addrs_c32[ix]) + &spender_addrs_c32[ix] ) )] } else { @@ -5325,8 +5322,8 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { ) ) (begin - (crash-me \"{}\")) - ", &format!("small-contract-{}-{ix}-{i}", &spender_addrs_c32[ix])) + (crash-me \"small-contract-{}-{ix}-{i}\")) + ", spender_addrs_c32[ix]) ); ret.push(tx); } @@ -7758,7 +7755,7 @@ fn atlas_integration_test() { let mut attachments_did_sync = false; let mut timeout = 60; while !attachments_did_sync { - let zonefile_hex = hex_bytes(&format!("facade0{}", i)).unwrap(); + let zonefile_hex = hex_bytes(&format!("facade0{i}")).unwrap(); let hashed_zonefile = Hash160::from_data(&zonefile_hex); let path = format!("{http_origin}/v2/attachments/{}", hashed_zonefile.to_hex()); let res = client @@ -7872,7 +7869,7 @@ fn atlas_integration_test() { let user = StacksPrivateKey::new(); let zonefile_hex = format!("facade0{i}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - let name = format!("johndoe{}", i); + let name = format!("johndoe{i}"); let tx = make_contract_call( &user_1, 2 + i, @@ -9621,7 +9618,7 @@ fn test_problematic_txs_are_not_stored() { let edge_repeat_factor = AST_CALL_STACK_DEPTH_BUFFER + (MAX_CALL_STACK_DEPTH as u64) - 1; let tx_edge_body_start = "{ a : ".repeat(edge_repeat_factor as usize); let tx_edge_body_end = "} ".repeat(edge_repeat_factor as usize); - let tx_edge_body = format!("{}u1 {}", tx_edge_body_start, tx_edge_body_end); + let tx_edge_body = format!("{tx_edge_body_start}u1 {tx_edge_body_end}"); let tx_edge = make_contract_publish( &spender_sk_1, @@ -10477,8 +10474,8 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { let follower_tip_info = get_chain_info(&follower_conf); eprintln!( - "\nFollower is at burn block {} stacks block {} (bad block is {})\n", - follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, bad_block_height + "\nFollower is at burn block {} stacks block {} (bad block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); // follower rejects the bad block @@ -11225,8 +11222,8 @@ fn test_problematic_microblocks_are_not_relayed_or_stored() { let follower_tip_info = get_chain_info(&follower_conf); eprintln!( - "\nFollower is at burn block {} stacks block {} (bad block is {})\n", - 
follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height, bad_block_height + "\nFollower is at burn block {} stacks block {} (bad block is {bad_block_height})\n", + follower_tip_info.burn_block_height, follower_tip_info.stacks_tip_height ); // follower rejects the bad microblock -- can't append subsequent blocks @@ -11514,7 +11511,7 @@ fn make_mblock_tx_chain(privk: &StacksPrivateKey, fee_plus: u64, chain_id: u32) let mut addr_prefix = addr.to_string(); let _ = addr_prefix.split_off(12); let contract_name = format!("crct-{nonce}-{addr_prefix}-{random_iters}"); - eprintln!("Make tx {}", &contract_name); + eprintln!("Make tx {contract_name}"); let tx = make_contract_publish_microblock_only( privk, nonce, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 7fcfc6b3f3..3be157abaf 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -1523,7 +1523,7 @@ fn multiple_miners() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), @@ -3861,7 +3861,7 @@ fn multiple_miners_with_nakamoto_blocks() { let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed); let node_1_pk = StacksPublicKey::from_private(&node_1_sk); - conf_node_2.node.working_dir = format!("{}-{}", conf_node_2.node.working_dir, "1"); + conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir); conf_node_2.node.set_bootstrap_nodes( format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind), diff --git a/testnet/stacks-node/src/tests/stackerdb.rs b/testnet/stacks-node/src/tests/stackerdb.rs index 70d820fbb1..c68b477b47 100644 --- a/testnet/stacks-node/src/tests/stackerdb.rs +++ b/testnet/stacks-node/src/tests/stackerdb.rs @@ -355,7 +355,7 @@ fn test_stackerdb_event_observer() { for i in 0..6 { let slot_id = i as u32; let privk = &privks[i / 3]; - let chunk_str = format!("Hello chunks {}", &i); + let chunk_str = format!("Hello chunks {i}"); let ack = post_stackerdb_chunk( &http_origin, &contract_id, @@ -364,7 +364,7 @@ fn test_stackerdb_event_observer() { slot_id, 1, ); - debug!("ACK: {:?}", &ack); + debug!("ACK: {ack:?}"); let data = get_stackerdb_chunk(&http_origin, &contract_id, slot_id, Some(1)); assert_eq!(data, chunk_str.as_bytes().to_vec());