From 6d27661ce975f508ca8134238f1ff8e94e0c01b0 Mon Sep 17 00:00:00 2001
From: "keroroxx520@gmail.com" <keroroxx520@gmail.com>
Date: Mon, 13 Jan 2025 00:05:31 +0800
Subject: [PATCH] feat: impl reqpool

---
 reqpool/Cargo.toml         |  33 +++++
 reqpool/src/config.rs      |  10 ++
 reqpool/src/lib.rs         |  17 +++
 reqpool/src/macros.rs      |  44 ++++++
 reqpool/src/memory_pool.rs | 115 +++++++++++++++
 reqpool/src/redis_pool.rs  | 191 +++++++++++++++++++++++++
 reqpool/src/request.rs     | 286 +++++++++++++++++++++++++++++++++++++
 reqpool/src/traits.rs      |  45 ++++++
 reqpool/src/utils.rs       |  27 ++++
 9 files changed, 768 insertions(+)
 create mode 100644 reqpool/Cargo.toml
 create mode 100644 reqpool/src/config.rs
 create mode 100644 reqpool/src/lib.rs
 create mode 100644 reqpool/src/macros.rs
 create mode 100644 reqpool/src/memory_pool.rs
 create mode 100644 reqpool/src/redis_pool.rs
 create mode 100644 reqpool/src/request.rs
 create mode 100644 reqpool/src/traits.rs
 create mode 100644 reqpool/src/utils.rs

diff --git a/reqpool/Cargo.toml b/reqpool/Cargo.toml
new file mode 100644
index 00000000..5f41576a
--- /dev/null
+++ b/reqpool/Cargo.toml
@@ -0,0 +1,33 @@
+[package]
+name = "raiko-reqpool"
+version = "0.1.0"
+authors = ["Taiko Labs"]
+edition = "2021"
+
+[dependencies]
+raiko-lib = { workspace = true }
+raiko-core = { workspace = true }
+raiko-redis-derive = { workspace = true }
+num_enum = { workspace = true }
+chrono = { workspace = true, features = ["serde"] }
+thiserror = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
+serde_with = { workspace = true }
+hex = { workspace = true }
+tracing = { workspace = true }
+anyhow = { workspace = true }
+tokio = { workspace = true }
+async-trait = { workspace = true }
+redis = { workspace = true }
+backoff = { workspace = true }
+derive-getters = { workspace = true }
+proc-macro2 = { workspace = true }
+quote = { workspace = true }
+syn = { workspace = true }
+alloy-primitives = { workspace = true }
+
+[dev-dependencies]
+rand = "0.9.0-alpha.1" # This is an alpha version that has rng.gen_iter::()
+rand_chacha = "0.9.0-alpha.1"
+tempfile = "3.10.1"
diff --git a/reqpool/src/config.rs b/reqpool/src/config.rs
new file mode 100644
index 00000000..0050daa3
--- /dev/null
+++ b/reqpool/src/config.rs
@@ -0,0 +1,10 @@
+use serde::{Deserialize, Serialize};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+/// The configuration for the redis-backend request pool
+pub struct RedisPoolConfig {
+    /// The URL of the Redis database, e.g. "redis://localhost:6379"
+    pub redis_url: String,
+    /// The TTL, in seconds, of pool entries in the Redis database
+    pub redis_ttl: u64,
+}
diff --git a/reqpool/src/lib.rs b/reqpool/src/lib.rs
new file mode 100644
index 00000000..6aa88c3b
--- /dev/null
+++ b/reqpool/src/lib.rs
@@ -0,0 +1,17 @@
+mod config;
+mod macros;
+mod memory_pool;
+mod redis_pool;
+mod request;
+mod traits;
+mod utils;
+
+// Re-export
+pub use config::RedisPoolConfig;
+pub use redis_pool::RedisPool;
+pub use request::{
+    AggregationRequestEntity, AggregationRequestKey, RequestEntity, RequestKey,
+    SingleProofRequestEntity, SingleProofRequestKey, Status, StatusWithContext,
+};
+pub use traits::{Pool, PoolResult, PoolWithTrace};
+pub use utils::proof_key_to_hack_request_key;
diff --git a/reqpool/src/macros.rs b/reqpool/src/macros.rs
new file mode 100644
index 00000000..fb36b349
--- /dev/null
+++ b/reqpool/src/macros.rs
@@ -0,0 +1,44 @@
+/// This macro implements the Display trait for a type by using serde_json's pretty printing.
+/// If the type cannot be serialized to JSON, it falls back to using Debug formatting.
+///
+/// # Example
+///
+/// ```rust
+/// use serde::{Serialize, Deserialize};
+///
+/// #[derive(Debug, Serialize, Deserialize)]
+/// struct Person {
+///     name: String,
+///     age: u32
+/// }
+///
+/// impl_display_using_json_pretty!(Person);
+///
+/// let person = Person {
+///     name: "John".to_string(),
+///     age: 30
+/// };
+///
+/// // Will print:
+/// // {
+/// //   "name": "John",
+/// //   "age": 30
+/// // }
+/// println!("{}", person);
+/// ```
+///
+/// The type must implement serde's Serialize trait for JSON serialization to work.
+/// If serialization fails, it will fall back to using the Debug implementation.
+#[macro_export]
+macro_rules! impl_display_using_json_pretty {
+    ($type:ty) => {
+        impl std::fmt::Display for $type {
+            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                match serde_json::to_string_pretty(self) {
+                    Ok(s) => write!(f, "{}", s),
+                    Err(_) => write!(f, "{:?}", self),
+                }
+            }
+        }
+    };
+}
diff --git a/reqpool/src/memory_pool.rs b/reqpool/src/memory_pool.rs
new file mode 100644
index 00000000..410da5d1
--- /dev/null
+++ b/reqpool/src/memory_pool.rs
@@ -0,0 +1,115 @@
+// use std::collections::HashMap;
+
+// use chrono::Utc;
+
+// use crate::{
+//     request::{RequestEntity, RequestKey, Status, StatusWithContext},
+//     traits::{Pool, PoolWithTrace},
+// };
+
+// #[derive(Debug, Clone)]
+// pub struct MemoryPool {
+//     /// The live requests in the pool
+//     pending: HashMap<RequestKey, (RequestEntity, StatusWithContext)>,
+//     /// The trace of requests
+//     trace: Vec<(RequestKey, RequestEntity, StatusWithContext)>,
+// }
+
+// impl Pool for MemoryPool {
+//     type Config = ();
+
+//     fn new(_config: Self::Config) -> Self {
+//         Self {
+//             lives: HashMap::new(),
+//             trace: Vec::new(),
+//         }
+//     }
+
+//     fn add(&mut self, request_key: RequestKey, request_entity: RequestEntity) {
+//         let status = StatusWithContext::new(Status::Registered, Utc::now());
+
+//         let old = self.lives.insert(
+//             request_key.clone(),
+//             (request_entity.clone(), status.clone()),
+//         );
+
+//         if let Some((_, old_status)) = old {
+//             tracing::error!(
+//                 "MemoryPool.add: request key already exists, {request_key:?}, old status: {old_status:?}"
+//             );
+//         } else {
+//             tracing::info!("MemoryPool.add, {request_key:?}, status: {status:?}");
+//         }
+
+//         self.trace.push((request_key, request_entity, status));
+//     }
+
+//     fn remove(&mut self, request_key: &RequestKey) {
+//         match self.lives.remove(request_key) {
+//             Some((_, status)) => {
+//                 tracing::info!("MemoryPool.remove, {request_key:?}, status: {status:?}");
+//             }
+//             None => {
+//                 tracing::error!("MemoryPool.remove: request key not found, {request_key:?}");
+//             }
+//         }
+//     }
+
+//     fn get(&self, request_key: &RequestKey) -> Option<(RequestEntity, StatusWithContext)> {
+//         self.lives.get(request_key).cloned()
+//     }
+
+//     fn get_status(&self, request_key: &RequestKey) -> Option<StatusWithContext> {
+//         self.lives
+//             .get(request_key)
+//             .map(|(_, status)| status.clone())
+//     }
+
+//     fn update_status(&mut self, request_key: &RequestKey, status: StatusWithContext) {
+//         match self.lives.remove(request_key) {
+//             Some((entity, old_status)) => {
+//                 tracing::info!(
+//                     "MemoryPool.update_status, {request_key:?}, old status: {old_status:?}, new status: {status:?}"
+//                 );
+//                 self.lives
+//                     .insert(request_key.clone(), (entity.clone(), status.clone()));
+//                 self.trace.push((request_key.clone(), entity, status));
+//             }
+//             None => {
+//                 tracing::error!(
+//                     "MemoryPool.update_status: request key not found, discard it, {request_key:?}"
+//                 );
+//             }
+//         }
+//     }
+// }
+
+// impl PoolWithTrace for MemoryPool {
+//     fn get_all_live(&self) -> Vec<(RequestKey, RequestEntity, StatusWithContext)> {
+//         self.lives
+//             .iter()
+//             .map(|(k, v)| (k.clone(), v.0.clone(), v.1.clone()))
+//             .collect()
+//     }
+
+//     fn get_all_trace(&self) -> Vec<(RequestKey, RequestEntity, StatusWithContext)> {
+//         self.trace.clone()
+//     }
+
+//     fn trace(
+//         &self,
+//         request_key: &RequestKey,
+//     ) -> (
+//         Option<(RequestEntity, StatusWithContext)>,
+//         Vec<(RequestKey, RequestEntity, StatusWithContext)>,
+//     ) {
+//         let live = self.lives.get(request_key).cloned();
+//         let traces = self
+//             .trace
+//             .iter()
+//             .filter(|(k, _, _)| k == request_key)
+//             .cloned()
+//             .collect();
+//         (live, traces)
+//     }
+// }
diff --git a/reqpool/src/redis_pool.rs b/reqpool/src/redis_pool.rs
new file mode 100644
index 00000000..f52ba218
--- /dev/null
+++ b/reqpool/src/redis_pool.rs
@@ -0,0 +1,191 @@
+use crate::{
+    impl_display_using_json_pretty, proof_key_to_hack_request_key, Pool, PoolResult,
+    RedisPoolConfig, RequestEntity, RequestKey, StatusWithContext,
+};
+use backoff::{exponential::ExponentialBackoff, SystemClock};
+use raiko_lib::prover::{IdStore, IdWrite, ProofKey, ProverError, ProverResult};
+use raiko_redis_derive::RedisValue;
+use redis::{Client, Commands, RedisResult};
+use serde::{Deserialize, Serialize};
+use std::time::Duration;
+
+#[derive(Debug, Clone)]
+pub struct RedisPool {
+    client: Client,
+    config: RedisPoolConfig,
+}
+
+impl Pool for RedisPool {
+    fn add(
+        &mut self,
+        request_key: RequestKey,
+        request_entity: RequestEntity,
+        status: StatusWithContext,
+    ) -> PoolResult<()> {
+        tracing::info!("RedisPool.add: {request_key}, {status}");
+        let request_entity_and_status = RequestEntityAndStatus {
+            entity: request_entity,
+            status,
+        };
+        self.conn()
+            .map_err(|e| e.to_string())?
+            .set_ex(
+                request_key,
+                request_entity_and_status,
+                self.config.redis_ttl,
+            )
+            .map_err(|e| e.to_string())?;
+        Ok(())
+    }
+
+    fn remove(&mut self, request_key: &RequestKey) -> PoolResult<usize> {
+        tracing::info!("RedisPool.remove: {request_key}");
+        let result: usize = self
+            .conn()
+            .map_err(|e| e.to_string())?
+            .del(request_key)
+            .map_err(|e| e.to_string())?;
+        Ok(result)
+    }
+
+    fn get(
+        &mut self,
+        request_key: &RequestKey,
+    ) -> PoolResult<Option<(RequestEntity, StatusWithContext)>> {
+        let result: RedisResult<RequestEntityAndStatus> =
+            self.conn().map_err(|e| e.to_string())?.get(request_key);
+        match result {
+            Ok(value) => Ok(Some(value.into())),
+            Err(e) if e.kind() == redis::ErrorKind::TypeError => Ok(None),
+            Err(e) => Err(e.to_string()),
+        }
+    }
+
+    fn get_status(&mut self, request_key: &RequestKey) -> PoolResult<Option<StatusWithContext>> {
+        self.get(request_key).map(|v| v.map(|v| v.1))
+    }
+
+    fn update_status(
+        &mut self,
+        request_key: RequestKey,
+        status: StatusWithContext,
+    ) -> PoolResult<StatusWithContext> {
+        tracing::info!("RedisPool.update_status: {request_key}, {status}");
+        match self.get(&request_key)? {
+            Some((entity, old_status)) => {
+                self.add(request_key, entity, status)?;
+                Ok(old_status)
+            }
+            None => Err("Request not found".to_string()),
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl IdStore for RedisPool {
+    async fn read_id(&mut self, proof_key: ProofKey) -> ProverResult<String> {
+        let hack_request_key = proof_key_to_hack_request_key(proof_key);
+
+        tracing::info!("RedisPool.read_id: {hack_request_key}");
+
+        let result: RedisResult<String> = self
+            .conn()
+            .map_err(|e| e.to_string())?
+            .get(hack_request_key);
+        match result {
+            Ok(value) => Ok(value.into()),
+            Err(e) => Err(ProverError::StoreError(e.to_string())),
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl IdWrite for RedisPool {
+    async fn store_id(&mut self, proof_key: ProofKey, id: String) -> ProverResult<()> {
+        let hack_request_key = proof_key_to_hack_request_key(proof_key);
+
+        tracing::info!("RedisPool.store_id: {hack_request_key}, {id}");
+
+        self.conn()
+            .map_err(|e| e.to_string())?
+            .set_ex(hack_request_key, id, self.config.redis_ttl)
+            .map_err(|e| ProverError::StoreError(e.to_string()))?;
+        Ok(())
+    }
+
+    async fn remove_id(&mut self, proof_key: ProofKey) -> ProverResult<()> {
+        let hack_request_key = proof_key_to_hack_request_key(proof_key);
+
+        tracing::info!("RedisPool.remove_id: {hack_request_key}");
+
+        self.conn()
+            .map_err(|e| e.to_string())?
+            .del(hack_request_key)
+            .map_err(|e| ProverError::StoreError(e.to_string()))?;
+        Ok(())
+    }
+}
+
+impl RedisPool {
+    pub fn open(config: RedisPoolConfig) -> Result<Self, redis::RedisError> {
+        tracing::info!("RedisPool.open: connecting to redis: {}", config.redis_url);
+
+        let client = Client::open(config.redis_url.clone())?;
+        Ok(Self { client, config })
+    }
+
+    fn conn(&mut self) -> Result<redis::Connection, redis::RedisError> {
+        let backoff: ExponentialBackoff<SystemClock> = ExponentialBackoff {
+            initial_interval: Duration::from_secs(10),
+            max_interval: Duration::from_secs(60),
+            max_elapsed_time: Some(Duration::from_secs(300)),
+            ..Default::default()
+        };
+
+        backoff::retry(backoff, || match self.client.get_connection() {
+            Ok(conn) => Ok(conn),
+            Err(e) => {
+                tracing::error!(
+                    "RedisPool.get_connection: failed to connect to redis: {e:?}, retrying..."
+                );
+
+                self.client = redis::Client::open(self.config.redis_url.clone())?;
+                Err(backoff::Error::Transient {
+                    err: e,
+                    retry_after: None,
+                })
+            }
+        })
+        .map_err(|e| match e {
+            backoff::Error::Transient {
+                err,
+                retry_after: _,
+            }
+            | backoff::Error::Permanent(err) => err,
+        })
+    }
+}
+
+/// An internal wrapper for request entity and status, used for redis serialization
+#[derive(PartialEq, Debug, Clone, Deserialize, Serialize, RedisValue)]
+struct RequestEntityAndStatus {
+    entity: RequestEntity,
+    status: StatusWithContext,
+}
+
+impl From<(RequestEntity, StatusWithContext)> for RequestEntityAndStatus {
+    fn from(value: (RequestEntity, StatusWithContext)) -> Self {
+        Self {
+            entity: value.0,
+            status: value.1,
+        }
+    }
+}
+
+impl From<RequestEntityAndStatus> for (RequestEntity, StatusWithContext) {
+    fn from(value: RequestEntityAndStatus) -> Self {
+        (value.entity, value.status)
+    }
+}
+
+impl_display_using_json_pretty!(RequestEntityAndStatus);
diff --git a/reqpool/src/request.rs b/reqpool/src/request.rs
new file mode 100644
index 00000000..5a71aeb5
--- /dev/null
+++ b/reqpool/src/request.rs
@@ -0,0 +1,286 @@
+use crate::impl_display_using_json_pretty;
+use alloy_primitives::Address;
+use chrono::{DateTime, Utc};
+use derive_getters::Getters;
+use raiko_core::interfaces::ProverSpecificOpts;
+use raiko_lib::{
+    input::BlobProofType,
+    primitives::{ChainId, B256},
+    proof_type::ProofType,
+    prover::Proof,
+};
+use raiko_redis_derive::RedisValue;
+use serde::{Deserialize, Serialize};
+use serde_with::{serde_as, DisplayFromStr};
+use std::collections::HashMap;
+
+#[derive(RedisValue, PartialEq, Debug, Clone, Deserialize, Serialize, Eq, PartialOrd, Ord)]
+#[serde(rename_all = "snake_case")]
+/// The status of a request
+pub enum Status {
+    // === Normal status ===
+    /// The request is registered but not yet started
+    Registered,
+
+    /// The request is in progress
+    WorkInProgress,
+
+    // /// The request is in progress of proving
+    // WorkInProgressProving {
+    //     /// The proof ID
+    //     /// For SP1 and RISC0 proof type, it is the proof ID returned by the network prover,
+    //     /// otherwise, it should be empty.
+    //     proof_id: String,
+    // },
+    /// The request is successful
+    Success {
+        /// The proof of the request
+        proof: Proof,
+    },
+
+    // === Cancelled status ===
+    /// The request is cancelled
+    Cancelled,
+
+    // === Error status ===
+    /// The request failed with an error
+    Failed {
+        /// The error message
+        error: String,
+    },
+}
+
+impl Status {
+    pub fn is_success(&self) -> bool {
+        matches!(self, Status::Success { .. })
+    }
+}
+
+#[derive(
+    PartialEq, Debug, Clone, Deserialize, Serialize, Eq, PartialOrd, Ord, RedisValue, Getters,
+)]
+/// The status of a request with context
+pub struct StatusWithContext {
+    /// The status of the request
+    status: Status,
+    /// The timestamp of the status
+    timestamp: DateTime<Utc>,
+}
+
+impl StatusWithContext {
+    pub fn new(status: Status, timestamp: DateTime<Utc>) -> Self {
+        Self { status, timestamp }
+    }
+
+    pub fn new_registered() -> Self {
+        Self::new(Status::Registered, chrono::Utc::now())
+    }
+
+    pub fn new_cancelled() -> Self {
+        Self::new(Status::Cancelled, chrono::Utc::now())
+    }
+
+    pub fn into_status(self) -> Status {
+        self.status
+    }
+}
+
+impl From<Status> for StatusWithContext {
+    fn from(status: Status) -> Self {
+        Self::new(status, chrono::Utc::now())
+    }
+}
+
+/// The key to identify a request in the pool
+#[derive(
+    PartialEq, Debug, Clone, Deserialize, Serialize, Eq, PartialOrd, Ord, Hash, RedisValue,
+)]
+pub enum RequestKey {
+    SingleProof(SingleProofRequestKey),
+    Aggregation(AggregationRequestKey),
+}
+
+impl RequestKey {
+    pub fn proof_type(&self) -> &ProofType {
+        match self {
+            RequestKey::SingleProof(key) => &key.proof_type,
+            RequestKey::Aggregation(key) => &key.proof_type,
+        }
+    }
+}
+
+/// The key to identify a single-proof request in the pool
+#[derive(
+    PartialEq, Debug, Clone, Deserialize, Serialize, Eq, PartialOrd, Ord, Hash, RedisValue, Getters,
+)]
+pub struct SingleProofRequestKey {
+    /// The chain ID of the request
+    chain_id: ChainId,
+    /// The block number of the request
+    block_number: u64,
+    /// The block hash of the request
+    block_hash: B256,
+    /// The proof type of the request
+    proof_type: ProofType,
+    /// The prover of the request
+    prover_address: String,
+}
+
+impl SingleProofRequestKey {
+    pub fn new(
+        chain_id: ChainId,
+        block_number: u64,
+        block_hash: B256,
+        proof_type: ProofType,
+        prover_address: String,
+    ) -> Self {
+        Self {
+            chain_id,
+            block_number,
+            block_hash,
+            proof_type,
+            prover_address,
+        }
+    }
+}
+
+#[derive(
+    PartialEq, Debug, Clone, Deserialize, Serialize, Eq, PartialOrd, Ord, Hash, RedisValue, Getters,
+)]
+/// The key to identify an aggregation request in the pool
+pub struct AggregationRequestKey {
+    // TODO add chain_id
+    proof_type: ProofType,
+    block_numbers: Vec<u64>,
+}
+
+impl AggregationRequestKey {
+    pub fn new(proof_type: ProofType, block_numbers: Vec<u64>) -> Self {
+        Self {
+            proof_type,
+            block_numbers,
+        }
+    }
+}
+
+impl From<SingleProofRequestKey> for RequestKey {
+    fn from(key: SingleProofRequestKey) -> Self {
+        RequestKey::SingleProof(key)
+    }
+}
+
+impl From<AggregationRequestKey> for RequestKey {
+    fn from(key: AggregationRequestKey) -> Self {
+        RequestKey::Aggregation(key)
+    }
+}
+
+#[serde_as]
+#[derive(PartialEq, Debug, Clone, Deserialize, Serialize, RedisValue, Getters)]
+pub struct SingleProofRequestEntity {
+    /// The block number for the block to generate a proof for.
+    block_number: u64,
+    /// The L1 block number at which the L2 block was proposed.
+    l1_inclusion_block_number: u64,
+    /// The network to generate the proof for.
+    network: String,
+    /// The L1 network to generate the proof for.
+    l1_network: String,
+    /// Graffiti.
+    graffiti: B256,
+    /// The protocol instance data.
+    #[serde_as(as = "DisplayFromStr")]
+    prover: Address,
+    /// The proof type.
+    proof_type: ProofType,
+    /// Blob proof type.
+    blob_proof_type: BlobProofType,
+    #[serde(flatten)]
+    /// Additional prover params.
+    prover_args: HashMap<String, serde_json::Value>,
+}
+
+impl SingleProofRequestEntity {
+    pub fn new(
+        block_number: u64,
+        l1_inclusion_block_number: u64,
+        network: String,
+        l1_network: String,
+        graffiti: B256,
+        prover: Address,
+        proof_type: ProofType,
+        blob_proof_type: BlobProofType,
+        prover_args: HashMap<String, serde_json::Value>,
+    ) -> Self {
+        Self {
+            block_number,
+            l1_inclusion_block_number,
+            network,
+            l1_network,
+            graffiti,
+            prover,
+            proof_type,
+            blob_proof_type,
+            prover_args,
+        }
+    }
+}
+
+#[derive(PartialEq, Debug, Clone, Deserialize, Serialize, RedisValue, Getters)]
+pub struct AggregationRequestEntity {
+    /// The block numbers and l1 inclusion block numbers for the blocks to aggregate proofs for.
+    aggregation_ids: Vec<u64>,
+    /// The proofs to aggregate.
+    proofs: Vec<Proof>,
+    /// The proof type.
+    proof_type: ProofType,
+    #[serde(flatten)]
+    /// Any additional prover params in JSON format.
+    prover_args: ProverSpecificOpts,
+}
+
+impl AggregationRequestEntity {
+    pub fn new(
+        aggregation_ids: Vec<u64>,
+        proofs: Vec<Proof>,
+        proof_type: ProofType,
+        prover_args: ProverSpecificOpts,
+    ) -> Self {
+        Self {
+            aggregation_ids,
+            proofs,
+            proof_type,
+            prover_args,
+        }
+    }
+}
+
+/// The entity of a request
+#[derive(PartialEq, Debug, Clone, Deserialize, Serialize, RedisValue)]
+pub enum RequestEntity {
+    SingleProof(SingleProofRequestEntity),
+    Aggregation(AggregationRequestEntity),
+}
+
+impl From<SingleProofRequestEntity> for RequestEntity {
+    fn from(entity: SingleProofRequestEntity) -> Self {
+        RequestEntity::SingleProof(entity)
+    }
+}
+
+impl From<AggregationRequestEntity> for RequestEntity {
+    fn from(entity: AggregationRequestEntity) -> Self {
+        RequestEntity::Aggregation(entity)
+    }
+}
+
+// === impl Display using json_pretty ===
+
+impl_display_using_json_pretty!(Status);
+impl_display_using_json_pretty!(StatusWithContext);
+impl_display_using_json_pretty!(RequestKey);
+impl_display_using_json_pretty!(SingleProofRequestKey);
+impl_display_using_json_pretty!(AggregationRequestKey);
+impl_display_using_json_pretty!(RequestEntity);
+impl_display_using_json_pretty!(SingleProofRequestEntity);
+impl_display_using_json_pretty!(AggregationRequestEntity);
diff --git a/reqpool/src/traits.rs b/reqpool/src/traits.rs
new file mode 100644
index 00000000..243ecfb3
--- /dev/null
+++ b/reqpool/src/traits.rs
@@ -0,0 +1,45 @@
+use crate::request::{RequestEntity, RequestKey, StatusWithContext};
+
+pub type PoolResult<T> = Result<T, String>;
+
+/// Pool maintains the requests and their statuses
+pub trait Pool: Send + Sync + Clone {
+    /// Add a new request to the pool
+    fn add(
+        &mut self,
+        request_key: RequestKey,
+        request_entity: RequestEntity,
+        status: StatusWithContext,
+    ) -> PoolResult<()>;
+
+    /// Remove a request from the pool, returning the number of requests removed
+    fn remove(&mut self, request_key: &RequestKey) -> PoolResult<usize>;
+
+    /// Get a request and its status from the pool
+    fn get(
+        &mut self,
+        request_key: &RequestKey,
+    ) -> PoolResult<Option<(RequestEntity, StatusWithContext)>>;
+
+    /// Get the status of a request
+    fn get_status(&mut self, request_key: &RequestKey) -> PoolResult<Option<StatusWithContext>>;
+
+    /// Update the status of a request, returning the old status
+    fn update_status(
+        &mut self,
+        request_key: RequestKey,
+        status: StatusWithContext,
+    ) -> PoolResult<StatusWithContext>;
+}
+
+/// A pool extension that supports tracing
+pub trait PoolWithTrace: Pool {
+    /// Get all traces of requests, up to the given max depth.
+    fn trace_all(&self, max_depth: usize) -> Vec<(RequestKey, RequestEntity, StatusWithContext)>;
+
+    /// Get the live entity and trace of a request
+    fn trace(
+        &self,
+        request_key: &RequestKey,
+    ) -> Vec<(RequestKey, RequestEntity, StatusWithContext)>;
+}
diff --git a/reqpool/src/utils.rs b/reqpool/src/utils.rs
new file mode 100644
index 00000000..ba9d5c3d
--- /dev/null
+++ b/reqpool/src/utils.rs
@@ -0,0 +1,27 @@
+use raiko_lib::{proof_type::ProofType, prover::ProofKey};
+
+use crate::{RequestKey, SingleProofRequestKey};
+
+/// Returns the request key corresponding to the proof key.
+///
+/// During proving, the prover stores the network proof id in the pool, keyed by a **proof key**. This
+/// function derives a unique request key from that proof key, so that the proof id can be stored in
+/// the pool.
+///
+/// Note that this is a hack, and it should be removed in the future.
+pub fn proof_key_to_hack_request_key(proof_key: ProofKey) -> RequestKey {
+    let (chain_id, block_number, block_hash, proof_type) = proof_key;
+
+    // HACK: Use a special prover address as a mask, to distinguish from real
+    // RequestKeys
+    let hack_prover_address = String::from("0x1231231231231231231231231231231231231231");
+
+    SingleProofRequestKey::new(
+        chain_id,
+        block_number,
+        block_hash,
+        ProofType::try_from(proof_type).expect("unsupported proof type, it should not happen at proof_key_to_hack_request_key, please issue a bug report"),
+        hack_prover_address,
+    )
+    .into()
+}
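
For context, a minimal usage sketch of the `Pool` API introduced by this patch (not part of the diff). It assumes a reachable Redis at `redis://localhost:6379`, that `raiko_lib::prover::Proof` implements `Default`, and that variants such as `ProofType::Native` and `BlobProofType::ProofOfEquivalence` exist as in `raiko_lib`; the chain id, block numbers, and network names are placeholders.

```rust
use std::collections::HashMap;

use alloy_primitives::Address;
use raiko_lib::{input::BlobProofType, primitives::B256, proof_type::ProofType, prover::Proof};
use raiko_reqpool::{
    Pool, RedisPool, RedisPoolConfig, RequestKey, SingleProofRequestEntity,
    SingleProofRequestKey, Status, StatusWithContext,
};

fn main() -> Result<(), String> {
    // Open the pool; entries expire after one hour (redis_ttl is in seconds).
    let mut pool = RedisPool::open(RedisPoolConfig {
        redis_url: "redis://localhost:6379".to_string(),
        redis_ttl: 3600,
    })
    .map_err(|e| e.to_string())?;

    // A request is identified by (chain_id, block_number, block_hash, proof_type, prover).
    let key: RequestKey = SingleProofRequestKey::new(
        167000,
        123456,
        B256::ZERO,
        ProofType::Native,
        "0x0000000000000000000000000000000000000000".to_string(),
    )
    .into();

    // The entity carries everything needed to actually generate the proof.
    let entity = SingleProofRequestEntity::new(
        123456,                              // block_number
        7890,                                // l1_inclusion_block_number
        "taiko_mainnet".to_string(),         // network (placeholder)
        "ethereum".to_string(),              // l1_network (placeholder)
        B256::ZERO,                          // graffiti
        Address::ZERO,                       // prover
        ProofType::Native,
        BlobProofType::ProofOfEquivalence,
        HashMap::new(),                      // extra prover args
    );

    // Register the request, check it is visible, then drive it to a terminal status.
    pool.add(key.clone(), entity.into(), StatusWithContext::new_registered())?;
    assert!(pool.get_status(&key)?.is_some());

    let old = pool.update_status(
        key,
        StatusWithContext::from(Status::Success {
            proof: Proof::default(),
        }),
    )?;
    assert!(matches!(old.into_status(), Status::Registered));
    Ok(())
}
```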