Commit

Cleanup AccountsDb::new_with_config() (#3270)
The function takes an optional AccountsDbConfig parameter. At the top
of the function, use .unwrap_or_default() on this parameter to remove
repetitive .as_ref().map(...).unwrap_or_else() sequences.
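The refactor follows a simple shape: unwrap the Option once at the top of the function, then read fields directly. A minimal before/after sketch of that pattern, using a hypothetical ExampleConfig rather than the real AccountsDbConfig (field names here are illustrative only):

// Hypothetical stand-in for AccountsDbConfig; field names are illustrative.
#[derive(Default)]
struct ExampleConfig {
    skip_initial_hash_calc: bool,
    base_working_path: Option<std::path::PathBuf>,
}

// Before: every read goes through the Option wrapper.
fn fields_before(config: Option<ExampleConfig>) -> (bool, Option<std::path::PathBuf>) {
    let skip_initial_hash_calc = config
        .as_ref()
        .map(|config| config.skip_initial_hash_calc)
        .unwrap_or_default();
    let base_working_path = config
        .as_ref()
        .and_then(|config| config.base_working_path.clone());
    (skip_initial_hash_calc, base_working_path)
}

// After: unwrap the Option once, then use plain field access.
fn fields_after(config: Option<ExampleConfig>) -> (bool, Option<std::path::PathBuf>) {
    let config = config.unwrap_or_default();
    (config.skip_initial_hash_calc, config.base_working_path)
}

The only requirement is that the config type implements Default, which AccountsDbConfig already does; the old code constructed AccountsDbConfig::default() by hand for the same purpose.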
steviez authored Oct 23, 2024
1 parent dc4b9dc commit b99e328
Showing 1 changed file with 26 additions and 78 deletions.
accounts-db/src/accounts_db.rs (104 changes: 26 additions & 78 deletions)
@@ -1927,103 +1927,51 @@ impl AccountsDb {
         cluster_type: &ClusterType,
         account_indexes: AccountSecondaryIndexes,
         shrink_ratio: AccountShrinkThreshold,
-        mut accounts_db_config: Option<AccountsDbConfig>,
+        accounts_db_config: Option<AccountsDbConfig>,
         accounts_update_notifier: Option<AccountsUpdateNotifier>,
         exit: Arc<AtomicBool>,
     ) -> Self {
-        let default_accounts_db_config = AccountsDbConfig::default();
-
-        let accounts_index = AccountsIndex::new(
-            accounts_db_config.as_mut().and_then(|x| x.index.take()),
-            exit,
-        );
-        let base_working_path = accounts_db_config
-            .as_ref()
-            .and_then(|x| x.base_working_path.clone());
-        let accounts_hash_cache_path = accounts_db_config
-            .as_ref()
-            .and_then(|config| config.accounts_hash_cache_path.clone());
-        let skip_initial_hash_calc = accounts_db_config
-            .as_ref()
-            .map(|config| config.skip_initial_hash_calc)
-            .unwrap_or_default();
-
-        let ancient_append_vec_offset = accounts_db_config
-            .as_ref()
-            .and_then(|config| config.ancient_append_vec_offset)
-            .or(ANCIENT_APPEND_VEC_DEFAULT_OFFSET);
-
-        let exhaustively_verify_refcounts = accounts_db_config
-            .as_ref()
-            .map(|config| config.exhaustively_verify_refcounts)
-            .unwrap_or_default();
-
-        let create_ancient_storage = accounts_db_config
-            .as_ref()
-            .map(|config| config.create_ancient_storage)
-            .unwrap_or_default();
-
-        let test_partitioned_epoch_rewards = accounts_db_config
-            .as_ref()
-            .map(|config| config.test_partitioned_epoch_rewards)
-            .unwrap_or_default();
-
-        let test_skip_rewrites_but_include_in_bank_hash = accounts_db_config
-            .as_ref()
-            .map(|config| config.test_skip_rewrites_but_include_in_bank_hash)
-            .unwrap_or_default();
+        let accounts_db_config = accounts_db_config.unwrap_or_default();
+        let accounts_index = AccountsIndex::new(accounts_db_config.index.clone(), exit);
+        let base_working_path = accounts_db_config.base_working_path.clone();
+        let accounts_hash_cache_path = accounts_db_config.accounts_hash_cache_path.clone();
 
+        let test_partitioned_epoch_rewards = accounts_db_config.test_partitioned_epoch_rewards;
         let partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig =
             PartitionedEpochRewardsConfig::new(test_partitioned_epoch_rewards);
 
-        let read_cache_size = accounts_db_config
-            .as_ref()
-            .and_then(|config| config.read_cache_limit_bytes)
-            .unwrap_or((
-                Self::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO,
-                Self::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI,
-            ));
-
-        let storage_access = accounts_db_config
-            .as_ref()
-            .map(|config| config.storage_access)
-            .unwrap_or_default();
-
-        let scan_filter_for_shrinking = accounts_db_config
-            .as_ref()
-            .map(|config| config.scan_filter_for_shrinking)
-            .unwrap_or_default();
-
-        let enable_experimental_accumulator_hash = accounts_db_config
-            .as_ref()
-            .map(|config| config.enable_experimental_accumulator_hash)
-            .unwrap_or(default_accounts_db_config.enable_experimental_accumulator_hash)
-            .into();
+        let read_cache_size = accounts_db_config.read_cache_limit_bytes.unwrap_or((
+            Self::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_LO,
+            Self::DEFAULT_MAX_READ_ONLY_CACHE_DATA_SIZE_HI,
+        ));
 
         let paths_is_empty = paths.is_empty();
         let mut new = Self {
             paths,
-            skip_initial_hash_calc,
-            ancient_append_vec_offset,
+            skip_initial_hash_calc: accounts_db_config.skip_initial_hash_calc,
+            ancient_append_vec_offset: accounts_db_config
+                .ancient_append_vec_offset
+                .or(ANCIENT_APPEND_VEC_DEFAULT_OFFSET),
             cluster_type: Some(*cluster_type),
             account_indexes,
             shrink_ratio,
             accounts_update_notifier,
-            create_ancient_storage,
+            create_ancient_storage: accounts_db_config.create_ancient_storage,
             read_only_accounts_cache: ReadOnlyAccountsCache::new(
                 read_cache_size.0,
                 read_cache_size.1,
                 Self::READ_ONLY_CACHE_MS_TO_SKIP_LRU_UPDATE,
             ),
-            write_cache_limit_bytes: accounts_db_config
-                .as_ref()
-                .and_then(|x| x.write_cache_limit_bytes),
+            write_cache_limit_bytes: accounts_db_config.write_cache_limit_bytes,
             partitioned_epoch_rewards_config,
-            exhaustively_verify_refcounts,
-            test_skip_rewrites_but_include_in_bank_hash,
-            storage_access,
-            scan_filter_for_shrinking,
-            is_experimental_accumulator_hash_enabled: enable_experimental_accumulator_hash,
+            exhaustively_verify_refcounts: accounts_db_config.exhaustively_verify_refcounts,
+            test_skip_rewrites_but_include_in_bank_hash: accounts_db_config
+                .test_skip_rewrites_but_include_in_bank_hash,
+            storage_access: accounts_db_config.storage_access,
+            scan_filter_for_shrinking: accounts_db_config.scan_filter_for_shrinking,
+            is_experimental_accumulator_hash_enabled: accounts_db_config
+                .enable_experimental_accumulator_hash
+                .into(),
             ..Self::default_with_accounts_index(
                 accounts_index,
                 base_working_path,
@@ -2039,8 +1987,8 @@ impl AccountsDb {
             new.temp_paths = Some(temp_dirs);
         };
         new.shrink_paths = accounts_db_config
-            .as_ref()
-            .and_then(|config| config.shrink_paths.clone())
+            .shrink_paths
+            .clone()
             .unwrap_or_else(|| new.paths.clone());
 
         new.start_background_hasher();
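Not every field collapses to plain field access: read_cache_limit_bytes stays wrapped in its own Option because its fallback is a specific (LO, HI) pair rather than the type's Default, so the diff keeps an explicit .unwrap_or(...) for it. A small sketch of that distinction, with illustrative names and arbitrary byte values (not the real AccountsDb defaults):

#[derive(Default)]
struct ExampleCacheConfig {
    // None means "use the built-in low/high watermarks below".
    read_cache_limit_bytes: Option<(usize, usize)>,
}

// Arbitrary illustrative values, not the real AccountsDb defaults.
const DEFAULT_READ_CACHE_LO: usize = 128 * 1024 * 1024;
const DEFAULT_READ_CACHE_HI: usize = 160 * 1024 * 1024;

fn read_cache_size(config: Option<ExampleCacheConfig>) -> (usize, usize) {
    // unwrap_or_default() handles the missing-config case...
    let config = config.unwrap_or_default();
    // ...but a field whose fallback is not the type's Default still needs its own unwrap_or().
    config
        .read_cache_limit_bytes
        .unwrap_or((DEFAULT_READ_CACHE_LO, DEFAULT_READ_CACHE_HI))
}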