Rocksdb ttl #2271

Open

wants to merge 1 commit into master
2 changes: 1 addition & 1 deletion cmake/Hunter/config.cmake
@@ -35,7 +35,7 @@ hunter_config(
 hunter_config(
     rocksdb
     VERSION 9.6.1
-    CMAKE_ARGS WITH_GFLAGS=OFF
+    CMAKE_ARGS WITH_GFLAGS=OFF USE_RTTI=ON
 )
 
 hunter_config(
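The diff does not say why USE_RTTI=ON is now required. A plausible reading, and it is only an assumption, is that TTL support is reached through rocksdb::DBWithTTL, a StackableDB wrapper around rocksdb::DB, and that recovering the wrapper from a plain DB pointer takes a dynamic_cast, which needs RocksDB compiled with RTTI. A minimal sketch of that failure mode:

#include <rocksdb/utilities/db_ttl.h>

// The upcast is always fine: DBWithTTL is-a rocksdb::DB.
rocksdb::DB *as_plain_db(rocksdb::DBWithTTL *ttl_db) {
  return ttl_db;
}

// The downcast needs type_info for rocksdb::DBWithTTL; if the RocksDB
// library was built without RTTI, this fails to link (undefined
// reference to the class's typeinfo).
rocksdb::DBWithTTL *as_ttl_db(rocksdb::DB *db) {
  return dynamic_cast<rocksdb::DBWithTTL *>(db);
}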
5 changes: 5 additions & 0 deletions core/application/app_configuration.hpp
@@ -267,6 +267,11 @@ namespace kagome::application {
      */
     virtual bool disableSecureMode() const = 0;
 
+    /**
+     * Whether to enable automatic database migration.
+     */
+    virtual bool enableDbMigration() const = 0;
+
     enum class OffchainWorkerMode : uint8_t {
       WhenValidating,
       Always,
12 changes: 8 additions & 4 deletions core/application/impl/app_configuration_impl.cpp
@@ -175,12 +175,11 @@ namespace {

   static constexpr std::array<std::string_view,
                               1 + KAGOME_WASM_COMPILER_WASM_EDGE>
-      interpreters {
+      interpreters{
 #if KAGOME_WASM_COMPILER_WASM_EDGE == 1
-          "WasmEdge",
+        "WasmEdge",
 #endif
-      "Binaryen"
-  };
+        "Binaryen"};
 
   static const std::string interpreters_str =
       fmt::format("[{}]", fmt::join(interpreters, ", "));
@@ -844,6 +843,7 @@ namespace kagome::application {
("state-pruning", po::value<std::string>()->default_value("archive"), "state pruning policy. 'archive', 'prune-discarded', or the number of finalized blocks to keep.")
("blocks-pruning", po::value<uint32_t>(), "If specified, keep block body only for specified number of recent finalized blocks.")
("enable-thorough-pruning", po::bool_switch(), "Makes trie node pruner more efficient, but the node starts slowly")
("enable-db-migration", po::bool_switch(), "Enable automatic db migration")
;

po::options_description network_desc("Network options");
@@ -1618,6 +1618,10 @@ namespace kagome::application {

     blocks_pruning_ = find_argument<uint32_t>(vm, "blocks-pruning");
 
+    if (find_argument(vm, "enable-db-migration")) {
+      enable_db_migration_ = true;
+    }
+
     if (find_argument(vm, "precompile-relay")) {
       precompile_wasm_.emplace();
     }
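For readers unfamiliar with the pattern above: po::bool_switch() stores false by default and flips to true when the flag is present, so --enable-db-migration takes no value. A self-contained sketch (plain boost::program_options, not kagome code):

#include <boost/program_options.hpp>

#include <iostream>

int main(int argc, char **argv) {
  namespace po = boost::program_options;
  po::options_description desc("Storage options");
  desc.add_options()
      ("enable-db-migration", po::bool_switch(), "Enable automatic db migration");

  po::variables_map vm;
  po::store(po::parse_command_line(argc, argv, desc), vm);
  po::notify(vm);

  // bool_switch() guarantees the key exists and holds false unless the
  // flag appeared on the command line, so no presence check is needed.
  std::cout << std::boolalpha
            << vm["enable-db-migration"].as<bool>() << "\n";
}

Run with --enable-db-migration and it prints true; without the flag, false. The kagome code instead routes the flag through its find_argument helper into enable_db_migration_.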
4 changes: 4 additions & 0 deletions core/application/impl/app_configuration_impl.hpp
@@ -208,6 +208,9 @@ namespace kagome::application {
     std::optional<uint32_t> blocksPruning() const override {
       return blocks_pruning_;
     }
+    bool enableDbMigration() const override {
+      return enable_db_migration_;
+    }
     std::optional<std::string_view> devMnemonicPhrase() const override {
       if (dev_mnemonic_phrase_) {
         return *dev_mnemonic_phrase_;
@@ -375,6 +378,7 @@ namespace kagome::application {
     bool prune_discarded_states_ = false;
     bool enable_thorough_pruning_ = false;
     std::optional<uint32_t> blocks_pruning_;
+    bool enable_db_migration_ = false;
     std::optional<std::string> dev_mnemonic_phrase_;
     std::string node_wss_pem_;
     std::optional<BenchmarkConfigSection> benchmark_config_;
7 changes: 6 additions & 1 deletion core/injector/application_injector.cpp
@@ -258,6 +258,7 @@ namespace {
       const sptr<application::ChainSpec> &chain_spec) {
     // hack for recovery mode (otherwise - fails due to rocksdb bug)
     bool prevent_destruction = app_config.recoverState().has_value();
+    bool enable_migration = app_config.enableDbMigration();
 
     auto options = rocksdb::Options{};
     options.create_if_missing = true;
@@ -273,11 +274,15 @@ namespace {
     // NOLINTNEXTLINE(cppcoreguidelines-narrowing-conversions)
     options.max_open_files = soft_limit.value() / 2;
 
+    const std::unordered_map<std::string, int32_t> column_ttl = {
+        {"avaliability_storage", 25 * 60 * 60}};  // 25 hours
     auto db_res =
         storage::RocksDb::create(app_config.databasePath(chain_spec->id()),
                                  options,
                                  app_config.dbCacheSize(),
-                                 prevent_destruction);
+                                 prevent_destruction,
+                                 column_ttl,
+                                 enable_migration);
     if (!db_res) {
       auto log = log::createLogger("Injector", "injector");
       log->critical(
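The heart of the PR is the per-column-family TTL: availability chunks expire after 25 hours instead of accumulating forever. The new parameters go through kagome's storage::RocksDb::create wrapper, whose updated signature is not part of this diff; a minimal sketch of the same idea against the plain RocksDB API (an illustration under that assumption, not the PR's code) uses rocksdb::DBWithTTL. Note that expired entries are only dropped as compaction touches them, so the TTL is a lower bound on lifetime, not an exact deadline:

#include <rocksdb/utilities/db_ttl.h>

#include <cassert>
#include <vector>

int main() {
  rocksdb::DBOptions db_options;
  db_options.create_if_missing = true;
  db_options.create_missing_column_families = true;

  // One descriptor per column family; the spelling matches the PR.
  std::vector<rocksdb::ColumnFamilyDescriptor> families{
      {rocksdb::kDefaultColumnFamilyName, rocksdb::ColumnFamilyOptions{}},
      {"avaliability_storage", rocksdb::ColumnFamilyOptions{}},
  };
  // One TTL (in seconds) per descriptor, in the same order; 0 = never expire.
  std::vector<int32_t> ttls{0, 25 * 60 * 60};  // 25 hours

  std::vector<rocksdb::ColumnFamilyHandle *> handles;
  rocksdb::DBWithTTL *db = nullptr;
  auto status = rocksdb::DBWithTTL::Open(
      db_options, "/tmp/ttl_demo", families, &handles, &db, ttls);
  assert(status.ok());

  // `db` now behaves like a normal rocksdb::DB; entries older than their
  // column family's TTL disappear during compaction.
  for (auto *handle : handles) {
    db->DestroyColumnFamilyHandle(handle);
  }
  delete db;
}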
53 changes: 53 additions & 0 deletions core/parachain/availability/store/candidate_chunk_key.hpp
@@ -0,0 +1,53 @@
/**
 * Copyright Quadrivium LLC
 * All Rights Reserved
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

#include <optional>

#include <boost/endian/conversion.hpp>

#include "parachain/types.hpp"
#include "primitives/common.hpp"

namespace kagome {
  struct CandidateChunkKey {
    static constexpr size_t kCandidateHashSize =
        sizeof(parachain::CandidateHash);
    static constexpr size_t kChunkIndexSize = sizeof(parachain::ChunkIndex);
    using Key = common::Blob<kCandidateHashSize + kChunkIndexSize>;
    using HashKey = common::Blob<kCandidateHashSize>;

    static Key encode(const parachain::CandidateHash &candidate_hash,
                      const parachain::ChunkIndex &chunk_index) {
      Key key;
      std::copy_n(
          encode_hash(candidate_hash).data(), kCandidateHashSize, key.data());
      boost::endian::store_big_u32(key.data() + kCandidateHashSize,
                                   chunk_index);
      return key;
    }

    static HashKey encode_hash(const parachain::CandidateHash &candidate_hash) {
      HashKey key;
      std::copy_n(candidate_hash.data(), kCandidateHashSize, key.data());
      return key;
    }

    static std::optional<
        std::pair<parachain::CandidateHash, parachain::ChunkIndex>>
    decode(common::BufferView key) {
      if (key.size() != Key::size()) {
        return std::nullopt;
      }
      std::pair<parachain::CandidateHash, parachain::ChunkIndex> candidateChunk;
      std::copy_n(key.data(), kCandidateHashSize, candidateChunk.first.data());
      candidateChunk.second =
          boost::endian::load_big_u32(key.data() + kCandidateHashSize);
      return candidateChunk;
    }
  };
}  // namespace kagome
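A short usage sketch of the key codec above (assuming, as the sizeof expressions imply, that parachain::CandidateHash is a 32-byte blob and parachain::ChunkIndex is a uint32_t; both come from the included headers, not this file). Because the candidate hash is the prefix and the chunk index is stored big-endian, all chunks of one candidate sit contiguously and in numeric order under RocksDB's default lexicographic comparator:

#include <cassert>

#include "parachain/availability/store/candidate_chunk_key.hpp"

void candidate_chunk_key_example(
    const kagome::parachain::CandidateHash &hash) {
  const kagome::parachain::ChunkIndex index = 3;

  // Composite key: 32-byte candidate hash followed by a big-endian u32.
  const auto key = kagome::CandidateChunkKey::encode(hash, index);

  // decode() round-trips and returns std::nullopt for keys of other sizes.
  const auto decoded = kagome::CandidateChunkKey::decode(key);
  assert(decoded && decoded->first == hash && decoded->second == index);

  // encode_hash(hash) is the shared prefix of every chunk key for `hash`,
  // which enables prefix scans over all chunks of one candidate.
  const auto prefix = kagome::CandidateChunkKey::encode_hash(hash);
  (void)prefix;
}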