From 9dee9c1d0599614d2d98729c9afbcc044b77b951 Mon Sep 17 00:00:00 2001 From: JayLiu <38887641+luky116@users.noreply.github.com> Date: Thu, 8 Aug 2024 16:40:04 +0800 Subject: [PATCH 1/4] fix: add bitops.tcl, fixed some bugs (#402) * fix bitops bug * add bitops.tcl --------- Co-authored-by: liuyuecai --- src/cmd_kv.cc | 8 ++-- tests/test_helper.tcl | 2 +- tests/unit/bitops.tcl | 95 ++++++++++++++++++++++--------------------- 3 files changed, 53 insertions(+), 52 deletions(-) diff --git a/src/cmd_kv.cc b/src/cmd_kv.cc index 54c9dba65..67b1a715c 100644 --- a/src/cmd_kv.cc +++ b/src/cmd_kv.cc @@ -323,20 +323,18 @@ void BitOpCmd::DoCmd(PClient* client) { PString res; storage::BitOpType op = storage::kBitOpDefault; - if (keys.size() == 1) { + if (!keys.empty()) { if (pstd::StringEqualCaseInsensitive(client->argv_[1], "or")) { err = kPErrorOK; op = storage::kBitOpOr; - } - } else if (keys.size() >= 2) { - if (pstd::StringEqualCaseInsensitive(client->argv_[1], "xor")) { + } else if (pstd::StringEqualCaseInsensitive(client->argv_[1], "xor")) { err = kPErrorOK; op = storage::kBitOpXor; } else if (pstd::StringEqualCaseInsensitive(client->argv_[1], "and")) { err = kPErrorOK; op = storage::kBitOpAnd; } else if (pstd::StringEqualCaseInsensitive(client->argv_[1], "not")) { - if (client->argv_.size() == 4) { + if (keys.size() == 1) { err = kPErrorOK; op = storage::kBitOpNot; } diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 0665583b0..37dbd6329 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -21,7 +21,7 @@ set ::all_tests { # unit/pubsub # unit/slowlog # unit/maxmemory - # unit/bitops + unit/bitops # unit/hyperloglog # unit/type # unit/acl diff --git a/tests/unit/bitops.tcl b/tests/unit/bitops.tcl index 6ddae9170..29a486d3a 100644 --- a/tests/unit/bitops.tcl +++ b/tests/unit/bitops.tcl @@ -43,36 +43,36 @@ start_server {tags {"bitops"}} { r bitcount no-key } 0 -# catch {unset num} -# foreach vec [list "" "\xaa" "\x00\x00\xff" "foobar" "123"] { -# incr num -# test "BITCOUNT against test vector #$num" { -# r set str $vec -# assert {[r bitcount str] == [count_bits $vec]} -# } -# } + catch {unset num} + foreach vec [list "" "\xaa" "\x00\x00\xff" "foobar" "123"] { + incr num + test "BITCOUNT against test vector #$num" { + r set str $vec + assert {[r bitcount str] == [count_bits $vec]} + } + } -# test {BITCOUNT fuzzing without start/end} { -# for {set j 0} {$j < 100} {incr j} { -# set str [randstring 0 3000] -# r set str $str -# assert {[r bitcount str] == [count_bits $str]} -# } -# } + test {BITCOUNT fuzzing without start/end} { + for {set j 0} {$j < 100} {incr j} { + set str [randstring 0 3000] + r set str $str + assert {[r bitcount str] == [count_bits $str]} + } + } -# test {BITCOUNT fuzzing with start/end} { -# for {set j 0} {$j < 100} {incr j} { -# set str [randstring 0 3000] -# r set str $str -# set l [string length $str] -# set start [randomInt $l] -# set end [randomInt $l] -# if {$start > $end} { -# lassign [list $end $start] start end -# } -# assert {[r bitcount str $start $end] == [count_bits [string range $str $start $end]]} -# } -# } + test {BITCOUNT fuzzing with start/end} { + for {set j 0} {$j < 100} {incr j} { + set str [randstring 0 3000] + r set str $str + set l [string length $str] + set start [randomInt $l] + set end [randomInt $l] + if {$start > $end} { + lassign [list $end $start] start end + } + assert {[r bitcount str $start $end] == [count_bits [string range $str $start $end]]} + } + } test {BITCOUNT with start, end} { r set s "foobar" @@ -156,7 +156,8 @@ 
start_server {tags {"bitops"}} { foreach op {and or xor} { test "BITOP $op fuzzing" { for {set i 0} {$i < 10} {incr i} { - r flushall + # TODO: revert to r flushall once pikiwidb supports it + r flushdb set vec {} set veckeys {} set numvec [expr {[randomInt 10]+1}] @@ -172,15 +173,16 @@ start_server {tags {"bitops"}} { } } - test {BITOP NOT fuzzing} { - for {set i 0} {$i < 10} {incr i} { - r flushall - set str [randstring 0 1000] - r set str $str - r bitop not target str - assert_equal [r get target] [simulate_bit_op not $str] - } - } + test {BITOP NOT fuzzing} { + for {set i 0} {$i < 10} {incr i} { + # TODO: revert to r flushall once pikiwidb supports it + r flushdb + set str [randstring 0 1000] + r set str $str + r bitop not target str + assert_equal [r get target] [simulate_bit_op not $str] + } + } test {BITOP with integer encoded source objects} { r set a 1 @@ -189,14 +191,14 @@ start_server {tags {"bitops"}} { r get dest } {2} -# test {BITOP with non string source key} { -# r del c -# r set a 1 -# r set b 2 -# r lpush c foo -# catch {r bitop xor dest a b c d} e -# set e -# } {WRONGTYPE*} + test {BITOP with non string source key} { + r del c + r set a 1 + r set b 2 + r lpush c foo + catch {r bitop xor dest a b c d} e + set e + } {WRONGTYPE*} test {BITOP with empty string after non empty string (issue #529)} { r flushdb @@ -204,6 +206,7 @@ start_server {tags {"bitops"}} { r bitop or x a b } {32} +# Pikiwidb does not support the BITPOS command # test {BITPOS bit=0 with empty key returns 0} { # r del str # r bitpos str 0 From 694e2255905b2ac84e2cb47fd3ac9ae327b3ac5f Mon Sep 17 00:00:00 2001 From: JayLiu <38887641+luky116@users.noreply.github.com> Date: Thu, 8 Aug 2024 16:43:02 +0800 Subject: [PATCH 2/4] fix: add type.tcl and quit.tcl files, fix some bugs (#398) * repair type.tcl * add quit.tcl --------- Co-authored-by: liuyuecai --- src/base_cmd.cc | 2 +- src/client.h | 2 ++ src/cmd_keys.cc | 2 +- src/cmd_table_manager.cc | 7 ++++--- src/cmd_thread_pool_worker.cc | 8 +++++--- tests/test_helper.tcl | 4 ++-- tests/unit/command.tcl | 5 +---- tests/unit/dump.tcl | 2 ++ tests/unit/geo.tcl | 2 ++ tests/unit/hyperloglog.tcl | 2 ++ tests/unit/introspection.tcl | 2 ++ tests/unit/latency-monitor.tcl | 6 ++++++ tests/unit/maxmemory.tcl | 6 ++++++ tests/unit/type.tcl | 24 ------------------------ 14 files changed, 36 insertions(+), 38 deletions(-) diff --git a/src/base_cmd.cc b/src/base_cmd.cc index 33332cabc..601cff75e 100644 --- a/src/base_cmd.cc +++ b/src/base_cmd.cc @@ -107,7 +107,7 @@ BaseCmd* BaseCmdGroup::GetSubCmd(const std::string& cmdName) { bool BaseCmdGroup::DoInitial(PClient* client) { client->SetSubCmdName(client->argv_[1]); if (!subCmds_.contains(client->SubCmdName())) { - client->SetRes(CmdRes::kSyntaxErr, client->argv_[0] + " unknown subcommand for '" + client->SubCmdName() + "'"); + client->SetRes(CmdRes::kErrOther, client->argv_[0] + " unknown subcommand for '" + client->SubCmdName() + "'"); return false; } return true; diff --git a/src/client.h b/src/client.h index d4370c724..1365a1db3 100644 --- a/src/client.h +++ b/src/client.h @@ -47,6 +47,8 @@ class CmdRes { kInvalidDB, kInconsistentHashTag, kErrOther, + kUnknownCmd, + kUnknownSubCmd, KIncrByOverFlow, kInvalidCursor, kWrongLeader, diff --git a/src/cmd_keys.cc b/src/cmd_keys.cc index ba703071d..9555fa67a 100644 --- a/src/cmd_keys.cc +++ b/src/cmd_keys.cc @@ -63,7 +63,7 @@ bool TypeCmd::DoInitial(PClient* client) { } void TypeCmd::DoCmd(PClient* client) { - storage::DataType type; + storage::DataType type = storage::DataType::kNones; rocksdb::Status s = 
PSTORE.GetBackend(client->GetCurrentDB())->GetStorage()->GetType(client->Key(), type); if (s.ok()) { client->AppendContent("+" + std::string(storage::DataTypeToString(type))); diff --git a/src/cmd_table_manager.cc b/src/cmd_table_manager.cc index c5c667404..0b10a40c5 100644 --- a/src/cmd_table_manager.cc +++ b/src/cmd_table_manager.cc @@ -17,6 +17,7 @@ #include "cmd_raft.h" #include "cmd_set.h" #include "cmd_zset.h" +#include "pstd_string.h" namespace pikiwidb { @@ -190,16 +191,16 @@ std::pair CmdTableManager::GetCommand(const std::strin auto cmd = cmds_->find(cmdName); if (cmd == cmds_->end()) { - return std::pair(nullptr, CmdRes::kSyntaxErr); + return std::pair(nullptr, CmdRes::kUnknownCmd); } if (cmd->second->HasSubCommand()) { if (client->argv_.size() < 2) { return std::pair(nullptr, CmdRes::kInvalidParameter); } - return std::pair(cmd->second->GetSubCmd(client->argv_[1]), CmdRes::kSyntaxErr); + return std::pair(cmd->second->GetSubCmd(pstd::StringToLower(client->argv_[1])), CmdRes::kUnknownSubCmd); } - return std::pair(cmd->second.get(), CmdRes::kSyntaxErr); + return std::pair(cmd->second.get(), CmdRes::kOK); } bool CmdTableManager::CmdExist(const std::string& cmd) const { diff --git a/src/cmd_thread_pool_worker.cc b/src/cmd_thread_pool_worker.cc index cafa31a71..0afdc16fe 100644 --- a/src/cmd_thread_pool_worker.cc +++ b/src/cmd_thread_pool_worker.cc @@ -21,10 +21,12 @@ void CmdWorkThreadPoolWorker::Work() { auto [cmdPtr, ret] = cmd_table_manager_.GetCommand(task->CmdName(), task->Client().get()); if (!cmdPtr) { - if (ret == CmdRes::kInvalidParameter) { - task->Client()->SetRes(CmdRes::kInvalidParameter); + if (ret == CmdRes::kUnknownCmd) { + task->Client()->SetRes(CmdRes::kErrOther, "unknown command '" + task->CmdName() + "'"); + } else if (ret == CmdRes::kUnknownSubCmd) { + task->Client()->SetRes(CmdRes::kErrOther, "unknown sub command '" + task->Client().get()->argv_[1] + "'"); } else { - task->Client()->SetRes(CmdRes::kSyntaxErr, "unknown command '" + task->CmdName() + "'"); + task->Client()->SetRes(CmdRes::kInvalidParameter); } g_pikiwidb->PushWriteTask(task->Client()); continue; diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 37dbd6329..aef0c4bf4 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -16,14 +16,14 @@ set ::all_tests { # unit/basic # unit/scan # unit/multi - # unit/quit + unit/quit # unit/type/list # unit/pubsub # unit/slowlog # unit/maxmemory unit/bitops # unit/hyperloglog - # unit/type + unit/type # unit/acl # unit/type/list-2 # unit/type/list-3 diff --git a/tests/unit/command.tcl b/tests/unit/command.tcl index a647b42b7..1a8108b74 100644 --- a/tests/unit/command.tcl +++ b/tests/unit/command.tcl @@ -1,7 +1,4 @@ -# Copyright (c) 2023-present, Qihoo, Inc. All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. +# Pikiwidb does not support the docs command start_server {tags {"command"}} { test "Command docs supported." 
{ diff --git a/tests/unit/dump.tcl b/tests/unit/dump.tcl index b79c3ba9d..7396dea66 100644 --- a/tests/unit/dump.tcl +++ b/tests/unit/dump.tcl @@ -1,3 +1,5 @@ +# Pikiwidb does not support the restore command + start_server {tags {"dump"}} { test {DUMP / RESTORE are able to serialize / unserialize a simple key} { r set foo bar diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index 7ed871098..fad7153ce 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -1,3 +1,5 @@ +# Pikiwidb does not support the geo command + # Helper functions to simulate search-in-radius in the Tcl side in order to # verify the Redis implementation with a fuzzy test. proc geo_degrad deg {expr {$deg*atan(1)*8/360}} diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl index c8d56e4ba..ffe41a3ab 100755 --- a/tests/unit/hyperloglog.tcl +++ b/tests/unit/hyperloglog.tcl @@ -1,3 +1,5 @@ +# Pikiwidb does not support the pfadd command + start_server {tags {"hll"}} { # test {HyperLogLog self test passes} { # catch {r pfselftest} e diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index 342bb939a..c9409a8ec 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -1,3 +1,5 @@ +# Pikiwidb does not support the client command + start_server {tags {"introspection"}} { test {CLIENT LIST} { r client list diff --git a/tests/unit/latency-monitor.tcl b/tests/unit/latency-monitor.tcl index b736cad98..25cc12b03 100644 --- a/tests/unit/latency-monitor.tcl +++ b/tests/unit/latency-monitor.tcl @@ -3,6 +3,7 @@ start_server {tags {"latency-monitor"}} { r config set latency-monitor-threshold 200 r latency reset + # This parameter is not available in Pika test {Test latency events logging} { r debug sleep 0.3 after 1100 @@ -12,6 +13,7 @@ start_server {tags {"latency-monitor"}} { assert {[r latency history command] >= 3} } + # This parameter is not available in Pika test {LATENCY HISTORY output is ok} { set min 250 set max 450 @@ -24,6 +26,7 @@ start_server {tags {"latency-monitor"}} { } } + # This parameter is not available in Pika test {LATENCY LATEST output is ok} { foreach event [r latency latest] { lassign $event eventname time latency max @@ -34,15 +37,18 @@ start_server {tags {"latency-monitor"}} { } } + # This parameter is not available in Pika test {LATENCY HISTORY / RESET with wrong event name is fine} { assert {[llength [r latency history blabla]] == 0} assert {[r latency reset blabla] == 0} } + # This parameter is not available in Pika test {LATENCY DOCTOR produces some output} { assert {[string length [r latency doctor]] > 0} } + # This parameter is not available in Pika test {LATENCY RESET is able to reset events} { assert {[r latency reset] > 0} assert {[r latency latest] eq {}} diff --git a/tests/unit/maxmemory.tcl b/tests/unit/maxmemory.tcl index 2f853f29d..59510c03d 100644 --- a/tests/unit/maxmemory.tcl +++ b/tests/unit/maxmemory.tcl @@ -7,12 +7,14 @@ start_server {tags {"maxmemory"}} { # The current maxmemory command does not support config set and policy. 
# For a complete list of commands, refer to the wiki: https://github.com/OpenAtomFoundation/pika/wiki/pika-%E5%B7%AE%E5%BC%82%E5%8C%96%E5%91%BD%E4%BB%A4 + # This parameter is not available in Pika # test "Without maxmemory small integers are shared" { # r config set maxmemory 0 # r set a 1 # assert {[r object refcount a] > 1} # } + # This parameter is not available in Pika # test "With maxmemory and non-LRU policy integers are still shared" { # r config set maxmemory 1073741824 # r config set maxmemory-policy allkeys-random @@ -20,6 +22,7 @@ start_server {tags {"maxmemory"}} { # assert {[r object refcount a] > 1} # } + # This parameter is not available in Pika # test "With maxmemory and LRU policy integers are not shared" { # r config set maxmemory 1073741824 # r config set maxmemory-policy allkeys-lru @@ -31,6 +34,7 @@ start_server {tags {"maxmemory"}} { # r config set maxmemory 0 # } + # This parameter is not available in Pika # foreach policy { # allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl # } { @@ -63,6 +67,7 @@ start_server {tags {"maxmemory"}} { # } # } + # This parameter is not available in Pika # foreach policy { # allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl # } { @@ -105,6 +110,7 @@ start_server {tags {"maxmemory"}} { # } # } + # This parameter is not available in Pika # foreach policy { # volatile-lru volatile-random volatile-ttl # } { diff --git a/tests/unit/type.tcl b/tests/unit/type.tcl index 2b5b9045a..1be894492 100644 --- a/tests/unit/type.tcl +++ b/tests/unit/type.tcl @@ -23,28 +23,4 @@ start_server {tags {"type"}} { r sadd key5 key5 assert_equal set [r type key5] } - - test "ptype none" { - r flushdb - assert_equal {} [r ptype key] - } - - test "ptype command" { - r flushdb - - r set key1 key1 - assert_equal string [r ptype key1] - - r hset key1 key key1 - assert_equal {string hash} [r ptype key1] - - r lpush key1 key1 - assert_equal {string hash list} [r ptype key1] - - r zadd key1 100 key1 - assert_equal {string hash list zset} [r ptype key1] - - r sadd key1 key1 - assert_equal {string hash list zset set} [r ptype key1] - } } \ No newline at end of file From 124b41eb3946b5b9cbf80d94bcecf8d24616cd55 Mon Sep 17 00:00:00 2001 From: JayLiu <38887641+luky116@users.noreply.github.com> Date: Thu, 8 Aug 2024 16:51:32 +0800 Subject: [PATCH 3/4] fix: add basic.tcl, fixed some bugs (#403) * fix Rename and Renamenx bug * add basic.tcl --------- Co-authored-by: liuyuecai --- src/storage/src/redis_hashes.cc | 82 ++++++++++++--------- src/storage/src/redis_lists.cc | 80 +++++++++++--------- src/storage/src/redis_sets.cc | 91 +++++++++++++---------- src/storage/src/redis_strings.cc | 56 ++++++++------ src/storage/src/redis_zsets.cc | 88 ++++++++++++---------- src/storage/src/storage.cc | 122 +++++++++++++------------------ tests/test_helper.tcl | 2 +- tests/unit/basic.tcl | 102 +++++++++++++++----------- tests/unit/keys.tcl | 1 + 9 files changed, 340 insertions(+), 284 deletions(-) diff --git a/src/storage/src/redis_hashes.cc b/src/storage/src/redis_hashes.cc index 9fa83c033..850230040 100644 --- a/src/storage/src/redis_hashes.cc +++ b/src/storage/src/redis_hashes.cc @@ -1150,21 +1150,27 @@ Status Redis::HashesRename(const Slice& key, Redis* new_inst, const Slice& newke BaseMetaKey base_meta_key(key); BaseMetaKey base_meta_newkey(newkey); s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok()) { - if (IsStale(meta_value)) { - return Status::NotFound(); - } - // copy a new hash with 
newkey - ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - statistic = parsed_hashes_meta_value.Count(); - s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); - new_inst->UpdateSpecificKeyStatistics(DataType::kHashes, newkey.ToString(), statistic); - - // HashesDel key - parsed_hashes_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); - UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); + if (!s.ok() || !ExpectedMetaValue(DataType::kHashes, meta_value)) { + return s; + } + if (key == newkey) { + return Status::OK(); } + + if (IsStale(meta_value)) { + return Status::NotFound(); + } + // copy a new hash with newkey + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + statistic = parsed_hashes_meta_value.Count(); + s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); + new_inst->UpdateSpecificKeyStatistics(DataType::kHashes, newkey.ToString(), statistic); + + // HashesDel key + parsed_hashes_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); + return s; } @@ -1178,30 +1184,36 @@ Status Redis::HashesRenamenx(const Slice& key, Redis* new_inst, const Slice& new BaseMetaKey base_meta_key(key); BaseMetaKey base_meta_newkey(newkey); s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (!s.ok() || !ExpectedMetaValue(DataType::kHashes, meta_value)) { + return s; + } + if (key == newkey) { + return Status::Corruption(); + } + + if (IsStale(meta_value)) { + return Status::NotFound(); + } + ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); + // check if newkey exists. + std::string new_meta_value; + s = new_inst->GetDB()->Get(default_read_options_, handles_[kMetaCF], base_meta_newkey.Encode(), &new_meta_value); if (s.ok()) { - if (IsStale(meta_value)) { - return Status::NotFound(); + if (!IsStale(new_meta_value)) { + return Status::Corruption(); // newkey already exists. } - ParsedHashesMetaValue parsed_hashes_meta_value(&meta_value); - // check if newkey exists. - std::string new_meta_value; - s = new_inst->GetDB()->Get(default_read_options_, handles_[kMetaCF], base_meta_newkey.Encode(), &new_meta_value); - if (s.ok()) { - if (!IsStale(new_meta_value)) { - return Status::Corruption(); // newkey already exists. 
- } - } - ParsedHashesMetaValue parsed_hashes_new_meta_value(&new_meta_value); - // copy a new hash with newkey - statistic = parsed_hashes_meta_value.Count(); - s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); - new_inst->UpdateSpecificKeyStatistics(DataType::kHashes, newkey.ToString(), statistic); - - // HashesDel key - parsed_hashes_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); - UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); } + ParsedHashesMetaValue parsed_hashes_new_meta_value(&new_meta_value); + // copy a new hash with newkey + statistic = parsed_hashes_meta_value.Count(); + s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); + new_inst->UpdateSpecificKeyStatistics(DataType::kHashes, newkey.ToString(), statistic); + + // HashesDel key + parsed_hashes_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kHashes, key.ToString(), statistic); + return s; } diff --git a/src/storage/src/redis_lists.cc b/src/storage/src/redis_lists.cc index d6c6391a3..ca7187ea7 100644 --- a/src/storage/src/redis_lists.cc +++ b/src/storage/src/redis_lists.cc @@ -981,21 +981,27 @@ Status Redis::ListsRename(const Slice& key, Redis* new_inst, const Slice& newkey BaseMetaKey base_meta_key(key); BaseMetaKey base_meta_newkey(newkey); Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok()) { - if (IsStale(meta_value)) { - return Status::NotFound(); - } - // copy a new list with newkey - ParsedListsMetaValue parsed_lists_meta_value(&meta_value); - statistic = parsed_lists_meta_value.Count(); - s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); - new_inst->UpdateSpecificKeyStatistics(DataType::kLists, newkey.ToString(), statistic); + if (!s.ok() || !ExpectedMetaValue(DataType::kLists, meta_value)) { + return s; + } + if (key == newkey) { + return Status::OK(); + } - // ListsDel key - parsed_lists_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); - UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); + if (IsStale(meta_value)) { + return Status::NotFound(); } + // copy a new list with newkey + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + statistic = parsed_lists_meta_value.Count(); + s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); + new_inst->UpdateSpecificKeyStatistics(DataType::kLists, newkey.ToString(), statistic); + + // ListsDel key + parsed_lists_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); + return s; } @@ -1008,30 +1014,36 @@ Status Redis::ListsRenamenx(const Slice& key, Redis* new_inst, const Slice& newk BaseMetaKey base_meta_key(key); BaseMetaKey base_meta_newkey(newkey); Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (!s.ok() || !ExpectedMetaValue(DataType::kLists, meta_value)) { + return s; + } + if (key == newkey) { + return Status::Corruption(); + } + + if 
(IsStale(meta_value)) { + return Status::NotFound(); + } + // check if newkey exists. + std::string new_meta_value; + ParsedListsMetaValue parsed_lists_meta_value(&meta_value); + s = new_inst->GetDB()->Get(default_read_options_, handles_[kMetaCF], base_meta_newkey.Encode(), &new_meta_value); if (s.ok()) { - if (IsStale(meta_value)) { - return Status::NotFound(); - } - // check if newkey exists. - std::string new_meta_value; - ParsedListsMetaValue parsed_lists_meta_value(&meta_value); - s = new_inst->GetDB()->Get(default_read_options_, handles_[kMetaCF], base_meta_newkey.Encode(), &new_meta_value); - if (s.ok()) { - if (IsStale(new_meta_value)) { - return Status::Corruption(); // newkey already exists. - } + if (IsStale(new_meta_value)) { + return Status::Corruption(); // newkey already exists. } - ParsedSetsMetaValue parsed_lists_new_meta_value(&new_meta_value); - // copy a new list with newkey - statistic = parsed_lists_meta_value.Count(); - s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); - new_inst->UpdateSpecificKeyStatistics(DataType::kLists, newkey.ToString(), statistic); - - // ListsDel key - parsed_lists_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); - UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); } + ParsedSetsMetaValue parsed_lists_new_meta_value(&new_meta_value); + // copy a new list with newkey + statistic = parsed_lists_meta_value.Count(); + s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); + new_inst->UpdateSpecificKeyStatistics(DataType::kLists, newkey.ToString(), statistic); + + // ListsDel key + parsed_lists_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kLists, key.ToString(), statistic); + return s; } diff --git a/src/storage/src/redis_sets.cc b/src/storage/src/redis_sets.cc index dfa5ba8ab..ca2170ad1 100644 --- a/src/storage/src/redis_sets.cc +++ b/src/storage/src/redis_sets.cc @@ -1244,23 +1244,32 @@ Status Redis::SetsRename(const Slice& key, Redis* new_inst, const Slice& newkey) BaseMetaKey base_meta_key(key); BaseMetaKey base_meta_newkey(newkey); rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok()) { - ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (parsed_sets_meta_value.IsStale()) { - return rocksdb::Status::NotFound("Stale"); - } else if (parsed_sets_meta_value.Count() == 0) { - return rocksdb::Status::NotFound(); - } - // copy a new set with newkey - statistic = parsed_sets_meta_value.Count(); - s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); - new_inst->UpdateSpecificKeyStatistics(DataType::kSets, newkey.ToString(), statistic); + if (!s.ok()) { + return s; + } + if (!s.ok() || !ExpectedMetaValue(DataType::kSets, meta_value)) { + return s; + } + if (key == newkey) { + return Status::OK(); + } - // SetsDel key - parsed_sets_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); - UpdateSpecificKeyStatistics(DataType::kSets, key.ToString(), statistic); + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale()) { + return rocksdb::Status::NotFound("Stale"); + } else if 
(parsed_sets_meta_value.Count() == 0) { + return rocksdb::Status::NotFound(); } + // copy a new set with newkey + statistic = parsed_sets_meta_value.Count(); + s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); + new_inst->UpdateSpecificKeyStatistics(DataType::kSets, newkey.ToString(), statistic); + + // SetsDel key + parsed_sets_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kSets, key.ToString(), statistic); + return s; } @@ -1273,33 +1282,39 @@ Status Redis::SetsRenamenx(const Slice& key, Redis* new_inst, const Slice& newke BaseMetaKey base_meta_key(key); BaseMetaKey base_meta_newkey(newkey); rocksdb::Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); + if (!s.ok() || !ExpectedMetaValue(DataType::kSets, meta_value)) { + return s; + } + if (key == newkey) { + return Status::Corruption(); + } + + ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); + if (parsed_sets_meta_value.IsStale()) { + return rocksdb::Status::NotFound("Stale"); + } else if (parsed_sets_meta_value.Count() == 0) { + return rocksdb::Status::NotFound(); + } + // check if newkey exists. + std::string new_meta_value; + s = new_inst->GetDB()->Get(default_read_options_, handles_[kMetaCF], base_meta_newkey.Encode(), &new_meta_value); if (s.ok()) { - ParsedSetsMetaValue parsed_sets_meta_value(&meta_value); - if (parsed_sets_meta_value.IsStale()) { - return rocksdb::Status::NotFound("Stale"); - } else if (parsed_sets_meta_value.Count() == 0) { - return rocksdb::Status::NotFound(); - } - // check if newkey exists. - std::string new_meta_value; - s = new_inst->GetDB()->Get(default_read_options_, handles_[kMetaCF], base_meta_newkey.Encode(), &new_meta_value); - if (s.ok()) { - ParsedSetsMetaValue parsed_sets_new_meta_value(&new_meta_value); - if (!parsed_sets_new_meta_value.IsStale() && parsed_sets_new_meta_value.Count() != 0) { - return Status::Corruption(); // newkey already exists. - } + ParsedSetsMetaValue parsed_sets_new_meta_value(&new_meta_value); + if (!parsed_sets_new_meta_value.IsStale() && parsed_sets_new_meta_value.Count() != 0) { + return Status::Corruption(); // newkey already exists. 
} + } - // copy a new set with newkey - statistic = parsed_sets_meta_value.Count(); - s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); - new_inst->UpdateSpecificKeyStatistics(DataType::kSets, newkey.ToString(), statistic); + // copy a new set with newkey + statistic = parsed_sets_meta_value.Count(); + s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); + new_inst->UpdateSpecificKeyStatistics(DataType::kSets, newkey.ToString(), statistic); + + // SetsDel key + parsed_sets_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kSets, key.ToString(), statistic); - // SetsDel key - parsed_sets_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); - UpdateSpecificKeyStatistics(DataType::kSets, key.ToString(), statistic); - } return s; } diff --git a/src/storage/src/redis_strings.cc b/src/storage/src/redis_strings.cc index 60c0bdfd9..5ea64c5f8 100644 --- a/src/storage/src/redis_strings.cc +++ b/src/storage/src/redis_strings.cc @@ -517,9 +517,9 @@ Status Redis::Incrby(const Slice& key, int64_t value, int64_t* ret) { StringsValue strings_value(buf); return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } else if (!ExpectedMetaValue(DataType::kStrings, old_value)) { - return Status::InvalidArgument(fmt::format("WRONGTYPE, key: {}, expect type: {}, get type: {}", key.ToString(), - DataTypeStrings[static_cast(DataType::kStrings)], - DataTypeStrings[static_cast(GetMetaValueType(old_value))])); + return Status::NotSupported(fmt::format("WRONGTYPE, key: {}, expect type: {}, get type: {}", key.ToString(), + DataTypeStrings[static_cast(DataType::kStrings)], + DataTypeStrings[static_cast(GetMetaValueType(old_value))])); } else { ParsedStringsValue parsed_strings_value(&old_value); uint64_t timestamp = parsed_strings_value.Etime(); @@ -886,6 +886,9 @@ Status Redis::Setrange(const Slice& key, int64_t start_offset, const Slice& valu strings_value.SetEtime(timestamp); return db_->Put(default_write_options_, base_key.Encode(), strings_value.Encode()); } else if (s.IsNotFound()) { + if (value.empty()) { // ignore empty value + return Status::OK(); + } std::string tmp(start_offset, '\0'); new_value = tmp.append(value.data()); *ret = static_cast(new_value.length()); @@ -1129,13 +1132,18 @@ Status Redis::StringsRename(const Slice& key, Redis* new_inst, const Slice& newk BaseKey base_key(key); BaseKey base_newkey(newkey); s = db_->Get(default_read_options_, base_key.Encode(), &value); - if (s.ok()) { - if (IsStale(value)) { - return Status::NotFound("Stale"); - } - db_->Delete(default_write_options_, base_key.Encode()); - s = new_inst->GetDB()->Put(default_write_options_, base_newkey.Encode(), value); + if (!s.ok() || !ExpectedMetaValue(DataType::kStrings, value)) { + return s; + } + if (key == newkey) { + return Status::OK(); + } + + if (IsStale(value)) { + return Status::NotFound("Stale"); } + db_->Delete(default_write_options_, base_key.Encode()); + s = new_inst->GetDB()->Put(default_write_options_, base_newkey.Encode(), value); return s; } @@ -1148,20 +1156,26 @@ Status Redis::StringsRenamenx(const Slice& key, Redis* new_inst, const Slice& ne BaseKey base_key(key); BaseKey base_newkey(newkey); s = db_->Get(default_read_options_, base_key.Encode(), &value); + if (!s.ok() || 
!ExpectedMetaValue(DataType::kStrings, value)) { + return s; + } + if (key == newkey) { + return Status::Corruption(); + } + + if (IsStale(value)) { + return Status::NotFound("Stale"); + } + // check if newkey exists. + s = new_inst->GetDB()->Get(default_read_options_, base_newkey.Encode(), &value); if (s.ok()) { - if (IsStale(value)) { - return Status::NotFound("Stale"); + if (!IsStale(value)) { + return Status::Corruption(); // newkey already exists. } - // check if newkey exists. - s = new_inst->GetDB()->Get(default_read_options_, base_newkey.Encode(), &value); - if (s.ok()) { - if (!IsStale(value)) { - return Status::Corruption(); // newkey already exists. - } - } - db_->Delete(default_write_options_, base_key.Encode()); - s = new_inst->GetDB()->Put(default_write_options_, base_newkey.Encode(), value); } + db_->Delete(default_write_options_, base_key.Encode()); + s = new_inst->GetDB()->Put(default_write_options_, base_newkey.Encode(), value); + return s; } @@ -1596,7 +1610,7 @@ Status Redis::GetType(const Slice& key, enum DataType& type) { if (s.ok()) { type = static_cast(static_cast(meta_value[0])); } - return Status::OK(); + return s; } Status Redis::IsExist(const Slice& key) { diff --git a/src/storage/src/redis_zsets.cc b/src/storage/src/redis_zsets.cc index c12fed800..d70948baa 100644 --- a/src/storage/src/redis_zsets.cc +++ b/src/storage/src/redis_zsets.cc @@ -1576,23 +1576,29 @@ Status Redis::ZsetsRename(const Slice& key, Redis* new_inst, const Slice& newkey BaseMetaKey base_meta_key(key); BaseMetaKey base_meta_newkey(newkey); Status s = db_->Get(default_read_options_, handles_[kMetaCF], base_meta_key.Encode(), &meta_value); - if (s.ok()) { - ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - if (parsed_zsets_meta_value.IsStale()) { - return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.Count() == 0) { - return Status::NotFound(); - } - // copy a new zset with newkey - statistic = parsed_zsets_meta_value.Count(); - s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); - new_inst->UpdateSpecificKeyStatistics(DataType::kZSets, newkey.ToString(), statistic); + if (!s.ok() || !ExpectedMetaValue(DataType::kZSets, meta_value)) { + return s; + } + if (key == newkey) { + return Status::OK(); + } - // ZsetsDel key - parsed_zsets_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); - UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); } + // copy a new zset with newkey + statistic = parsed_zsets_meta_value.Count(); + s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); + new_inst->UpdateSpecificKeyStatistics(DataType::kZSets, newkey.ToString(), statistic); + + // ZsetsDel key + parsed_zsets_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); + return s; } @@ -1605,33 +1611,39 @@ Status Redis::ZsetsRenamenx(const Slice& key, Redis* new_inst, const Slice& newk BaseMetaKey base_meta_key(key); BaseMetaKey base_meta_newkey(newkey); Status s = db_->Get(default_read_options_, handles_[kMetaCF], 
base_meta_key.Encode(), &meta_value); + if (!s.ok() || !ExpectedMetaValue(DataType::kZSets, meta_value)) { + return s; + } + if (key == newkey) { + return Status::Corruption(); + } + + ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); + if (parsed_zsets_meta_value.IsStale()) { + return Status::NotFound("Stale"); + } else if (parsed_zsets_meta_value.Count() == 0) { + return Status::NotFound(); + } + // check if newkey exist. + std::string new_meta_value; + s = new_inst->GetDB()->Get(default_read_options_, handles_[kMetaCF], base_meta_newkey.Encode(), &new_meta_value); if (s.ok()) { - ParsedZSetsMetaValue parsed_zsets_meta_value(&meta_value); - if (parsed_zsets_meta_value.IsStale()) { - return Status::NotFound("Stale"); - } else if (parsed_zsets_meta_value.Count() == 0) { - return Status::NotFound(); - } - // check if newkey exist. - std::string new_meta_value; - s = new_inst->GetDB()->Get(default_read_options_, handles_[kMetaCF], base_meta_newkey.Encode(), &new_meta_value); - if (s.ok()) { - ParsedSetsMetaValue parsed_zsets_new_meta_value(&new_meta_value); - if (!parsed_zsets_new_meta_value.IsStale() && parsed_zsets_new_meta_value.Count() != 0) { - return Status::Corruption(); // newkey already exists. - } + ParsedSetsMetaValue parsed_zsets_new_meta_value(&new_meta_value); + if (!parsed_zsets_new_meta_value.IsStale() && parsed_zsets_new_meta_value.Count() != 0) { + return Status::Corruption(); // newkey already exists. } + } - // copy a new zset with newkey - statistic = parsed_zsets_meta_value.Count(); - s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); - new_inst->UpdateSpecificKeyStatistics(DataType::kZSets, newkey.ToString(), statistic); + // copy a new zset with newkey + statistic = parsed_zsets_meta_value.Count(); + s = new_inst->GetDB()->Put(default_write_options_, handles_[kMetaCF], base_meta_newkey.Encode(), meta_value); + new_inst->UpdateSpecificKeyStatistics(DataType::kZSets, newkey.ToString(), statistic); + + // ZsetsDel key + parsed_zsets_meta_value.InitialMetaValue(); + s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); + UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); - // ZsetsDel key - parsed_zsets_meta_value.InitialMetaValue(); - s = db_->Put(default_write_options_, handles_[kMetaCF], base_meta_key.Encode(), meta_value); - UpdateSpecificKeyStatistics(DataType::kZSets, key.ToString(), statistic); - } return s; } diff --git a/src/storage/src/storage.cc b/src/storage/src/storage.cc index 619b87832..b8d48b2c1 100644 --- a/src/storage/src/storage.cc +++ b/src/storage/src/storage.cc @@ -1523,8 +1523,7 @@ int64_t Storage::TTL(const Slice& key) { Status Storage::GetType(const std::string& key, enum DataType& type) { auto& inst = GetDBInstance(key); - inst->GetType(key, type); - return Status::OK(); + return inst->GetType(key, type); } Status Storage::Keys(const DataType& data_type, const std::string& pattern, std::vector* keys) { @@ -1548,99 +1547,76 @@ Status Storage::Keys(const DataType& data_type, const std::string& pattern, std: } Status Storage::Rename(const std::string& key, const std::string& newkey) { - Status ret = Status::NotFound(); auto& inst = GetDBInstance(key); auto& new_inst = GetDBInstance(newkey); - // Strings - Status s = inst->StringsRename(key, new_inst.get(), newkey); - if (s.ok()) { - ret = Status::OK(); - } else if (!s.IsNotFound()) { - return s; - } - - // Hashes - s = inst->HashesRename(key, new_inst.get(), newkey); - if 
(s.ok()) { - ret = Status::OK(); - } else if (!s.IsNotFound()) { - return s; - } - - // Sets - s = inst->SetsRename(key, new_inst.get(), newkey); - if (s.ok()) { - ret = Status::OK(); - } else if (!s.IsNotFound()) { + DataType type; + Status s = GetType(key, type); + if (!s.ok()) { return s; } - - // Lists - s = inst->ListsRename(key, new_inst.get(), newkey); - if (s.ok()) { - ret = Status::OK(); - } else if (!s.IsNotFound()) { - return s; + if (key == newkey) { + return Status::OK(); } - // ZSets - s = inst->ZsetsRename(key, new_inst.get(), newkey); - if (s.ok()) { - ret = Status::OK(); - } else if (!s.IsNotFound()) { - return s; + switch (type) { + case DataType::kStrings: + s = inst->StringsRename(key, new_inst.get(), newkey); + break; + case DataType::kHashes: + s = inst->HashesRename(key, new_inst.get(), newkey); + break; + case DataType::kSets: + s = inst->SetsRename(key, new_inst.get(), newkey); + break; + case DataType::kZSets: + s = inst->ZsetsRename(key, new_inst.get(), newkey); + break; + case DataType::kLists: + s = inst->ListsRename(key, new_inst.get(), newkey); + break; + default: + return Status::NotFound(); } - return ret; + return s; } Status Storage::Renamenx(const std::string& key, const std::string& newkey) { - Status ret = Status::NotFound(); auto& inst = GetDBInstance(key); auto& new_inst = GetDBInstance(newkey); - // Strings - Status s = inst->StringsRenamenx(key, new_inst.get(), newkey); - if (s.ok()) { - ret = Status::OK(); - } else if (!s.IsNotFound()) { - return s; - } - - // Hashes - s = inst->HashesRenamenx(key, new_inst.get(), newkey); - if (s.ok()) { - ret = Status::OK(); - } else if (!s.IsNotFound()) { + DataType type; + Status s = GetType(key, type); + if (!s.ok()) { return s; } - // Sets - s = inst->SetsRenamenx(key, new_inst.get(), newkey); - if (s.ok()) { - ret = Status::OK(); - } else if (!s.IsNotFound()) { - return s; + if (key == newkey) { + return Status::Corruption(); } - // Lists - s = inst->ListsRenamenx(key, new_inst.get(), newkey); - if (s.ok()) { - ret = Status::OK(); - } else if (!s.IsNotFound()) { - return s; - } - - // ZSets - s = inst->ZsetsRenamenx(key, new_inst.get(), newkey); - if (s.ok()) { - ret = Status::OK(); - } else if (!s.IsNotFound()) { - return s; + switch (type) { + case DataType::kStrings: + s = inst->StringsRenamenx(key, new_inst.get(), newkey); + break; + case DataType::kHashes: + s = inst->HashesRenamenx(key, new_inst.get(), newkey); + break; + case DataType::kSets: + s = inst->SetsRenamenx(key, new_inst.get(), newkey); + break; + case DataType::kZSets: + s = inst->ZsetsRenamenx(key, new_inst.get(), newkey); + break; + case DataType::kLists: + s = inst->ListsRenamenx(key, new_inst.get(), newkey); + break; + default: + return Status::NotFound(); } - return ret; + return s; } void Storage::ScanDatabase(const DataType& type) { diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index aef0c4bf4..938edaeec 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -13,7 +13,7 @@ source tests/support/util.tcl set ::all_tests { # unit/printver - # unit/basic + unit/basic # unit/scan # unit/multi unit/quit diff --git a/tests/unit/basic.tcl b/tests/unit/basic.tcl index f9f29d22d..654b04461 100644 --- a/tests/unit/basic.tcl +++ b/tests/unit/basic.tcl @@ -1,5 +1,6 @@ start_server {tags {"basic"}} { +# Pikiwidb does not support the dbsize command # test {DEL all keys to start with a clean DB} { # foreach key [r keys *] {r del $key} # r dbsize @@ -38,12 +39,14 @@ start_server {tags {"basic"}} { lsort [r keys *] } {foo_a 
foo_b foo_c key_x key_y key_z} +# Pikiwidb does not support the dbsize command # test {DBSIZE} { # r info keyspace 1 # after 1000 # r dbsize # } {6} +# Pikiwidb does not support the dbsize command # test {DEL all keys} { # foreach key [r keys *] {r del $key} # r info keyspace 1 @@ -51,50 +54,51 @@ # r dbsize # } {0} -# test {Very big payload in GET/SET} { -# set buf [string repeat "abcd" 1000000] -# r set foo $buf -# r get foo -# } [string repeat "abcd" 1000000] - -# tags {"slow"} { -# test {Very big payload random access} { -# set err {} -# array set payload {} -# for {set j 0} {$j < 100} {incr j} { -# set size [expr 1+[randomInt 100000]] -# set buf [string repeat "pl-$j" $size] -# set payload($j) $buf -# r set bigpayload_$j $buf -# } -# for {set j 0} {$j < 1000} {incr j} { -# set index [randomInt 100] -# set buf [r get bigpayload_$index] -# if {$buf != $payload($index)} { -# set err "Values differ: I set '$payload($index)' but I read back '$buf'" -# break -# } -# } -# unset payload -# set _ $err -# } {} -# -# test {SET 10000 numeric keys and access all them in reverse order} { -# set err {} -# for {set x 0} {$x < 10000} {incr x} { -# r set $x $x -# } -# set sum 0 -# for {set x 9999} {$x >= 0} {incr x -1} { -# set val [r get $x] -# if {$val ne $x} { -# set err "Element at position $x is $val instead of $x" -# break -# } -# } -# set _ $err -# } {} - + test {Very big payload in GET/SET} { + set buf [string repeat "abcd" 1000000] + r set foo $buf + r get foo + } [string repeat "abcd" 1000000] + + tags {"slow"} { + test {Very big payload random access} { + set err {} + array set payload {} + for {set j 0} {$j < 100} {incr j} { + set size [expr 1+[randomInt 100000]] + set buf [string repeat "pl-$j" $size] + set payload($j) $buf + r set bigpayload_$j $buf + } + for {set j 0} {$j < 1000} {incr j} { + set index [randomInt 100] + set buf [r get bigpayload_$index] + if {$buf != $payload($index)} { + set err "Values differ: I set '$payload($index)' but I read back '$buf'" + break + } + } + unset payload + set _ $err + } {} + + test {SET 10000 numeric keys and access all them in reverse order} { + set err {} + for {set x 0} {$x < 10000} {incr x} { + r set $x $x + } + set sum 0 + for {set x 9999} {$x >= 0} {incr x -1} { + set val [r get $x] + if {$val ne $x} { + set err "Element at position $x is $val instead of $x" + break + } + } + set _ $err + } {} + +# Pikiwidb does not support the dbsize command # test {DBSIZE should be 10101 now} { # r info keyspace 1 # after 1000 @@ -127,6 +131,7 @@ r incrby novar 17179869184 } {34359738368} +# TODO: whether to stay compatible with this Redis behavior is still under discussion # test {INCR fails against key with spaces (left)} { # r set novar " 11" # catch {r incr novar} err @@ -145,6 +150,7 @@ format $err } {ERR*} +# TODO: wait until pikiwidb is compatible with the Redis error code specification, ref issue: https://github.com/OpenAtomFoundation/pikiwidb/issues/382 # test {INCR fails against a key holding a list} { # r rpush mylist 1 # catch {r incr mylist} err @@ -201,6 +207,7 @@ format $err } {ERR*valid*} +# TODO: wait until pikiwidb is compatible with the Redis error code specification, ref issue: https://github.com/OpenAtomFoundation/pikiwidb/issues/382 # test {INCRBYFLOAT fails against a key holding a list} { # r del mylist # set err {} @@ -268,6 +275,7 @@ assert_equal 20 [r get x] } +# Pikiwidb does not support the set-active-expire command # test "DEL against expired key" { # r debug 
set-active-expire 0 # r setex keyExpire 1 valExpire @@ -292,6 +300,7 @@ append res [r exists emptykey] } {10} +# Pikiwidb does not support the read command # test {Commands pipelining} { # set fd [r channel] # puts -nonewline $fd "SET k1 xyzk\r\nGET k1\r\nPING\r\n" @@ -392,6 +401,7 @@ r ttl mykey2 } {-1} +# Pikiwidb does not support the dbsize command # test {DEL all keys again (DB 0)} { # foreach key [r keys *] { # r del $key # } # r dbsize # } {0} +# Pikiwidb does not support the dbsize command # test {DEL all keys again (DB 1)} { # r select 10 # foreach key [r keys *] { # r del $key # } # r dbsize # format $res # } {0} +# Pikiwidb does not support the dbsize command # test {MOVE basic usage} { # r set mykey foobar # r move mykey 10 # ... # format $res # } [list 0 0 foobar 1] +# Pikiwidb does not support the move command # test {MOVE against key existing in the target DB} { # r set mykey hello # r move mykey 10 # } {0} +# Pikiwidb does not support the move command # test {MOVE against non-integer DB (#1428)} { # r set mykey hello # catch {r move mykey notanumber} e diff --git a/tests/unit/keys.tcl b/tests/unit/keys.tcl index cb62444f3..9179e1b57 100644 --- a/tests/unit/keys.tcl +++ b/tests/unit/keys.tcl @@ -1,3 +1,4 @@ +# TODO: all params are invalid start_server {tags {"keys"}} { test {KEYS with pattern} { foreach key {key_x key_y key_z foo_a foo_b foo_c} { From a72cd92fc7b1b7d6fd20ea72f82d01930fcdca64 Mon Sep 17 00:00:00 2001 From: VanessaXWGUO <85667332+VanessaXWGUO@users.noreply.github.com> Date: Fri, 9 Aug 2024 10:59:51 +0800 Subject: [PATCH 4/4] Rename LICENSE to LICENSE.md (#408) add the ".md" extension to the file name --- LICENSE => LICENSE.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename LICENSE => LICENSE.md (100%) diff --git a/LICENSE b/LICENSE.md similarity index 100% rename from LICENSE rename to LICENSE.md
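A regression test along these lines could pin down the relaxed BITOP argument validation from patch 1 (single-key AND/OR/XOR now accepted, NOT restricted to exactly one source key). This is a sketch only: it assumes pikiwidb gives a single-operand BITOP the same copy semantics as Redis, and the exact error text returned for a multi-key NOT is not confirmed here, so the match pattern is kept loose.

    test {BITOP with a single source key acts as a copy} {
        # single-key XOR was rejected before the fix; Redis treats it as a plain copy
        r set src "abc"
        r bitop xor dest src
        r get dest
    } {abc}

    test {BITOP NOT with more than one source key is an error} {
        # after the fix, NOT must take exactly one source key
        r set src2 "abc"
        catch {r bitop not dest2 src src2} e
        set e
    } {*ERR*}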