From 9df7bae345fc3e8e8195fe0df5ac843bdae2fb7a Mon Sep 17 00:00:00 2001
From: secwall <secwall@yandex-team.ru>
Date: Wed, 15 Jan 2025 11:22:50 +0100
Subject: [PATCH] Move to Valkey

---
 Makefile                                      |   48 +-
 README.md                                     |    8 +-
 cmd/rdsync/hosts.go                           |    6 +-
 cmd/rdsync/main.go                            |    2 +-
 cmd/rdsync/state.go                           |    2 +-
 go.mod                                        |    4 +-
 go.sum                                        |   16 +-
 internal/app/active_nodes.go                  |    4 +-
 internal/app/aof.go                           |    6 +-
 internal/app/app.go                           |   10 +-
 internal/app/cache.go                         |   18 +-
 internal/app/cli.go                           |   22 +-
 internal/app/failover.go                      |   12 +-
 internal/app/manager.go                       |    8 +-
 internal/app/master.go                        |    6 +-
 internal/app/repair.go                        |   16 +-
 internal/app/replication.go                   |    4 +-
 internal/app/switchover.go                    |   18 +-
 internal/config/config.go                     |   48 +-
 internal/{redis => valkey}/node.go            |  317 ++--
 internal/{redis => valkey}/senticache.go      |  127 +-
 internal/{redis => valkey}/shard.go           |   16 +-
 internal/{redis => valkey}/tls.go             |    2 +-
 tests/features/00_cluster_smoke.feature       |   76 +-
 tests/features/00_sentinel_smoke.feature      |   88 +-
 tests/features/01_cluster_maintenance.feature |   80 +-
 .../features/01_sentinel_maintenance.feature  |  128 +-
 .../02_cluster_switchover_from.feature        |  100 +-
 .../02_sentinel_switchover_from.feature       |  126 +-
 .../features/03_cluster_switchover_to.feature |   56 +-
 .../03_sentinel_switchover_to.feature         |   68 +-
 tests/features/04_cluster_failover.feature    |  198 +--
 tests/features/04_sentinel_failover.feature   |  214 +--
 .../05_cluster_replication_fix.feature        |  208 +--
 .../05_sentinel_replication_fix.feature       |  132 +-
 tests/features/06_cluster_lost.feature        |  120 +-
 tests/features/06_sentinel_lost.feature       |  120 +-
 .../features/07_cluster_local_repair.feature  |   38 +-
 .../features/07_sentinel_local_repair.feature |   54 +-
 tests/images/docker-compose.yaml              |   18 +-
 tests/images/jepsen-compose.yaml              |   18 +-
 .../jepsen/jepsen/src/jepsen/rdsync.clj       |   20 +-
 .../jepsen/jepsen/test/jepsen/rdsync_test.clj |    4 +-
 tests/images/jepsen/save_logs.sh              |    6 +-
 tests/images/redis/Dockerfile                 |    7 -
 tests/images/redis/setup.sh                   |   10 -
 tests/images/redis/setup_cluster.sh           |   71 -
 tests/images/redis/setup_sentinel.sh          |   43 -
 tests/images/redis/supervisor_redis.conf      |    9 -
 tests/images/valkey/Dockerfile                |    7 +
 tests/images/{redis => valkey}/default.conf   |    2 +-
 .../{redis => valkey}/rdsync_cluster.yaml     |    6 +-
 .../{redis => valkey}/rdsync_sentinel.yaml    |    6 +-
 .../images/{redis => valkey}/senticache.conf  |    0
 tests/images/valkey/setup.sh                  |   10 +
 tests/images/valkey/setup_cluster.sh          |   71 +
 tests/images/valkey/setup_sentinel.sh         |   43 +
 .../{redis => valkey}/supervisor_rdsync.conf  |    0
 .../supervisor_senticache.conf                |    4 +-
 tests/images/valkey/supervisor_valkey.conf    |    9 +
 tests/images/zookeeper/setup_zk.sh            |    6 +-
 tests/rdsync_test.go                          |  368 ++---
 .../0001_Add_replication_pause.patch          |   93 +-
 ...xplicit_cluster_replication_cascades.patch |   34 +-
 .../0003_Add_offline_mode.patch               |   80 +-
 .../0004_Add_waitquorum_command.patch         |  223 ++-
 .../0005_Add_senticache.patch                 | 1337 ++++++++---------
 {redis_patches => valkey_patches}/build.sh    |    8 +-
 68 files changed, 2577 insertions(+), 2462 deletions(-)
 rename internal/{redis => valkey}/node.go (66%)
 rename internal/{redis => valkey}/senticache.go (78%)
 rename internal/{redis => valkey}/shard.go (90%)
 rename internal/{redis => valkey}/tls.go (97%)
 delete mode 100644 tests/images/redis/Dockerfile
 delete mode 100755 tests/images/redis/setup.sh
 delete mode 100755 tests/images/redis/setup_cluster.sh
 delete mode 100755 tests/images/redis/setup_sentinel.sh
 delete mode 100644 tests/images/redis/supervisor_redis.conf
 create mode 100644 tests/images/valkey/Dockerfile
 rename tests/images/{redis => valkey}/default.conf (95%)
 rename tests/images/{redis => valkey}/rdsync_cluster.yaml (84%)
 rename tests/images/{redis => valkey}/rdsync_sentinel.yaml (87%)
 rename tests/images/{redis => valkey}/senticache.conf (100%)
 create mode 100755 tests/images/valkey/setup.sh
 create mode 100755 tests/images/valkey/setup_cluster.sh
 create mode 100755 tests/images/valkey/setup_sentinel.sh
 rename tests/images/{redis => valkey}/supervisor_rdsync.conf (100%)
 rename tests/images/{redis => valkey}/supervisor_senticache.conf (54%)
 create mode 100644 tests/images/valkey/supervisor_valkey.conf
 rename {redis_patches => valkey_patches}/0001_Add_replication_pause.patch (64%)
 rename {redis_patches => valkey_patches}/0002_Allow_explicit_cluster_replication_cascades.patch (59%)
 rename {redis_patches => valkey_patches}/0003_Add_offline_mode.patch (63%)
 rename {redis_patches => valkey_patches}/0004_Add_waitquorum_command.patch (67%)
 rename {redis_patches => valkey_patches}/0005_Add_senticache.patch (51%)
 rename {redis_patches => valkey_patches}/build.sh (57%)

diff --git a/Makefile b/Makefile
index c7f6112..9ee3467 100644
--- a/Makefile
+++ b/Makefile
@@ -16,14 +16,14 @@ unittests:
 	go test ./cmd/... ./internal/...
 	go test ./cmd/... ./tests/testutil/matchers/
 
-redis/src/redis-server:
-	docker run --rm -v ${CURDIR}:/app -w /app ubuntu:noble /app/redis_patches/build.sh
-
-test: base_image redis/src/redis-server cmd/rdsync/rdsync recreate_logs
-	rm -rf ./tests/images/redis/rdsync && cp cmd/rdsync/rdsync ./tests/images/redis/rdsync
-	rm -rf ./tests/images/redis/redis-server && cp redis/src/redis-server ./tests/images/redis/redis-server
-	rm -rf ./tests/images/redis/redis-senticache && cp redis/src/redis-senticache ./tests/images/redis/redis-senticache
-	rm -rf ./tests/images/redis/redis-cli && cp redis/src/redis-cli ./tests/images/redis/redis-cli
+valkey/src/valkey-server:
+	docker run --rm -v ${CURDIR}:/app -w /app ubuntu:noble /app/valkey_patches/build.sh
+
+test: base_image valkey/src/valkey-server cmd/rdsync/rdsync recreate_logs
+	rm -rf ./tests/images/valkey/rdsync && cp cmd/rdsync/rdsync ./tests/images/valkey/rdsync
+	rm -rf ./tests/images/valkey/valkey-server && cp valkey/src/valkey-server ./tests/images/valkey/valkey-server
+	rm -rf ./tests/images/valkey/valkey-senticache && cp valkey/src/valkey-senticache ./tests/images/valkey/valkey-senticache
+	rm -rf ./tests/images/valkey/valkey-cli && cp valkey/src/valkey-cli ./tests/images/valkey/valkey-cli
 	go build ./tests/...
 	(cd tests; go test -timeout 150m)
 
@@ -41,32 +41,32 @@ base_image: tests/images/zookeeper/zookeeper.tar.gz
 		docker build tests/images/base -t rdsync-base:latest;\
 	fi
 
-start_sentinel_env: base_image redis/src/redis-server cmd/rdsync/rdsync recreate_logs
-	rm -rf ./tests/images/redis/rdsync && cp cmd/rdsync/rdsync ./tests/images/redis/rdsync
-	rm -rf ./tests/images/redis/redis-server && cp redis/src/redis-server ./tests/images/redis/redis-server
-	rm -rf ./tests/images/redis/redis-senticache && cp redis/src/redis-senticache ./tests/images/redis/redis-senticache
-	rm -rf ./tests/images/redis/redis-cli && cp redis/src/redis-cli ./tests/images/redis/redis-cli
+start_sentinel_env: base_image valkey/src/valkey-server cmd/rdsync/rdsync recreate_logs
+	rm -rf ./tests/images/valkey/rdsync && cp cmd/rdsync/rdsync ./tests/images/valkey/rdsync
+	rm -rf ./tests/images/valkey/valkey-server && cp valkey/src/valkey-server ./tests/images/valkey/valkey-server
+	rm -rf ./tests/images/valkey/valkey-senticache && cp valkey/src/valkey-senticache ./tests/images/valkey/valkey-senticache
+	rm -rf ./tests/images/valkey/valkey-cli && cp valkey/src/valkey-cli ./tests/images/valkey/valkey-cli
 	docker compose -p $(PROJECT) -f ./tests/images/jepsen-compose.yaml up -d --force-recreate --build
 	timeout 600 docker exec rdsync-zoo1-1 setup_zk.sh
-	timeout 600 docker exec rdsync-redis1-1 setup_sentinel.sh
-	timeout 600 docker exec rdsync-redis2-1 setup_sentinel.sh redis1
-	timeout 600 docker exec rdsync-redis3-1 setup_sentinel.sh redis1
+	timeout 600 docker exec rdsync-valkey1-1 setup_sentinel.sh
+	timeout 600 docker exec rdsync-valkey2-1 setup_sentinel.sh valkey1
+	timeout 600 docker exec rdsync-valkey3-1 setup_sentinel.sh valkey1
 
 run_jepsen_sentinel_test: recreate_logs start_sentinel_env
 	(docker exec rdsync-jepsen-1 /root/jepsen/run.sh >tests/logs/jepsen.log 2>&1 && tail -n 4 tests/logs/jepsen.log) || ./tests/images/jepsen/save_logs.sh
 
 jepsen_sentinel_test: run_jepsen_sentinel_test clean
 
-start_cluster_env: base_image redis/src/redis-server cmd/rdsync/rdsync recreate_logs
-	rm -rf ./tests/images/redis/rdsync && cp cmd/rdsync/rdsync ./tests/images/redis/rdsync
-	rm -rf ./tests/images/redis/redis-server && cp redis/src/redis-server ./tests/images/redis/redis-server
-	rm -rf ./tests/images/redis/redis-senticache && cp redis/src/redis-senticache ./tests/images/redis/redis-senticache
-	rm -rf ./tests/images/redis/redis-cli && cp redis/src/redis-cli ./tests/images/redis/redis-cli
+start_cluster_env: base_image valkey/src/valkey-server cmd/rdsync/rdsync recreate_logs
+	rm -rf ./tests/images/valkey/rdsync && cp cmd/rdsync/rdsync ./tests/images/valkey/rdsync
+	rm -rf ./tests/images/valkey/valkey-server && cp valkey/src/valkey-server ./tests/images/valkey/valkey-server
+	rm -rf ./tests/images/valkey/valkey-senticache && cp valkey/src/valkey-senticache ./tests/images/valkey/valkey-senticache
+	rm -rf ./tests/images/valkey/valkey-cli && cp valkey/src/valkey-cli ./tests/images/valkey/valkey-cli
 	docker compose -p $(PROJECT) -f ./tests/images/jepsen-compose.yaml up -d --force-recreate --build
 	timeout 600 docker exec rdsync-zoo1-1 setup_zk.sh
-	timeout 600 docker exec rdsync-redis1-1 setup_cluster.sh
-	timeout 600 docker exec rdsync-redis2-1 setup_cluster.sh redis1
-	timeout 600 docker exec rdsync-redis3-1 setup_cluster.sh redis1
+	timeout 600 docker exec rdsync-valkey1-1 setup_cluster.sh
+	timeout 600 docker exec rdsync-valkey2-1 setup_cluster.sh valkey1
+	timeout 600 docker exec rdsync-valkey3-1 setup_cluster.sh valkey1
 
 run_jepsen_cluster_test: recreate_logs start_cluster_env
 	(docker exec rdsync-jepsen-1 /root/jepsen/run.sh >tests/logs/jepsen.log 2>&1 && tail -n 4 tests/logs/jepsen.log) || ./tests/images/jepsen/save_logs.sh
diff --git a/README.md b/README.md
index 61deae7..54a8d94 100644
--- a/README.md
+++ b/README.md
@@ -4,14 +4,14 @@
 
 # rdsync
 
-Rdsync is a redis high-availability tool.
-It uses a patched redis version to make a cluster or sentinel-like setup less prone to data loss.
+Rdsync is a Valkey high-availability tool.
+It uses a patched Valkey version to make a cluster or sentinel-like setup less prone to data loss.
 
 ## Limitations and requirements
 
-* Patched redis (patches for redis 7.2 are included in this repo)
+* Patched Valkey (patches for Valkey 8.0 are included in this repo)
 * ZooKeeper as DCS
-* Single redis instance per host
+* Single Valkey instance per host
 * In a clustered setup each shard must have its own DCS prefix
 * Client applications must use the `WAITQUORUM` command to make data loss less likely (see the jepsen test for an example, and the sketch below).
 
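A minimal client-side sketch of the `WAITQUORUM` flow referenced in the README hunk above. It is not part of this patch: the address is made up, and `WAITQUORUM` is issued with no arguments purely for illustration (its real syntax is defined by valkey_patches/0004_Add_waitquorum_command.patch).

package main

import (
	"context"

	valkey "github.com/valkey-io/valkey-go"
)

func main() {
	// Connect to a single shard member (hypothetical address).
	c, err := valkey.NewClient(valkey.ClientOption{InitAddress: []string{"valkey1:6379"}})
	if err != nil {
		panic(err)
	}
	defer c.Close()

	ctx := context.Background()
	// Perform a write...
	if err := c.Do(ctx, c.B().Set().Key("key").Value("value").Build()).Error(); err != nil {
		panic(err)
	}
	// ...then block until the patched server reports the write is acknowledged by the quorum of replicas.
	if err := c.Do(ctx, c.B().Arbitrary("WAITQUORUM").Build()).Error(); err != nil {
		panic(err)
	}
}
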
diff --git a/cmd/rdsync/hosts.go b/cmd/rdsync/hosts.go
index 58c25fa..5a8e135 100644
--- a/cmd/rdsync/hosts.go
+++ b/cmd/rdsync/hosts.go
@@ -12,7 +12,7 @@ import (
 
 var priority int
 var dryRun bool
-var skipRedisCheck bool
+var skipValkeyCheck bool
 
 var hostListCmd = &cobra.Command{
 	Use:     "host",
@@ -47,7 +47,7 @@ var hostAddCmd = &cobra.Command{
 			}
 		})
 
-		os.Exit(app.CliHostAdd(args[0], priorityVal, dryRun, skipRedisCheck))
+		os.Exit(app.CliHostAdd(args[0], priorityVal, dryRun, skipValkeyCheck))
 	},
 }
 
@@ -67,7 +67,7 @@ var hostRemoveCmd = &cobra.Command{
 
 func init() {
 	hostAddCmd.Flags().IntVar(&priority, "priority", 100, "host priority")
-	hostAddCmd.Flags().BoolVar(&skipRedisCheck, "skip-redis-check", false, "do not check redis availability")
+	hostAddCmd.Flags().BoolVar(&skipValkeyCheck, "skip-valkey-check", false, "do not check valkey availability")
 	hostAddCmd.Flags().BoolVar(&dryRun, "dry-run", false, "tests suggested changes."+
 		" Exits codes:"+
 		" 0 - when no changes detected,"+
diff --git a/cmd/rdsync/main.go b/cmd/rdsync/main.go
index 7df5086..b28e422 100644
--- a/cmd/rdsync/main.go
+++ b/cmd/rdsync/main.go
@@ -15,7 +15,7 @@ var verbose bool
 
 var rootCmd = &cobra.Command{
 	Use:   "rdsync",
-	Short: "Rdsync is a Redis HA cluster coordination tool",
+	Short: "Rdsync is a Valkey HA cluster coordination tool",
 	Long:  `Running without additional arguments will start rdsync service for current node.`,
 	Run: func(cmd *cobra.Command, args []string) {
 		app, err := app.NewApp(configFile, "")
diff --git a/cmd/rdsync/state.go b/cmd/rdsync/state.go
index c323aa9..81ee38c 100644
--- a/cmd/rdsync/state.go
+++ b/cmd/rdsync/state.go
@@ -11,7 +11,7 @@ import (
 
 var stateCmd = &cobra.Command{
 	Use:   "state",
-	Short: "Print information from redis hosts",
+	Short: "Print information from valkey hosts",
 	Run: func(cmd *cobra.Command, args []string) {
 		app, err := app.NewApp(configFile, logLevel)
 		if err != nil {
diff --git a/go.mod b/go.mod
index 9894ca0..577acf1 100644
--- a/go.mod
+++ b/go.mod
@@ -11,22 +11,20 @@ require (
 	github.com/go-zookeeper/zk v1.0.4
 	github.com/gofrs/flock v0.12.1
 	github.com/heetch/confita v0.10.0
-	github.com/redis/go-redis/v9 v9.7.0
 	github.com/spf13/cobra v1.8.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.10.0
+	github.com/valkey-io/valkey-go v1.0.53
 	gopkg.in/yaml.v2 v2.4.0
 )
 
 require (
 	github.com/BurntSushi/toml v1.4.0 // indirect
 	github.com/Microsoft/go-winio v0.6.2 // indirect
-	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
 	github.com/cucumber/gherkin/go/v26 v26.2.0 // indirect
 	github.com/cucumber/messages/go/v21 v21.0.1 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
 	github.com/distribution/reference v0.6.0 // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
diff --git a/go.sum b/go.sum
index 4b2d04d..586c9ad 100644
--- a/go.sum
+++ b/go.sum
@@ -16,14 +16,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
 github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
-github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
-github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
-github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
 github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
-github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -47,8 +41,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
-github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/docker/docker v27.5.0+incompatible h1:um++2NcQtGRTz5eEgO6aJimo6/JxrTXC941hd05JO6U=
@@ -187,6 +179,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
+github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -212,8 +206,6 @@ github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
-github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
 github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
@@ -247,6 +239,8 @@ github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/valkey-io/valkey-go v1.0.53 h1:bntDqQVPzkLdE/4ypXBrHalXJB+BOTMk+JwXNRCGudg=
+github.com/valkey-io/valkey-go v1.0.53/go.mod h1:BXlVAPIL9rFQinSFM+N32JfWzfCaUAqBpZkc4vPY6fM=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -276,6 +270,8 @@ golang.org/x/crypto v0.0.0-20190506204251-e1dfcc566284/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
diff --git a/internal/app/active_nodes.go b/internal/app/active_nodes.go
index 4e1c05b..4cf1e45 100644
--- a/internal/app/active_nodes.go
+++ b/internal/app/active_nodes.go
@@ -32,9 +32,9 @@ func (app *App) actualizeQuorumReplicas(master string, activeNodes []string) err
 			continue
 		}
 		activeNode := app.shard.Get(host)
-		expected = append(expected, fmt.Sprintf("%s:%d", host, app.config.Redis.Port))
+		expected = append(expected, fmt.Sprintf("%s:%d", host, app.config.Valkey.Port))
 		for _, ip := range activeNode.GetIPs() {
-			expected = append(expected, fmt.Sprintf("%s:%d", ip, app.config.Redis.Port))
+			expected = append(expected, fmt.Sprintf("%s:%d", ip, app.config.Valkey.Port))
 		}
 	}
 
diff --git a/internal/app/aof.go b/internal/app/aof.go
index e84e736..d2fa6f0 100644
--- a/internal/app/aof.go
+++ b/internal/app/aof.go
@@ -25,9 +25,9 @@ func (app *App) adjustAofMode(master string) error {
 			return err
 		}
 	}
-	if app.config.Redis.AofPath != "" && !targetMode {
-		if _, err := os.Stat(app.config.Redis.AofPath); err == nil {
-			return os.RemoveAll(app.config.Redis.AofPath)
+	if app.config.Valkey.AofPath != "" && !targetMode {
+		if _, err := os.Stat(app.config.Valkey.AofPath); err == nil {
+			return os.RemoveAll(app.config.Valkey.AofPath)
 		}
 	}
 	return nil
diff --git a/internal/app/app.go b/internal/app/app.go
index 76b1398..46b8ae4 100644
--- a/internal/app/app.go
+++ b/internal/app/app.go
@@ -14,7 +14,7 @@ import (
 
 	"github.com/yandex/rdsync/internal/config"
 	"github.com/yandex/rdsync/internal/dcs"
-	"github.com/yandex/rdsync/internal/redis"
+	"github.com/yandex/rdsync/internal/valkey"
 )
 
 // App is main application structure
@@ -29,8 +29,8 @@ type App struct {
 	logger       *slog.Logger
 	config       *config.Config
 	dcs          dcs.DCS
-	shard        *redis.Shard
-	cache        *redis.SentiCacheNode
+	shard        *valkey.Shard
+	cache        *valkey.SentiCacheNode
 	daemonLock   *flock.Flock
 }
 
@@ -136,10 +136,10 @@ func (app *App) Run() int {
 	defer app.dcs.Close()
 	app.dcs.SetDisconnectCallback(func() error { return app.handleCritical() })
 
-	app.shard = redis.NewShard(app.config, app.logger, app.dcs)
+	app.shard = valkey.NewShard(app.config, app.logger, app.dcs)
 	defer app.shard.Close()
 	if app.mode == modeSentinel {
-		app.cache, err = redis.NewSentiCacheNode(app.config, app.logger)
+		app.cache, err = valkey.NewSentiCacheNode(app.config, app.logger)
 		if err != nil {
 			app.logger.Error("Unable to init senticache node", "error", err)
 			return 1
diff --git a/internal/app/cache.go b/internal/app/cache.go
index 9ba8f7d..511d77b 100644
--- a/internal/app/cache.go
+++ b/internal/app/cache.go
@@ -4,11 +4,11 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/yandex/rdsync/internal/redis"
+	"github.com/yandex/rdsync/internal/valkey"
 )
 
-func (app *App) updateCache(refState map[string]*HostState, cache *redis.SentiCacheNode) error {
-	var state redis.SentiCacheState
+func (app *App) updateCache(refState map[string]*HostState, cache *valkey.SentiCacheNode) error {
+	var state valkey.SentiCacheState
 	masterReadOnly := false
 	for fqdn, hostState := range refState {
 		if hostState == nil || !hostState.PingOk || hostState.Error != "" {
@@ -16,7 +16,7 @@ func (app *App) updateCache(refState map[string]*HostState, cache *redis.SentiCa
 		}
 
 		if hostState.SentiCacheState != nil && fqdn != app.config.Hostname {
-			var sentinel redis.SentiCacheSentinel
+			var sentinel valkey.SentiCacheSentinel
 			sentinel.Name = hostState.SentiCacheState.Name
 			sentinel.RunID = hostState.SentiCacheState.RunID
 			if app.config.SentinelMode.AnnounceHostname {
@@ -47,29 +47,29 @@ func (app *App) updateCache(refState map[string]*HostState, cache *redis.SentiCa
 			} else {
 				state.Master.IP = hostState.IP
 			}
-			state.Master.Port = app.config.Redis.Port
+			state.Master.Port = app.config.Valkey.Port
 			state.Master.RunID = hostState.RunID
 			state.Master.Quorum = len(refState)/2 + 1
-			state.Master.ParallelSyncs = app.config.Redis.MaxParallelSyncs
+			state.Master.ParallelSyncs = app.config.Valkey.MaxParallelSyncs
 			state.Master.ConfigEpoch = 0
 		} else {
 			nc, err := app.shard.GetNodeConfiguration(fqdn)
 			if err != nil {
 				return err
 			}
-			var replica redis.SentiCacheReplica
+			var replica valkey.SentiCacheReplica
 			if app.config.SentinelMode.AnnounceHostname {
 				replica.IP = fqdn
 			} else {
 				replica.IP = hostState.IP
 			}
-			replica.Port = app.config.Redis.Port
+			replica.Port = app.config.Valkey.Port
 			replica.RunID = hostState.RunID
 			replica.MasterLinkDownTime = hostState.ReplicaState.MasterLinkDownTime
 			replica.SlavePriority = nc.Priority
 			replica.ReplicaAnnounced = 1
 			replica.MasterHost = hostState.ReplicaState.MasterHost
-			replica.MasterPort = app.config.Redis.Port
+			replica.MasterPort = app.config.Valkey.Port
 			if hostState.ReplicaState.MasterLinkState {
 				replica.SlaveMasterLinkStatus = 0
 			} else {
diff --git a/internal/app/cli.go b/internal/app/cli.go
index ae2c268..f28fd2e 100644
--- a/internal/app/cli.go
+++ b/internal/app/cli.go
@@ -13,7 +13,7 @@ import (
 	"gopkg.in/yaml.v2"
 
 	"github.com/yandex/rdsync/internal/dcs"
-	"github.com/yandex/rdsync/internal/redis"
+	"github.com/yandex/rdsync/internal/valkey"
 )
 
 // CliInfo prints DCS-based shard state to stdout
@@ -26,7 +26,7 @@ func (app *App) CliInfo(verbose bool) int {
 	app.dcs.Initialize()
 	defer app.dcs.Close()
 
-	app.shard = redis.NewShard(app.config, app.logger, app.dcs)
+	app.shard = valkey.NewShard(app.config, app.logger, app.dcs)
 	defer app.shard.Close()
 	if err := app.shard.UpdateHostsInfo(); err != nil {
 		app.logger.Error("Unable to update hosts info", "error", err)
@@ -133,7 +133,7 @@ func (app *App) CliState(verbose bool) int {
 	}
 	defer app.dcs.Close()
 	app.dcs.Initialize()
-	app.shard = redis.NewShard(app.config, app.logger, app.dcs)
+	app.shard = valkey.NewShard(app.config, app.logger, app.dcs)
 	defer app.shard.Close()
 
 	if err := app.shard.UpdateHostsInfo(); err != nil {
@@ -192,7 +192,7 @@ func (app *App) CliSwitch(switchFrom, switchTo string, waitTimeout time.Duration
 	}
 	defer app.dcs.Close()
 	app.dcs.Initialize()
-	app.shard = redis.NewShard(app.config, app.logger, app.dcs)
+	app.shard = valkey.NewShard(app.config, app.logger, app.dcs)
 	defer app.shard.Close()
 
 	if err := app.shard.UpdateHostsInfo(); err != nil {
@@ -522,7 +522,7 @@ func (app *App) CliHostList() int {
 	app.dcs.Initialize()
 	defer app.dcs.Close()
 
-	app.shard = redis.NewShard(app.config, app.logger, app.dcs)
+	app.shard = valkey.NewShard(app.config, app.logger, app.dcs)
 	defer app.shard.Close()
 
 	data := make(map[string]interface{})
@@ -545,7 +545,7 @@ func (app *App) CliHostList() int {
 }
 
 // CliHostAdd add hosts to the list of hosts in dcs
-func (app *App) CliHostAdd(host string, priority *int, dryRun bool, skipRedisCheck bool) int {
+func (app *App) CliHostAdd(host string, priority *int, dryRun bool, skipValkeyCheck bool) int {
 	if priority != nil && *priority < 0 {
 		app.logger.Error(fmt.Sprintf("Priority must be >= 0. Got: %d", *priority))
 		return 1
@@ -559,7 +559,7 @@ func (app *App) CliHostAdd(host string, priority *int, dryRun bool, skipRedisChe
 	defer app.dcs.Close()
 	app.dcs.Initialize()
 
-	app.shard = redis.NewShard(app.config, app.logger, app.dcs)
+	app.shard = valkey.NewShard(app.config, app.logger, app.dcs)
 	defer app.shard.Close()
 
 	// root path probably does not exist
@@ -568,8 +568,8 @@ func (app *App) CliHostAdd(host string, priority *int, dryRun bool, skipRedisChe
 		return 1
 	}
 
-	if !skipRedisCheck {
-		node, err := redis.NewNode(app.config, app.logger, host)
+	if !skipValkeyCheck {
+		node, err := valkey.NewNode(app.config, app.logger, host)
 		if err != nil {
 			app.logger.Error(fmt.Sprintf("Failed to check connection to %s, can't tell if it's alive", host), "error", err)
 			return 1
@@ -583,7 +583,7 @@ func (app *App) CliHostAdd(host string, priority *int, dryRun bool, skipRedisChe
 	}
 
 	if !dryRun && priority == nil {
-		err = app.dcs.Set(dcs.JoinPath(pathHANodes, host), *redis.DefaultNodeConfiguration())
+		err = app.dcs.Set(dcs.JoinPath(pathHANodes, host), *valkey.DefaultNodeConfiguration())
 		if err != nil && err != dcs.ErrExists {
 			app.logger.Error(fmt.Sprintf("Unable to create dcs path for %s", host), "error", err)
 			return 1
@@ -627,7 +627,7 @@ func (app *App) CliHostRemove(host string) int {
 }
 
 func (app *App) processPriority(priority *int, dryRun bool, host string) (changes bool, err error) {
-	targetConf := redis.DefaultNodeConfiguration()
+	targetConf := valkey.DefaultNodeConfiguration()
 	if priority != nil {
 		targetConf.Priority = *priority
 	}
diff --git a/internal/app/failover.go b/internal/app/failover.go
index 9447cae..0ff6df2 100644
--- a/internal/app/failover.go
+++ b/internal/app/failover.go
@@ -20,7 +20,7 @@ func countRunningHAReplicas(shardState map[string]*HostState) int {
 
 func (app *App) getFailoverQuorum(activeNodes []string) int {
 	fq := len(activeNodes) - app.getNumReplicasToWrite(activeNodes)
-	if fq < 1 || app.config.Redis.AllowDataLoss {
+	if fq < 1 || app.config.Valkey.AllowDataLoss {
 		fq = 1
 	}
 	return fq
@@ -36,11 +36,11 @@ func (app *App) performFailover(master string) error {
 }
 
 func (app *App) approveFailover(shardState map[string]*HostState, activeNodes []string, master string) error {
-	if app.config.Redis.FailoverTimeout > 0 {
+	if app.config.Valkey.FailoverTimeout > 0 {
 		failedTime := time.Since(app.nodeFailTime[master])
-		if failedTime < app.config.Redis.FailoverTimeout {
+		if failedTime < app.config.Valkey.FailoverTimeout {
 			return fmt.Errorf("failover timeout is not yet elapsed: remaining %v",
-				app.config.Redis.FailoverTimeout-failedTime)
+				app.config.Valkey.FailoverTimeout-failedTime)
 		}
 	}
 	if countRunningHAReplicas(shardState) == len(shardState)-1 {
@@ -64,9 +64,9 @@ func (app *App) approveFailover(shardState map[string]*HostState, activeNodes []
 			return fmt.Errorf("another switchover with cause %s is in progress", lastSwitchover.Cause)
 		}
 		timeAfterLastSwitchover := time.Since(lastSwitchover.Result.FinishedAt)
-		if timeAfterLastSwitchover < app.config.Redis.FailoverCooldown && lastSwitchover.Cause == CauseAuto {
+		if timeAfterLastSwitchover < app.config.Valkey.FailoverCooldown && lastSwitchover.Cause == CauseAuto {
 			return fmt.Errorf("not enough time from last failover %s (cooldown %s)",
-				lastSwitchover.Result.FinishedAt, app.config.Redis.FailoverCooldown)
+				lastSwitchover.Result.FinishedAt, app.config.Valkey.FailoverCooldown)
 		}
 	}
 	return nil
diff --git a/internal/app/manager.go b/internal/app/manager.go
index 5d977a7..b4ce6d0 100644
--- a/internal/app/manager.go
+++ b/internal/app/manager.go
@@ -176,12 +176,12 @@ func (app *App) stateManager() appState {
 			if app.splitTime[master].IsZero() {
 				app.splitTime[master] = time.Now()
 			}
-			if app.config.Redis.FailoverTimeout > 0 {
+			if app.config.Valkey.FailoverTimeout > 0 {
 				failedTime := time.Since(app.splitTime[master])
-				if failedTime < app.config.Redis.FailoverTimeout {
+				if failedTime < app.config.Valkey.FailoverTimeout {
 					app.logger.Error(
 						fmt.Sprintf("According to DCS majority of shard is still alive, but we don't see that from here, will wait for %v before giving up on manager role",
-							app.config.Redis.FailoverTimeout-failedTime))
+							app.config.Valkey.FailoverTimeout-failedTime))
 					return stateManager
 				}
 			}
@@ -195,7 +195,7 @@ func (app *App) stateManager() appState {
 		app.logger.Error("According to DCS majority of shard is still alive, but we don't see that from here. Giving up on manager role")
 		delete(app.splitTime, master)
 		app.dcs.ReleaseLock(pathManagerLock)
-		waitCtx, cancel := context.WithTimeout(app.ctx, app.config.Redis.FailoverTimeout)
+		waitCtx, cancel := context.WithTimeout(app.ctx, app.config.Valkey.FailoverTimeout)
 		defer cancel()
 		ticker := time.NewTicker(app.config.TickInterval)
 		var manager dcs.LockOwner
diff --git a/internal/app/master.go b/internal/app/master.go
index 22be307..5dae99b 100644
--- a/internal/app/master.go
+++ b/internal/app/master.go
@@ -24,7 +24,7 @@ func (app *App) getCurrentMaster(shardState map[string]*HostState) (string, erro
 			return master, nil
 		}
 		if stateMaster != "" && stateMaster != master {
-			app.logger.Warn(fmt.Sprintf("DCS and redis master state diverged: %s and %s", master, stateMaster))
+			app.logger.Warn(fmt.Sprintf("DCS and valkey master state diverged: %s and %s", master, stateMaster))
 			allStable := true
 			for host, state := range shardState {
 				if !state.PingStable || state.IsOffline {
@@ -105,7 +105,7 @@ func (app *App) changeMaster(host, master string) error {
 
 	app.repairReplica(node, masterState, state, master, host)
 
-	deadline := time.Now().Add(app.config.Redis.WaitReplicationTimeout)
+	deadline := time.Now().Add(app.config.Valkey.WaitReplicationTimeout)
 	for time.Now().Before(deadline) {
 		state = app.getHostState(host)
 		rs := state.ReplicaState
@@ -137,7 +137,7 @@ func (app *App) waitForCatchup(host, master string) error {
 		return fmt.Errorf("waiting for %s to catchup with itself", host)
 	}
 
-	deadline := time.Now().Add(app.config.Redis.WaitCatchupTimeout)
+	deadline := time.Now().Add(app.config.Valkey.WaitCatchupTimeout)
 	for time.Now().Before(deadline) {
 		masterState := app.getHostState(master)
 		if !masterState.PingOk {
diff --git a/internal/app/repair.go b/internal/app/repair.go
index 7451f4c..ac05f83 100644
--- a/internal/app/repair.go
+++ b/internal/app/repair.go
@@ -5,7 +5,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/yandex/rdsync/internal/redis"
+	"github.com/yandex/rdsync/internal/valkey"
 )
 
 func (app *App) repairShard(shardState map[string]*HostState, activeNodes []string, master string) {
@@ -41,17 +41,17 @@ func (app *App) repairShard(shardState map[string]*HostState, activeNodes []stri
 		}
 		rs := state.ReplicaState
 		if rs == nil || state.IsReplPaused || !replicates(masterState, rs, host, masterNode, true) {
-			if syncing < app.config.Redis.MaxParallelSyncs {
+			if syncing < app.config.Valkey.MaxParallelSyncs {
 				app.repairReplica(node, masterState, state, master, host)
 				syncing++
 			} else {
-				app.logger.Error(fmt.Sprintf("Leaving replica %s broken: currently syncing %d/%d", host, syncing, app.config.Redis.MaxParallelSyncs))
+				app.logger.Error(fmt.Sprintf("Leaving replica %s broken: currently syncing %d/%d", host, syncing, app.config.Valkey.MaxParallelSyncs))
 			}
 		}
 	}
 }
 
-func (app *App) repairMaster(node *redis.Node, activeNodes []string, state *HostState) {
+func (app *App) repairMaster(node *valkey.Node, activeNodes []string, state *HostState) {
 	if state.IsReadOnly || state.MinReplicasToWrite != 0 {
 		err, rewriteErr := node.SetReadWrite(app.ctx)
 		if err != nil {
@@ -85,7 +85,7 @@ func (app *App) repairMaster(node *redis.Node, activeNodes []string, state *Host
 	}
 }
 
-func (app *App) repairReplica(node *redis.Node, masterState, state *HostState, master, replicaFQDN string) {
+func (app *App) repairReplica(node *valkey.Node, masterState, state *HostState, master, replicaFQDN string) {
 	masterNode := app.shard.Get(master)
 	rs := state.ReplicaState
 	if !replicates(masterState, rs, replicaFQDN, masterNode, true) {
@@ -108,9 +108,9 @@ func (app *App) repairReplica(node *redis.Node, masterState, state *HostState, m
 					app.logger.Error(fmt.Sprintf("Unable to make %s replica of %s", node.FQDN(), master), "error", err)
 					return
 				}
-				err = node.ClusterMeet(app.ctx, masterIP, app.config.Redis.Port, app.config.Redis.ClusterBusPort)
+				err = node.ClusterMeet(app.ctx, masterIP, app.config.Valkey.Port, app.config.Valkey.ClusterBusPort)
 				if err != nil {
-					app.logger.Error(fmt.Sprintf("Unable to make %s meet with master %s at %s:%d:%d", node.FQDN(), master, masterIP, app.config.Redis.Port, app.config.Redis.ClusterBusPort), "error", err)
+					app.logger.Error(fmt.Sprintf("Unable to make %s meet with master %s at %s:%d:%d", node.FQDN(), master, masterIP, app.config.Valkey.Port, app.config.Valkey.ClusterBusPort), "error", err)
 					return
 				}
 			}
@@ -143,7 +143,7 @@ func (app *App) repairLocalNode(master string) bool {
 			app.nodeFailTime[local.FQDN()] = time.Now()
 		}
 		failedTime := time.Since(app.nodeFailTime[local.FQDN()])
-		if failedTime > app.config.Redis.RestartTimeout && !strings.HasPrefix(err.Error(), "LOADING ") {
+		if failedTime > app.config.Valkey.RestartTimeout && !strings.HasPrefix(err.Error(), "LOADING ") {
 			app.nodeFailTime[local.FQDN()] = time.Now()
 			err = local.Restart(app.ctx)
 			if err != nil {
diff --git a/internal/app/replication.go b/internal/app/replication.go
index cf887d1..81299a7 100644
--- a/internal/app/replication.go
+++ b/internal/app/replication.go
@@ -3,10 +3,10 @@ package app
 import (
 	"slices"
 
-	"github.com/yandex/rdsync/internal/redis"
+	"github.com/yandex/rdsync/internal/valkey"
 )
 
-func replicates(masterState *HostState, replicaState *ReplicaState, replicaFQDN string, masterNode *redis.Node, allowSync bool) bool {
+func replicates(masterState *HostState, replicaState *ReplicaState, replicaFQDN string, masterNode *valkey.Node, allowSync bool) bool {
 	if replicaState == nil || !(replicaState.MasterLinkState || allowSync) {
 		return false
 	}
diff --git a/internal/app/switchover.go b/internal/app/switchover.go
index 24f126a..cdadee4 100644
--- a/internal/app/switchover.go
+++ b/internal/app/switchover.go
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"github.com/yandex/rdsync/internal/dcs"
-	"github.com/yandex/rdsync/internal/redis"
+	"github.com/yandex/rdsync/internal/valkey"
 )
 
 const (
@@ -200,7 +200,7 @@ func (app *App) performSwitchover(shardState map[string]*HostState, activeNodes
 			}
 		}
 		if switchover.Cause != CauseAuto {
-			app.waitPoisonPill(app.config.Redis.WaitPoisonPillTimeout)
+			app.waitPoisonPill(app.config.Valkey.WaitPoisonPillTimeout)
 		}
 	}
 
@@ -213,7 +213,7 @@ func (app *App) performSwitchover(shardState map[string]*HostState, activeNodes
 			return err
 		}
 		rs := shardState[host].ReplicaState
-		if (rs == nil || !rs.MasterLinkState) && !app.config.Redis.TurnBeforeSwitchover {
+		if (rs == nil || !rs.MasterLinkState) && !app.config.Valkey.TurnBeforeSwitchover {
 			app.logger.Info(fmt.Sprintf("Switchover: skipping replication pause on %s", host))
 			return nil
 		}
@@ -352,10 +352,10 @@ func (app *App) performSwitchover(shardState map[string]*HostState, activeNodes
 			}
 		}
 		if switchover.Cause != CauseAuto {
-			app.waitPoisonPill(app.config.Redis.WaitPoisonPillTimeout)
+			app.waitPoisonPill(app.config.Valkey.WaitPoisonPillTimeout)
 		}
 
-		if len(aliveActiveNodes) == 1 || app.config.Redis.AllowDataLoss {
+		if len(aliveActiveNodes) == 1 || app.config.Valkey.AllowDataLoss {
 			node := app.shard.Get(newMaster)
 			err, errConf := node.SetReadWrite(app.ctx)
 			if err != nil {
@@ -373,7 +373,7 @@ func (app *App) performSwitchover(shardState map[string]*HostState, activeNodes
 			}
 		}
 
-		if app.config.Redis.TurnBeforeSwitchover {
+		if app.config.Valkey.TurnBeforeSwitchover {
 			var psyncNodes []string
 			for _, host := range aliveActiveNodes {
 				if host == newMaster {
@@ -404,8 +404,8 @@ func (app *App) performSwitchover(shardState map[string]*HostState, activeNodes
 				app.logger.Warn("Unable to psync some replicas before promote", "error", err)
 			}
 		}
-		deadline := time.Now().Add(app.config.Redis.WaitPromoteTimeout)
-		forceDeadline := time.Now().Add(app.config.Redis.WaitPromoteForceTimeout)
+		deadline := time.Now().Add(app.config.Valkey.WaitPromoteTimeout)
+		forceDeadline := time.Now().Add(app.config.Valkey.WaitPromoteForceTimeout)
 		promoted := false
 		for time.Now().Before(deadline) {
 			err = app.promote(newMaster, oldMaster, shardState, forceDeadline)
@@ -463,7 +463,7 @@ func (app *App) performSwitchover(shardState map[string]*HostState, activeNodes
 		shardState, err = app.getShardStateFromDB()
 		if err == nil {
 			sentiCacheUpdateErrs := runParallel(func(host string) error {
-				sentiCacheNode, err := redis.NewRemoteSentiCacheNode(app.config, host, app.logger)
+				sentiCacheNode, err := valkey.NewRemoteSentiCacheNode(app.config, host, app.logger)
 				if err != nil {
 					return err
 				}
diff --git a/internal/config/config.go b/internal/config/config.go
index a79e9e4..7083745 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -13,8 +13,8 @@ import (
 	"github.com/yandex/rdsync/internal/dcs"
 )
 
-// RedisConfig contains redis connection info and params
-type RedisConfig struct {
+// ValkeyConfig contains valkey connection info and params
+type ValkeyConfig struct {
 	Port                    int           `yaml:"port"`
 	ClusterBusPort          int           `yaml:"cluster_bus_port"`
 	UseTLS                  bool          `yaml:"use_tls"`
@@ -22,7 +22,6 @@ type RedisConfig struct {
 	AuthUser                string        `yaml:"auth_user"`
 	AuthPassword            string        `yaml:"auth_password"`
 	DialTimeout             time.Duration `yaml:"dial_timeout"`
-	ReadTimeout             time.Duration `yaml:"read_timeout"`
 	WriteTimeout            time.Duration `yaml:"write_timeout"`
 	DNSTTL                  time.Duration `yaml:"dns_ttl"`
 	FailoverTimeout         time.Duration `yaml:"failover_timeout"`
@@ -40,18 +39,6 @@ type RedisConfig struct {
 	AofPath                 string        `yaml:"aof_path"`
 }
 
-// RedisRenamesConfig contains redis command renames
-type RedisRenamesConfig struct {
-	Client           string `yaml:"client"`
-	Cluster          string `yaml:"cluster"`
-	ClusterFailover  string `yaml:"cluster_failover"`
-	ClusterMyID      string `yaml:"cluster_myid"`
-	ClusterReplicate string `yaml:"cluster_replicate"`
-	ClusterMeet      string `yaml:"cluster_meet"`
-	Config           string `yaml:"config"`
-	ReplicaOf        string `yaml:"replicaof"`
-}
-
 // SentinelModeConfig contains sentinel-mode specific configuration
 type SentinelModeConfig struct {
 	AnnounceHostname    bool   `yaml:"announce_hostname"`
@@ -84,14 +71,13 @@ type Config struct {
 	PprofAddr               string              `yaml:"pprof_addr"`
 	Zookeeper               dcs.ZookeeperConfig `yaml:"zookeeper"`
 	DcsWaitTimeout          time.Duration       `yaml:"dcs_wait_timeout"`
-	Redis                   RedisConfig         `yaml:"redis"`
-	Renames                 RedisRenamesConfig  `yaml:"renames"`
+	Valkey                  ValkeyConfig        `yaml:"valkey"`
 	SentinelMode            SentinelModeConfig  `yaml:"sentinel_mode"`
 }
 
-// DefaultRedisConfig returns default configuration for redis connection info and params
-func DefaultRedisConfig() RedisConfig {
-	return RedisConfig{
+// DefaultValkeyConfig returns default configuration for valkey connection info and params
+func DefaultValkeyConfig() ValkeyConfig {
+	return ValkeyConfig{
 		Port:                    6379,
 		ClusterBusPort:          16379,
 		UseTLS:                  false,
@@ -99,7 +85,6 @@ func DefaultRedisConfig() RedisConfig {
 		AuthUser:                "",
 		AuthPassword:            "",
 		DialTimeout:             5 * time.Second,
-		ReadTimeout:             5 * time.Second,
 		WriteTimeout:            5 * time.Second,
 		DNSTTL:                  5 * time.Minute,
 		FailoverTimeout:         30 * time.Second,
@@ -113,25 +98,11 @@ func DefaultRedisConfig() RedisConfig {
 		MaxParallelSyncs:        1,
 		AllowDataLoss:           false,
 		TurnBeforeSwitchover:    false,
-		RestartCommand:          "systemctl restart redis-server",
+		RestartCommand:          "systemctl restart valkey-server",
 		AofPath:                 "",
 	}
 }
 
-// DefaultRedisRenamesConfig returns default redis command renames
-func DefaultRedisRenamesConfig() RedisRenamesConfig {
-	return RedisRenamesConfig{
-		Client:           "CLIENT",
-		Cluster:          "CLUSTER",
-		ClusterFailover:  "FAILOVER",
-		ClusterMyID:      "MYID",
-		ClusterReplicate: "REPLICATE",
-		ClusterMeet:      "MEET",
-		Config:           "CONFIG",
-		ReplicaOf:        "REPLICAOF",
-	}
-}
-
 func makeFakeRunID(hostname string) (string, error) {
 	hash := sha512.New384()
 	_, err := hash.Write([]byte(hostname))
@@ -155,7 +126,7 @@ func DefaultSentinelModeConfig(hostname string) (SentinelModeConfig, error) {
 		CacheAuthUser:       "",
 		CacheAuthPassword:   "",
 		CachePort:           26379,
-		CacheRestartCommand: "systemctl restart redis-senticache",
+		CacheRestartCommand: "systemctl restart valkey-senticache",
 		CacheUpdateSecret:   "",
 		UseTLS:              false,
 		TLSCAPath:           "",
@@ -193,8 +164,7 @@ func DefaultConfig() (Config, error) {
 		PprofAddr:               "",
 		Zookeeper:               zkConfig,
 		DcsWaitTimeout:          10 * time.Second,
-		Redis:                   DefaultRedisConfig(),
-		Renames:                 DefaultRedisRenamesConfig(),
+		Valkey:                  DefaultValkeyConfig(),
 		SentinelMode:            sentinelConf,
 	}
 	return config, nil
diff --git a/internal/redis/node.go b/internal/valkey/node.go
similarity index 66%
rename from internal/redis/node.go
rename to internal/valkey/node.go
index 19e91d0..b58be82 100644
--- a/internal/redis/node.go
+++ b/internal/valkey/node.go
@@ -1,4 +1,4 @@
-package redis
+package valkey
 
 import (
 	"context"
@@ -11,7 +11,7 @@ import (
 	"strings"
 	"time"
 
-	client "github.com/redis/go-redis/v9"
+	client "github.com/valkey-io/valkey-go"
 
 	"github.com/yandex/rdsync/internal/config"
 )
@@ -21,7 +21,7 @@ const (
 	highMinReplicas = 65535
 )
 
-// Node represents API to query/manipulate a single Redis node
+// Node represents API to query/manipulate a single valkey node
 type Node struct {
 	config      *config.Config
 	logger      *slog.Logger
@@ -31,7 +31,8 @@ type Node struct {
 	clusterID   string
 	infoResults []bool
 	cachedInfo  map[string]string
-	conn        *client.Client
+	conn        client.Client
+	opts        client.ClientOption
 }
 
 func uniqLookup(host string) ([]net.IP, error) {
@@ -68,34 +69,41 @@ func NewNode(config *config.Config, logger *slog.Logger, fqdn string) (*Node, er
 		ips = []net.IP{}
 		now = time.Time{}
 	}
-	addr := net.JoinHostPort(host, strconv.Itoa(config.Redis.Port))
-	opts := client.Options{
-		Addr:            addr,
-		Username:        config.Redis.AuthUser,
-		Password:        config.Redis.AuthPassword,
-		DialTimeout:     config.Redis.DialTimeout,
-		ReadTimeout:     config.Redis.ReadTimeout,
-		WriteTimeout:    config.Redis.WriteTimeout,
-		PoolSize:        1,
-		MaxRetries:      -1,
-		ConnMaxIdleTime: -1,
-		Protocol:        2,
-	}
-	if config.Redis.UseTLS {
-		tlsConf, err := getTLSConfig(config, config.Redis.TLSCAPath, host)
+	addr := net.JoinHostPort(host, strconv.Itoa(config.Valkey.Port))
+	opts := client.ClientOption{
+		InitAddress:           []string{addr},
+		Username:              config.Valkey.AuthUser,
+		Password:              config.Valkey.AuthPassword,
+		Dialer:                net.Dialer{Timeout: config.Valkey.DialTimeout},
+		ConnWriteTimeout:      config.Valkey.WriteTimeout,
+		AlwaysRESP2:           true,
+		ForceSingleClient:     true,
+		DisableAutoPipelining: true,
+		DisableCache:          true,
+		BlockingPoolMinSize:   1,
+		BlockingPoolCleanup:   time.Second,
+	}
+	if config.Valkey.UseTLS {
+		tlsConf, err := getTLSConfig(config, config.Valkey.TLSCAPath, host)
 		if err != nil {
 			return nil, err
 		}
 		opts.TLSConfig = tlsConf
 	}
+	conn, err := client.NewClient(opts)
+	if err != nil {
+		logger.Warn("Unable to establish initial connection", "fqdn", host, "error", err)
+		conn = nil
+	}
 	node := Node{
 		clusterID: "",
 		config:    config,
-		conn:      client.NewClient(&opts),
+		conn:      conn,
 		logger:    nodeLogger,
 		fqdn:      fqdn,
 		ips:       ips,
 		ipsTime:   now,
+		opts:      opts,
 	}
 	return &node, nil
 }
@@ -114,9 +122,22 @@ func (n *Node) String() string {
 	return n.fqdn
 }
 
-// Close closes underlying Redis connection
-func (n *Node) Close() error {
-	return n.conn.Close()
+// Close closes underlying valkey connection
+func (n *Node) Close() {
+	if n.conn != nil {
+		n.conn.Close()
+	}
+}
+
+func (n *Node) ensureConn() error {
+	if n.conn == nil {
+		conn, err := client.NewClient(n.opts)
+		if err != nil {
+			return err
+		}
+		n.conn = conn
+	}
+	return nil
 }
 
 // MatchHost checks if node has target hostname or ip
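go-redis constructed its client lazily, so NewClient could not fail; valkey-go dials at construction time and returns an error, which is why NewNode above now tolerates a nil connection and the hunks below go through ensureConn before every command. A standalone sketch (assuming a reachable server on 127.0.0.1:6379 with no auth) of the command-builder pattern those hunks switch to, including how CONFIG GET replies are read back with AsStrSlice():

package main

import (
	"context"
	"fmt"

	valkey "github.com/valkey-io/valkey-go"
)

func main() {
	c, err := valkey.NewClient(valkey.ClientOption{
		InitAddress: []string{"127.0.0.1:6379"},
		AlwaysRESP2: true, // same protocol choice as NewNode above
	})
	if err != nil {
		// Unlike go-redis, construction dials immediately and may fail here.
		panic(err)
	}
	defer c.Close()

	ctx := context.Background()
	res := c.Do(ctx, c.B().ConfigGet().Parameter("appendonly").Build())
	if err := res.Error(); err != nil {
		panic(err)
	}
	vals, err := res.AsStrSlice() // flat ["appendonly", "yes"|"no"] pair under RESP2
	if err != nil {
		panic(err)
	}
	fmt.Println(vals)
}
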
@@ -138,7 +159,7 @@ func (n *Node) MatchHost(host string) bool {
 
 // RefreshAddrs updates internal ip address list if ttl exceeded
 func (n *Node) RefreshAddrs() error {
-	if time.Since(n.ipsTime) < n.config.Redis.DNSTTL {
+	if time.Since(n.ipsTime) < n.config.Valkey.DNSTTL {
 		n.logger.Debug("Not updating ips cache due to ttl")
 		return nil
 	}
@@ -172,18 +193,25 @@ func (n *Node) GetIPs() []string {
 }
 
 func (n *Node) configRewrite(ctx context.Context) error {
-	setCmd := n.conn.Do(ctx, n.config.Renames.Config, "rewrite")
-	return setCmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err
+	}
+	return n.conn.Do(ctx, n.conn.B().ConfigRewrite().Build()).Error()
 }
 
 // IsReplPaused returns pause status of replication on node
 func (n *Node) IsReplPaused(ctx context.Context) (bool, error) {
-	cmd := client.NewStringSliceCmd(ctx, n.config.Renames.Config, "get", "repl-paused")
-	err := n.conn.Process(ctx, cmd)
+	err := n.ensureConn()
+	if err != nil {
+		return false, err
+	}
+	cmd := n.conn.Do(ctx, n.conn.B().ConfigGet().Parameter("repl-paused").Build())
+	err = cmd.Error()
 	if err != nil {
 		return false, err
 	}
-	vals, err := cmd.Result()
+	vals, err := cmd.AsStrSlice()
 	if err != nil {
 		return false, err
 	}
@@ -195,8 +223,12 @@ func (n *Node) IsReplPaused(ctx context.Context) (bool, error) {
 
 // PauseReplication pauses replication from master on node
 func (n *Node) PauseReplication(ctx context.Context) error {
-	setCmd := n.conn.Do(ctx, n.config.Renames.Config, "set", "repl-paused", "yes")
-	err := setCmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err
+	}
+	cmd := n.conn.Do(ctx, n.conn.B().Arbitrary("CONFIG", "SET", "repl-paused", "yes").Build())
+	err = cmd.Error()
 	if err != nil {
 		return err
 	}
@@ -205,8 +237,12 @@ func (n *Node) PauseReplication(ctx context.Context) error {
 
 // ResumeReplication starts replication from master on node
 func (n *Node) ResumeReplication(ctx context.Context) error {
-	setCmd := n.conn.Do(ctx, n.config.Renames.Config, "set", "repl-paused", "no")
-	err := setCmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err
+	}
+	cmd := n.conn.Do(ctx, n.conn.B().Arbitrary("CONFIG", "SET", "repl-paused", "no").Build())
+	err = cmd.Error()
 	if err != nil {
 		return err
 	}
@@ -215,12 +251,16 @@ func (n *Node) ResumeReplication(ctx context.Context) error {
 
 // IsOffline returns Offline status for node
 func (n *Node) IsOffline(ctx context.Context) (bool, error) {
-	cmd := client.NewStringSliceCmd(ctx, n.config.Renames.Config, "get", "offline")
-	err := n.conn.Process(ctx, cmd)
+	err := n.ensureConn()
 	if err != nil {
 		return false, err
 	}
-	vals, err := cmd.Result()
+	cmd := n.conn.Do(ctx, n.conn.B().ConfigGet().Parameter("offline").Build())
+	err = cmd.Error()
+	if err != nil {
+		return false, err
+	}
+	vals, err := cmd.AsStrSlice()
 	if err != nil {
 		return false, err
 	}
@@ -235,8 +275,12 @@ func (n *Node) SetOffline(ctx context.Context) error {
 	if !n.IsLocal() {
 		return fmt.Errorf("making %s offline is not possible - not local", n.fqdn)
 	}
-	setCmd := n.conn.Do(ctx, n.config.Renames.Config, "set", "offline", "yes")
-	err := setCmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err
+	}
+	cmd := n.conn.Do(ctx, n.conn.B().Arbitrary("CONFIG", "SET", "offline", "yes").Build())
+	err = cmd.Error()
 	if err != nil {
 		return err
 	}
@@ -253,18 +297,25 @@ func (n *Node) SetOffline(ctx context.Context) error {
 
 // DisconnectClients disconnects all connected clients with specified type
 func (n *Node) DisconnectClients(ctx context.Context, ctype string) error {
-	disconnectCmd := n.conn.Do(ctx, n.config.Renames.Client, "kill", "type", ctype)
-	return disconnectCmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err
+	}
+	return n.conn.Do(ctx, n.conn.B().Arbitrary("CLIENT", "KILL", "TYPE", ctype).Build()).Error()
 }
 
 // GetNumQuorumReplicas returns number of connected replicas to accept writes on node
 func (n *Node) GetNumQuorumReplicas(ctx context.Context) (int, error) {
-	cmd := client.NewStringSliceCmd(ctx, n.config.Renames.Config, "get", "quorum-replicas-to-write")
-	err := n.conn.Process(ctx, cmd)
+	err := n.ensureConn()
+	if err != nil {
+		return 0, err
+	}
+	cmd := n.conn.Do(ctx, n.conn.B().ConfigGet().Parameter("quorum-replicas-to-write").Build())
+	err = cmd.Error()
 	if err != nil {
 		return 0, err
 	}
-	vals, err := cmd.Result()
+	vals, err := cmd.AsStrSlice()
 	if err != nil {
 		return 0, err
 	}
@@ -280,8 +331,12 @@ func (n *Node) GetNumQuorumReplicas(ctx context.Context) (int, error) {
 
 // SetNumQuorumReplicas sets desired number of connected replicas to accept writes on node
 func (n *Node) SetNumQuorumReplicas(ctx context.Context, value int) (error, error) {
-	setCmd := n.conn.Do(ctx, n.config.Renames.Config, "set", "quorum-replicas-to-write", strconv.Itoa(value))
-	err := setCmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err, nil
+	}
+	cmd := n.conn.Do(ctx, n.conn.B().Arbitrary("CONFIG", "SET", "quorum-replicas-to-write", strconv.Itoa(value)).Build())
+	err = cmd.Error()
 	if err != nil {
 		return err, nil
 	}
@@ -290,12 +345,16 @@ func (n *Node) SetNumQuorumReplicas(ctx context.Context, value int) (error, erro
 
 // GetQuorumReplicas returns a set of quorum replicas
 func (n *Node) GetQuorumReplicas(ctx context.Context) (string, error) {
-	cmd := client.NewStringSliceCmd(ctx, n.config.Renames.Config, "get", "quorum-replicas")
-	err := n.conn.Process(ctx, cmd)
+	err := n.ensureConn()
+	if err != nil {
+		return "", err
+	}
+	cmd := n.conn.Do(ctx, n.conn.B().ConfigGet().Parameter("quorum-replicas").Build())
+	err = cmd.Error()
 	if err != nil {
 		return "", err
 	}
-	vals, err := cmd.Result()
+	vals, err := cmd.AsStrSlice()
 	if err != nil {
 		return "", err
 	}
@@ -309,8 +368,12 @@ func (n *Node) GetQuorumReplicas(ctx context.Context) (string, error) {
 
 // SetQuorumReplicas sets desired quorum replicas
 func (n *Node) SetQuorumReplicas(ctx context.Context, value string) (error, error) {
-	setCmd := n.conn.Do(ctx, n.config.Renames.Config, "set", "quorum-replicas", value)
-	err := setCmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err, nil
+	}
+	cmd := n.conn.Do(ctx, n.conn.B().Arbitrary("CONFIG", "SET", "quorum-replicas", value).Build())
+	err = cmd.Error()
 	if err != nil {
 		return err, nil
 	}
@@ -337,12 +400,16 @@ func (n *Node) EmptyQuorumReplicas(ctx context.Context) error {
 
 // GetAppendonly returns a setting of appendonly config
 func (n *Node) GetAppendonly(ctx context.Context) (bool, error) {
-	cmd := client.NewStringSliceCmd(ctx, n.config.Renames.Config, "get", "appendonly")
-	err := n.conn.Process(ctx, cmd)
+	err := n.ensureConn()
 	if err != nil {
 		return false, err
 	}
-	vals, err := cmd.Result()
+	cmd := n.conn.Do(ctx, n.conn.B().ConfigGet().Parameter("appendonly").Build())
+	err = cmd.Error()
+	if err != nil {
+		return false, err
+	}
+	vals, err := cmd.AsStrSlice()
 	if err != nil {
 		return false, err
 	}
@@ -358,8 +425,11 @@ func (n *Node) SetAppendonly(ctx context.Context, value bool) error {
 	if !value {
 		strValue = "no"
 	}
-	setCmd := n.conn.Do(ctx, n.config.Renames.Config, "set", "appendonly", strValue)
-	err := setCmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err
+	}
+	err = n.conn.Do(ctx, n.conn.B().Arbitrary("CONFIG", "SET", "appendonly", strValue).Build()).Error()
 	if err != nil {
 		return err
 	}
@@ -368,12 +438,16 @@ func (n *Node) SetAppendonly(ctx context.Context, value bool) error {
 
 // GetMinReplicasToWrite returns number of replicas required to write on node
 func (n *Node) GetMinReplicasToWrite(ctx context.Context) (int64, error) {
-	cmd := client.NewStringSliceCmd(ctx, n.config.Renames.Config, "get", "min-replicas-to-write")
-	err := n.conn.Process(ctx, cmd)
+	err := n.ensureConn()
+	if err != nil {
+		return 0, err
+	}
+	cmd := n.conn.Do(ctx, n.conn.B().ConfigGet().Parameter("min-replicas-to-write").Build())
+	err = cmd.Error()
 	if err != nil {
 		return 0, err
 	}
-	vals, err := cmd.Result()
+	vals, err := cmd.AsStrSlice()
 	if err != nil {
 		return 0, err
 	}
@@ -394,8 +468,11 @@ func (n *Node) IsReadOnly(minReplicasToWrite int64) bool {
 
 // SetReadOnly makes node read-only by setting min replicas to unreasonably high value and disconnecting clients
 func (n *Node) SetReadOnly(ctx context.Context, disconnect bool) (error, error) {
-	setCmd := n.conn.Do(ctx, n.config.Renames.Config, "set", "min-replicas-to-write", strconv.Itoa(highMinReplicas))
-	err := setCmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err, nil
+	}
+	err = n.conn.Do(ctx, n.conn.B().Arbitrary("CONFIG", "SET", "min-replicas-to-write", strconv.Itoa(highMinReplicas)).Build()).Error()
 	if err != nil {
 		return err, nil
 	}
@@ -415,8 +492,11 @@ func (n *Node) SetReadOnly(ctx context.Context, disconnect bool) (error, error)
 
 // SetReadWrite makes node writable again by setting min-replicas-to-write back to zero
 func (n *Node) SetReadWrite(ctx context.Context) (error, error) {
-	setCmd := n.conn.Do(ctx, n.config.Renames.Config, "set", "min-replicas-to-write", "0")
-	err := setCmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err, nil
+	}
+	err = n.conn.Do(ctx, n.conn.B().Arbitrary("CONFIG", "SET", "min-replicas-to-write", "0").Build()).Error()
 	if err != nil {
 		return err, nil
 	}
@@ -428,25 +508,33 @@ func (n *Node) SetOnline(ctx context.Context) error {
 	if !n.IsLocal() {
 		return fmt.Errorf("making %s online is not possible - not local", n.fqdn)
 	}
-	setCmd := n.conn.Do(ctx, n.config.Renames.Config, "set", "offline", "no")
-	return setCmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err
+	}
+	return n.conn.Do(ctx, n.conn.B().Arbitrary("CONFIG", "SET", "offline", "no").Build()).Error()
 }
 
-// Restart restarts redis server
+// Restart restarts valkey server
 func (n *Node) Restart(ctx context.Context) error {
 	if !n.IsLocal() {
 		return fmt.Errorf("restarting %s is not possible - not local", n.fqdn)
 	}
-	n.logger.Warn(fmt.Sprintf("Restarting with %s", n.config.Redis.RestartCommand))
-	split := strings.Fields(n.config.Redis.RestartCommand)
+	n.logger.Warn(fmt.Sprintf("Restarting with %s", n.config.Valkey.RestartCommand))
+	split := strings.Fields(n.config.Valkey.RestartCommand)
 	cmd := exec.CommandContext(ctx, split[0], split[1:]...)
 	return cmd.Run()
 }
 
 // GetInfo returns raw info map
 func (n *Node) GetInfo(ctx context.Context) (map[string]string, error) {
-	cmd := n.conn.Info(ctx)
-	err := cmd.Err()
+	var err error
+	var cmd client.ValkeyResult
+	err = n.ensureConn()
+	if err == nil {
+		cmd = n.conn.Do(ctx, n.conn.B().Info().Build())
+		err = cmd.Error()
+	}
 	if err != nil {
 		n.infoResults = append(n.infoResults, false)
 		if len(n.infoResults) > n.config.PingStable {
@@ -465,7 +553,10 @@ func (n *Node) GetInfo(ctx context.Context) (map[string]string, error) {
 		return n.cachedInfo, err
 	}
 
-	inp := cmd.Val()
+	inp, err := cmd.ToString()
+	if err != nil {
+		return nil, err
+	}
 	lines := strings.Count(inp, "\r\n")
 	res := make(map[string]string, lines)
 	pos := 0
@@ -512,8 +603,7 @@ func (n *Node) SentinelMakeReplica(ctx context.Context, target string) error {
 	if err != nil {
 		return err
 	}
-	cmd := n.conn.Do(ctx, n.config.Renames.ReplicaOf, target, n.config.Redis.Port)
-	err = cmd.Err()
+	err = n.conn.Do(ctx, n.conn.B().Replicaof().Host(target).Port(int64(n.config.Valkey.Port)).Build()).Error()
 	if err != nil {
 		return err
 	}
@@ -522,8 +612,11 @@ func (n *Node) SentinelMakeReplica(ctx context.Context, target string) error {
 
 // SentinelPromote makes node primary in sentinel mode
 func (n *Node) SentinelPromote(ctx context.Context) error {
-	cmd := n.conn.Do(ctx, n.config.Renames.ReplicaOf, "NO", "ONE")
-	err := cmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err
+	}
+	err = n.conn.Do(ctx, n.conn.B().Replicaof().No().One().Build()).Error()
 	if err != nil {
 		return err
 	}
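Both REPLICAOF shapes used above have dedicated builders; a reduced sketch with illustrative helper names:

package valkeyutil

import (
	"context"

	valkey "github.com/valkey-io/valkey-go"
)

// Attach points a node at a primary (REPLICAOF host port), as SentinelMakeReplica does above.
func Attach(ctx context.Context, conn valkey.Client, host string, port int64) error {
	return conn.Do(ctx, conn.B().Replicaof().Host(host).Port(port).Build()).Error()
}

// Detach promotes a node (REPLICAOF NO ONE), as SentinelPromote does above.
func Detach(ctx context.Context, conn valkey.Client) error {
	return conn.Do(ctx, conn.B().Replicaof().No().One().Build()).Error()
}
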
@@ -535,17 +628,17 @@ func (n *Node) ClusterGetID(ctx context.Context) (string, error) {
 	if n.clusterID != "" {
 		return n.clusterID, nil
 	}
-	cmd := client.NewStringCmd(ctx, n.config.Renames.Cluster, n.config.Renames.ClusterMyID)
-	err := n.conn.Process(ctx, cmd)
+	err := n.ensureConn()
 	if err != nil {
 		return "", err
 	}
-	clusterID, err := cmd.Result()
+	cmd := n.conn.Do(ctx, n.conn.B().ClusterMyid().Build())
+	err = cmd.Error()
 	if err != nil {
 		return "", err
 	}
-	n.clusterID = clusterID
-	return n.clusterID, nil
+	n.clusterID, err = cmd.ToString()
+	return n.clusterID, err
 }
 
 // ClusterMakeReplica makes node replica of target in cluster mode
@@ -554,20 +647,27 @@ func (n *Node) ClusterMakeReplica(ctx context.Context, targetID string) error {
 	if err != nil {
 		return err
 	}
-	cmd := n.conn.Do(ctx, n.config.Renames.Cluster, n.config.Renames.ClusterReplicate, targetID)
-	return cmd.Err()
+	return n.conn.Do(ctx, n.conn.B().ClusterReplicate().NodeId(targetID).Build()).Error()
 }
 
 // IsClusterMajorityAlive checks if majority of masters in cluster are not failed
 func (n *Node) IsClusterMajorityAlive(ctx context.Context) (bool, error) {
-	cmd := n.conn.ClusterNodes(ctx)
-	err := cmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return false, err
+	}
+	cmd := n.conn.Do(ctx, n.conn.B().ClusterNodes().Build())
+	err = cmd.Error()
 	if err != nil {
 		return false, err
 	}
 	totalMasters := 0
 	failedMasters := 0
-	lines := strings.Split(cmd.Val(), "\n")
+	strVal, err := cmd.ToString()
+	if err != nil {
+		return false, err
+	}
+	lines := strings.Split(strVal, "\n")
 	for _, line := range lines {
 		split := strings.Split(line, " ")
 		if len(split) < 3 {
@@ -587,24 +687,38 @@ func (n *Node) IsClusterMajorityAlive(ctx context.Context) (bool, error) {
 
 // ClusterPromoteForce makes node primary in cluster mode if master/majority of masters is reachable
 func (n *Node) ClusterPromoteForce(ctx context.Context) error {
-	cmd := n.conn.Do(ctx, n.config.Renames.Cluster, n.config.Renames.ClusterFailover, "FORCE")
-	return cmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err
+	}
+	return n.conn.Do(ctx, n.conn.B().ClusterFailover().Force().Build()).Error()
 }
 
 // ClusterPromoteTakeover makes node primary in cluster mode if majority of masters is not reachable
 func (n *Node) ClusterPromoteTakeover(ctx context.Context) error {
-	cmd := n.conn.Do(ctx, n.config.Renames.Cluster, n.config.Renames.ClusterFailover, "TAKEOVER")
-	return cmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err
+	}
+	return n.conn.Do(ctx, n.conn.B().ClusterFailover().Takeover().Build()).Error()
 }
 
 // IsClusterNodeAlone checks if node sees only itself
 func (n *Node) IsClusterNodeAlone(ctx context.Context) (bool, error) {
-	cmd := n.conn.ClusterNodes(ctx)
-	err := cmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return false, err
+	}
+	cmd := n.conn.Do(ctx, n.conn.B().ClusterNodes().Build())
+	err = cmd.Error()
 	if err != nil {
 		return false, err
 	}
-	lines := strings.Split(cmd.Val(), "\n")
+	strVal, err := cmd.ToString()
+	if err != nil {
+		return false, err
+	}
+	lines := strings.Split(strVal, "\n")
 	var count int
 	for _, line := range lines {
 		if len(strings.TrimSpace(line)) > 0 {
@@ -616,18 +730,29 @@ func (n *Node) IsClusterNodeAlone(ctx context.Context) (bool, error) {
 
 // ClusterMeet makes replica join the cluster
 func (n *Node) ClusterMeet(ctx context.Context, addr string, port, clusterBusPort int) error {
-	cmd := n.conn.Do(ctx, n.config.Renames.Cluster, n.config.Renames.ClusterMeet, addr, strconv.Itoa(port), strconv.Itoa(clusterBusPort))
-	return cmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return err
+	}
+	return n.conn.Do(ctx, n.conn.B().ClusterMeet().Ip(addr).Port(int64(port)).ClusterBusPort(int64(clusterBusPort)).Build()).Error()
 }
 
 // HasClusterSlots checks if node has any slot assigned
 func (n *Node) HasClusterSlots(ctx context.Context) (bool, error) {
-	cmd := n.conn.ClusterNodes(ctx)
-	err := cmd.Err()
+	err := n.ensureConn()
+	if err != nil {
+		return false, err
+	}
+	cmd := n.conn.Do(ctx, n.conn.B().ClusterNodes().Build())
+	err = cmd.Error()
+	if err != nil {
+		return false, err
+	}
+	strVal, err := cmd.ToString()
 	if err != nil {
 		return false, err
 	}
-	lines := strings.Split(cmd.Val(), "\n")
+	lines := strings.Split(strVal, "\n")
 	for _, line := range lines {
 		split := strings.Split(line, " ")
 		if len(split) < 3 {
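The three CLUSTER NODES readers above share the same parsing scheme: the reply is one bulk string, one node per line, with the node flags in the third space-separated field. An illustrative reduction (rdsync's exact flag checks live in the elided parts of these hunks):

package valkeyutil

import (
	"context"
	"strings"

	valkey "github.com/valkey-io/valkey-go"
)

// CountMasters tallies masters and failed masters from CLUSTER NODES output.
func CountMasters(ctx context.Context, conn valkey.Client) (total, failed int, err error) {
	out, err := conn.Do(ctx, conn.B().ClusterNodes().Build()).ToString()
	if err != nil {
		return 0, 0, err
	}
	for _, line := range strings.Split(out, "\n") {
		fields := strings.Split(line, " ")
		if len(fields) < 3 {
			continue
		}
		flags := fields[2]
		if strings.Contains(flags, "master") {
			total++
			if strings.Contains(flags, "fail") {
				failed++
			}
		}
	}
	return total, failed, nil
}
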
diff --git a/internal/redis/senticache.go b/internal/valkey/senticache.go
similarity index 78%
rename from internal/redis/senticache.go
rename to internal/valkey/senticache.go
index 3829611..5eeb111 100644
--- a/internal/redis/senticache.go
+++ b/internal/valkey/senticache.go
@@ -1,4 +1,4 @@
-package redis
+package valkey
 
 import (
 	"context"
@@ -10,7 +10,7 @@ import (
 	"strings"
 	"time"
 
-	client "github.com/redis/go-redis/v9"
+	client "github.com/valkey-io/valkey-go"
 
 	"github.com/yandex/rdsync/internal/config"
 )
@@ -23,7 +23,7 @@ type SentiCacheSentinel struct {
 	Port  int
 }
 
-// SentiCacheReplica represents the redis replica as seen by senticache
+// SentiCacheReplica represents the valkey replica as seen by senticache
 type SentiCacheReplica struct {
 	IP                    string
 	Port                  int
@@ -37,7 +37,7 @@ type SentiCacheReplica struct {
 	SlaveReplOffset       int64
 }
 
-// SentiCacheMaster represents the redis master as seen by senticache
+// SentiCacheMaster represents the valkey master as seen by senticache
 type SentiCacheMaster struct {
 	Name          string
 	IP            string
@@ -55,28 +55,30 @@ type SentiCacheState struct {
 	Sentinels []SentiCacheSentinel
 }
 
-// SentiCacheNode represents API to query/manipulate a single Redis SentiCache node
+// SentiCacheNode represents API to query/manipulate a single Valkey SentiCache node
 type SentiCacheNode struct {
 	config *config.Config
 	logger *slog.Logger
-	conn   *client.Client
+	conn   client.Client
+	opts   client.ClientOption
 	broken bool
 }
 
 // NewRemoteSentiCacheNode is a remote SentiCacheNode constructor
 func NewRemoteSentiCacheNode(config *config.Config, host string, logger *slog.Logger) (*SentiCacheNode, error) {
 	addr := net.JoinHostPort(host, strconv.Itoa(config.SentinelMode.CachePort))
-	opts := client.Options{
-		Addr:            addr,
-		Username:        config.SentinelMode.CacheAuthUser,
-		Password:        config.SentinelMode.CacheAuthPassword,
-		DialTimeout:     config.Redis.DialTimeout,
-		ReadTimeout:     config.Redis.ReadTimeout,
-		WriteTimeout:    config.Redis.WriteTimeout,
-		PoolSize:        1,
-		MinIdleConns:    1,
-		ConnMaxLifetime: time.Hour,
-		Protocol:        2,
+	opts := client.ClientOption{
+		InitAddress:           []string{addr},
+		Username:              config.SentinelMode.CacheAuthUser,
+		Password:              config.SentinelMode.CacheAuthPassword,
+		Dialer:                net.Dialer{Timeout: config.Valkey.DialTimeout},
+		ConnWriteTimeout:      config.Valkey.WriteTimeout,
+		AlwaysRESP2:           true,
+		ForceSingleClient:     true,
+		DisableAutoPipelining: true,
+		DisableCache:          true,
+		BlockingPoolMinSize:   1,
+		BlockingPoolCleanup:   time.Second,
 	}
 	if config.SentinelMode.UseTLS {
 		tlsConf, err := getTLSConfig(config, config.SentinelMode.TLSCAPath, host)
@@ -85,9 +87,15 @@ func NewRemoteSentiCacheNode(config *config.Config, host string, logger *slog.Lo
 		}
 		opts.TLSConfig = tlsConf
 	}
+	conn, err := client.NewClient(opts)
+	if err != nil {
+		logger.Warn("Unable to establish initial connection", "fqdn", host, "error", err)
+		conn = nil
+	}
 	node := SentiCacheNode{
 		config: config,
-		conn:   client.NewClient(&opts),
+		conn:   conn,
+		opts:   opts,
 		logger: logger.With("module", "senticache"),
 		broken: false,
 	}
@@ -99,9 +107,22 @@ func NewSentiCacheNode(config *config.Config, logger *slog.Logger) (*SentiCacheN
 	return NewRemoteSentiCacheNode(config, localhost, logger)
 }
 
-// Close closes underlying Redis connection
-func (s *SentiCacheNode) Close() error {
-	return s.conn.Close()
+// Close closes underlying Valkey connection
+func (s *SentiCacheNode) Close() {
+	if s.conn != nil {
+		s.conn.Close()
+	}
+}
+
+func (s *SentiCacheNode) ensureConn() error {
+	if s.conn == nil {
+		conn, err := client.NewClient(s.opts)
+		if err != nil {
+			return err
+		}
+		s.conn = conn
+	}
+	return nil
 }
 
 func (s *SentiCacheNode) restart(ctx context.Context) error {
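A usage sketch of the nil-conn/ensureConn pattern defined above, with illustrative type and method names: every operation re-checks the connection first, so a node that was unreachable when the struct was built gets retried transparently.

package valkeyutil

import (
	"context"

	valkey "github.com/valkey-io/valkey-go"
)

type lazyNode struct {
	opts valkey.ClientOption
	conn valkey.Client
}

func (n *lazyNode) ensureConn() error {
	if n.conn == nil {
		c, err := valkey.NewClient(n.opts)
		if err != nil {
			return err
		}
		n.conn = c
	}
	return nil
}

// Ping dials lazily on first use (or after a failed constructor) and then runs PING.
func (n *lazyNode) Ping(ctx context.Context) error {
	if err := n.ensureConn(); err != nil {
		return err
	}
	return n.conn.Do(ctx, n.conn.B().Ping().Build()).Error()
}
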
@@ -112,22 +133,27 @@ func (s *SentiCacheNode) restart(ctx context.Context) error {
 }
 
 func (s *SentiCacheNode) sentinels(ctx context.Context) ([]SentiCacheSentinel, error) {
-	cmd := client.NewSliceCmd(ctx, "SENTINEL", "SENTINELS", "1")
-	err := s.conn.Process(ctx, cmd)
+	err := s.ensureConn()
 	if err != nil {
 		return []SentiCacheSentinel{}, err
 	}
-	val, err := cmd.Result()
+	cmd := s.conn.Do(ctx, s.conn.B().SentinelSentinels().Master("1").Build())
+	err = cmd.Error()
+	if err != nil {
+		return []SentiCacheSentinel{}, err
+	}
+	val, err := cmd.ToArray()
 	if err != nil {
 		return []SentiCacheSentinel{}, err
 	}
 	res := make([]SentiCacheSentinel, len(val))
 	for index, rawSentinel := range val {
 		sentinel := SentiCacheSentinel{}
-		sentinelInt := rawSentinel.([]interface{})
-		for i := 0; i < len(sentinelInt)/2; i += 2 {
-			key := sentinelInt[i].(string)
-			value := sentinelInt[i+1].(string)
+		sentinelInt, err := rawSentinel.AsStrMap()
+		if err != nil {
+			return []SentiCacheSentinel{}, err
+		}
+		for key, value := range sentinelInt {
 			switch key {
 			case "name":
 				sentinel.Name = value
@@ -148,12 +174,16 @@ func (s *SentiCacheNode) sentinels(ctx context.Context) ([]SentiCacheSentinel, e
 }
 
 func (s *SentiCacheNode) master(ctx context.Context) (*SentiCacheMaster, error) {
-	cmd := client.NewSliceCmd(ctx, "SENTINEL", "MASTERS")
-	err := s.conn.Process(ctx, cmd)
+	err := s.ensureConn()
+	if err != nil {
+		return nil, err
+	}
+	cmd := s.conn.Do(ctx, s.conn.B().Arbitrary("SENTINEL", "MASTERS").Build())
+	err = cmd.Error()
 	if err != nil {
 		return nil, err
 	}
-	val, err := cmd.Result()
+	val, err := cmd.ToArray()
 	if err != nil {
 		return nil, err
 	}
@@ -163,10 +193,11 @@ func (s *SentiCacheNode) master(ctx context.Context) (*SentiCacheMaster, error)
 		return nil, fmt.Errorf("got %d masters in senticache", len(val))
 	}
 	var res SentiCacheMaster
-	master := val[0].([]interface{})
-	for i := 0; i < len(master)/2; i += 2 {
-		key := master[i].(string)
-		value := master[i+1].(string)
+	master, err := val[0].AsStrMap()
+	if err != nil {
+		return nil, err
+	}
+	for key, value := range master {
 		switch key {
 		case "name":
 			res.Name = value
@@ -202,22 +233,27 @@ func (s *SentiCacheNode) master(ctx context.Context) (*SentiCacheMaster, error)
 }
 
 func (s *SentiCacheNode) replicas(ctx context.Context) ([]SentiCacheReplica, error) {
-	cmd := client.NewSliceCmd(ctx, "SENTINEL", "REPLICAS", "1")
-	err := s.conn.Process(ctx, cmd)
+	err := s.ensureConn()
 	if err != nil {
 		return []SentiCacheReplica{}, err
 	}
-	val, err := cmd.Result()
+	cmd := s.conn.Do(ctx, s.conn.B().SentinelReplicas().Master("1").Build())
+	err = cmd.Error()
+	if err != nil {
+		return []SentiCacheReplica{}, err
+	}
+	val, err := cmd.ToArray()
 	if err != nil {
 		return []SentiCacheReplica{}, err
 	}
 	res := make([]SentiCacheReplica, len(val))
 	for index, rawReplica := range val {
 		replica := SentiCacheReplica{}
-		replicaInt := rawReplica.([]interface{})
-		for i := 0; i < len(replicaInt)/2; i += 2 {
-			key := replicaInt[i].(string)
-			value := replicaInt[i+1].(string)
+		replicaInt, err := rawReplica.AsStrMap()
+		if err != nil {
+			return []SentiCacheReplica{}, err
+		}
+		for key, value := range replicaInt {
 			switch key {
 			case "ip":
 				replica.IP = value
@@ -347,12 +383,7 @@ func (s *SentiCacheNode) Update(ctx context.Context, state *SentiCacheState) err
 		)
 	}
 	s.logger.Debug(fmt.Sprintf("Updating senticache state with %v", command))
-	cmd := make([]interface{}, len(command))
-	for i, v := range command {
-		cmd[i] = v
-	}
-	res := s.conn.Do(ctx, cmd...)
-	err = res.Err()
+	err = s.conn.Do(ctx, s.conn.B().Arbitrary(command...).Build()).Error()
 	if err != nil {
 		s.broken = true
 		return err
diff --git a/internal/redis/shard.go b/internal/valkey/shard.go
similarity index 90%
rename from internal/redis/shard.go
rename to internal/valkey/shard.go
index 3d0a094..a83ab93 100644
--- a/internal/redis/shard.go
+++ b/internal/valkey/shard.go
@@ -1,4 +1,4 @@
-package redis
+package valkey
 
 import (
 	"fmt"
@@ -10,7 +10,7 @@ import (
 	"github.com/yandex/rdsync/internal/dcs"
 )
 
-// Shard contains a set of Redis nodes
+// Shard contains a set of valkey nodes
 type Shard struct {
 	sync.Mutex
 	config *config.Config
@@ -20,7 +20,7 @@ type Shard struct {
 	dcs    dcs.DCS
 }
 
-// NodeConfiguration is a dcs node configuration for redis replica
+// NodeConfiguration is a dcs node configuration for valkey replica
 type NodeConfiguration struct {
 	// Priority - is a host priority to become master. Can be changed via CLI.
 	Priority int `json:"priority"`
@@ -82,9 +82,7 @@ func (s *Shard) UpdateHostsInfo() error {
 	for hostname := range s.nodes {
 		if _, found := set[hostname]; !found {
 			if s.local == nil || hostname != s.local.FQDN() {
-				if err = s.nodes[hostname].Close(); err != nil {
-					return err
-				}
+				s.nodes[hostname].Close()
 			}
 			delete(s.nodes, hostname)
 		}
@@ -93,7 +91,7 @@ func (s *Shard) UpdateHostsInfo() error {
 	return nil
 }
 
-// Get returns Redis Node by host name
+// Get returns Valkey Node by host name
 func (s *Shard) Get(host string) *Node {
 	s.Lock()
 	defer s.Unlock()
@@ -101,7 +99,7 @@ func (s *Shard) Get(host string) *Node {
 	return s.nodes[host]
 }
 
-// Local returns Redis Node running on the same not as current rdsync process
+// Local returns Valkey Node running on the same node as current rdsync process
 func (s *Shard) Local() *Node {
 	return s.local
 }
@@ -112,7 +110,7 @@ func (s *Shard) Close() {
 	defer s.Unlock()
 
 	for _, node := range s.nodes {
-		_ = node.Close()
+		node.Close()
 	}
 }
 
diff --git a/internal/redis/tls.go b/internal/valkey/tls.go
similarity index 97%
rename from internal/redis/tls.go
rename to internal/valkey/tls.go
index 7d35c93..51dc4db 100644
--- a/internal/redis/tls.go
+++ b/internal/valkey/tls.go
@@ -1,4 +1,4 @@
-package redis
+package valkey
 
 import (
 	"crypto/tls"
diff --git a/tests/features/00_cluster_smoke.feature b/tests/features/00_cluster_smoke.feature
index 77636d6..6de134f 100644
--- a/tests/features/00_cluster_smoke.feature
+++ b/tests/features/00_cluster_smoke.feature
@@ -2,62 +2,62 @@ Feature: Cluster mode smoke tests
 
     Scenario: Cluster mode initially works
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        And path "/var/lib/redis/appendonlydir" does not exist on "redis1"
-        And path "/var/lib/redis/appendonlydir" exists on "redis2"
-        And path "/var/lib/redis/appendonlydir" exists on "redis3"
+        And path "/var/lib/valkey/appendonlydir" does not exist on "valkey1"
+        And path "/var/lib/valkey/appendonlydir" exists on "valkey2"
+        And path "/var/lib/valkey/appendonlydir" exists on "valkey3"
 
     Scenario: Cluster mode duplicate ip resolve does not break rdsync
         Given clustered shard is up and running
-        When I run command on host "redis1"
-        """
-            echo '192.168.234.14 redis2 test1' >> /etc/hosts
-            echo '192.168.234.14 redis2 test2' >> /etc/hosts
-            echo '192.168.234.15 redis3 test3' >> /etc/hosts
-            echo '192.168.234.15 redis3 test4' >> /etc/hosts
-        """
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        When I run command on host "valkey1"
+        """
+            echo '192.168.234.14 valkey2 test1' >> /etc/hosts
+            echo '192.168.234.14 valkey2 test2' >> /etc/hosts
+            echo '192.168.234.15 valkey3 test3' >> /etc/hosts
+            echo '192.168.234.15 valkey3 test4' >> /etc/hosts
+        """
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on host "redis3"
+        When I run command on host "valkey3"
         """
             supervisorctl stop rdsync
         """
-        And I run command on host "redis2"
+        And I run command on host "valkey2"
         """
             supervisorctl stop rdsync
         """
-        And I run command on host "redis1"
+        And I run command on host "valkey1"
         """
             supervisorctl stop rdsync
         """
-        And I run command on redis host "redis1"
+        And I run command on valkey host "valkey1"
         """
-            CONFIG SET quorum-replicas redis2:6379
+            CONFIG SET quorum-replicas valkey2:6379
         """
-        And I run command on host "redis1"
+        And I run command on host "valkey1"
         """
             supervisorctl start rdsync
         """
-        And I run command on host "redis2"
+        And I run command on host "valkey2"
         """
             supervisorctl start rdsync
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
             supervisorctl start rdsync
         """
@@ -67,25 +67,25 @@ Feature: Cluster mode smoke tests
         """
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
             CONFIG GET quorum-replicas
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
-            .*redis2.*
+            .*valkey2.*
         """
-        And redis cmd result should match regexp
+        And valkey cmd result should match regexp
         """
-            .*redis3.*
+            .*valkey3.*
         """
-        And redis cmd result should match regexp
+        And valkey cmd result should match regexp
         """
             .*192.168.234.14.*
         """
-        And redis cmd result should match regexp
+        And valkey cmd result should match regexp
         """
             .*192.168.234.15.*
         """
diff --git a/tests/features/00_sentinel_smoke.feature b/tests/features/00_sentinel_smoke.feature
index d65a98d..3cbfa73 100644
--- a/tests/features/00_sentinel_smoke.feature
+++ b/tests/features/00_sentinel_smoke.feature
@@ -2,68 +2,68 @@ Feature: Sentinel mode smoke tests
 
     Scenario: Sentinel mode initially works
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        And senticache host "redis1" should have master "redis1" within "30" seconds
-        And senticache host "redis2" should have master "redis1" within "30" seconds
-        And senticache host "redis3" should have master "redis1" within "30" seconds
-        And path "/var/lib/redis/appendonlydir" does not exist on "redis1"
-        And path "/var/lib/redis/appendonlydir" exists on "redis2"
-        And path "/var/lib/redis/appendonlydir" exists on "redis3"
+        And senticache host "valkey1" should have master "valkey1" within "30" seconds
+        And senticache host "valkey2" should have master "valkey1" within "30" seconds
+        And senticache host "valkey3" should have master "valkey1" within "30" seconds
+        And path "/var/lib/valkey/appendonlydir" does not exist on "valkey1"
+        And path "/var/lib/valkey/appendonlydir" exists on "valkey2"
+        And path "/var/lib/valkey/appendonlydir" exists on "valkey3"
 
     Scenario: Sentinel mode duplicate ip resolve does not break rdsync
         Given sentinel shard is up and running
-        When I run command on host "redis1"
-        """
-            echo '192.168.234.14 redis2 test1' >> /etc/hosts
-            echo '192.168.234.14 redis2 test2' >> /etc/hosts
-            echo '192.168.234.15 redis3 test3' >> /etc/hosts
-            echo '192.168.234.15 redis3 test4' >> /etc/hosts
-        """
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        When I run command on host "valkey1"
+        """
+            echo '192.168.234.14 valkey2 test1' >> /etc/hosts
+            echo '192.168.234.14 valkey2 test2' >> /etc/hosts
+            echo '192.168.234.15 valkey3 test3' >> /etc/hosts
+            echo '192.168.234.15 valkey3 test4' >> /etc/hosts
+        """
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        And senticache host "redis1" should have master "redis1" within "30" seconds
-        And senticache host "redis2" should have master "redis1" within "30" seconds
-        And senticache host "redis3" should have master "redis1" within "30" seconds
-        When I run command on host "redis3"
+        And senticache host "valkey1" should have master "valkey1" within "30" seconds
+        And senticache host "valkey2" should have master "valkey1" within "30" seconds
+        And senticache host "valkey3" should have master "valkey1" within "30" seconds
+        When I run command on host "valkey3"
         """
             supervisorctl stop rdsync
         """
-        And I run command on host "redis2"
+        And I run command on host "valkey2"
         """
             supervisorctl stop rdsync
         """
-        And I run command on host "redis1"
+        And I run command on host "valkey1"
         """
             supervisorctl stop rdsync
         """
-        And I run command on redis host "redis1"
+        And I run command on valkey host "valkey1"
         """
-            CONFIG SET quorum-replicas redis2:6379
+            CONFIG SET quorum-replicas valkey2:6379
         """
-        And I run command on host "redis1"
+        And I run command on host "valkey1"
         """
             supervisorctl start rdsync
         """
-        And I run command on host "redis2"
+        And I run command on host "valkey2"
         """
             supervisorctl start rdsync
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
             supervisorctl start rdsync
         """
@@ -73,25 +73,25 @@ Feature: Sentinel mode smoke tests
         """
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """ 
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
             CONFIG GET quorum-replicas
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
-            .*redis2.*
+            .*valkey2.*
         """
-        And redis cmd result should match regexp
+        And valkey cmd result should match regexp
         """
-            .*redis3.*
+            .*valkey3.*
         """
-        And redis cmd result should match regexp
+        And valkey cmd result should match regexp
         """
             .*192.168.234.14.*
         """
-        And redis cmd result should match regexp
+        And valkey cmd result should match regexp
         """
             .*192.168.234.15.*
         """
diff --git a/tests/features/01_cluster_maintenance.feature b/tests/features/01_cluster_maintenance.feature
index 09bf138..cded069 100644
--- a/tests/features/01_cluster_maintenance.feature
+++ b/tests/features/01_cluster_maintenance.feature
@@ -4,7 +4,7 @@ Feature: Cluster mode maintenance tests
         Given clustered shard is up and running
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When I set zookeeper node "/test/maintenance" to
         """
@@ -29,7 +29,7 @@ Feature: Cluster mode maintenance tests
         Then zookeeper node "/test/maintenance" should not exist within "30" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When I set zookeeper node "/test/maintenance" to
         """
@@ -47,25 +47,25 @@ Feature: Cluster mode maintenance tests
         When I delete zookeeper node "/test/maintenance"
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
 
     Scenario: Cluster mode maintenance enter sets quorum-replicas-to-write to 0 on master
         Given clustered shard is up and running
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When I wait for "60" seconds
-        And I run command on redis host "redis1"
+        And I run command on valkey host "valkey1"
         """
             CONFIG GET quorum-replicas-to-write
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*quorum-replicas-to-write 1.*
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             rdsync maint on
         """
@@ -81,11 +81,11 @@ Feature: Cluster mode maintenance tests
         }
         """
         And zookeeper node "/test/active_nodes" should not exist
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
             CONFIG GET quorum-replicas-to-write
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*quorum-replicas-to-write *0.*
         """
@@ -94,9 +94,9 @@ Feature: Cluster mode maintenance tests
         Given clustered shard is up and running
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             rdsync maint on
         """
@@ -112,19 +112,19 @@ Feature: Cluster mode maintenance tests
         }
         """
         And zookeeper node "/test/active_nodes" should not exist
-        When I run command on redis host "redis2"
+        When I run command on valkey host "valkey2"
         """
             CLUSTER FAILOVER
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*OK.*
         """
-        And redis host "redis1" should become replica of "redis2" within "15" seconds
-        And replication on redis host "redis1" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis2" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
-        When I run command on host "redis1"
+        And valkey host "valkey1" should become replica of "valkey2" within "15" seconds
+        And replication on valkey host "valkey1" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey2" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
+        When I run command on host "valkey1"
         """
             rdsync mnt off
         """
@@ -135,9 +135,9 @@ Feature: Cluster mode maintenance tests
         """
         Then zookeeper node "/test/master" should match json within "30" seconds
         """
-            "redis2"
+            "valkey2"
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             rdsync maintenance
         """
@@ -151,9 +151,9 @@ Feature: Cluster mode maintenance tests
         Given clustered shard is up and running
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             rdsync maint on
         """
@@ -172,52 +172,52 @@ Feature: Cluster mode maintenance tests
         When host "zoo3" is detached from the network
         And host "zoo2" is detached from the network
         And host "zoo1" is detached from the network
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             rdsync info
         """
         Then command return code should be "1"
         When I wait for "30" seconds
-        Then redis host "redis1" should be master
-        And redis host "redis2" should be replica of "redis1"
-        And redis host "redis3" should be replica of "redis1"
-        When I run command on host "redis1" with timeout "20" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should be replica of "valkey1"
+        And valkey host "valkey3" should be replica of "valkey1"
+        When I run command on host "valkey1" with timeout "20" seconds
         """
             supervisorctl restart rdsync
         """
         Then command return code should be "0"
-        When I run command on host "redis2" with timeout "20" seconds
+        When I run command on host "valkey2" with timeout "20" seconds
         """
             supervisorctl restart rdsync
         """
         Then command return code should be "0"
-        When I run command on host "redis2" with timeout "20" seconds
+        When I run command on host "valkey2" with timeout "20" seconds
         """
             supervisorctl restart rdsync
         """
         Then command return code should be "0"
         When I wait for "30" seconds
-        Then redis host "redis1" should be master
-        And redis host "redis2" should be replica of "redis1"
-        And redis host "redis3" should be replica of "redis1"
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should be replica of "valkey1"
+        And valkey host "valkey3" should be replica of "valkey1"
         When host "zoo3" is attached to the network
         And host "zoo2" is attached to the network
         And host "zoo1" is attached to the network
         Then zookeeper node "/test/maintenance" should match json within "90" seconds
         """
         {
-            "initiated_by": "redis1"
+            "initiated_by": "valkey1"
         }
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             rdsync maint off
         """
         Then command return code should be "0"
-        And redis host "redis1" should be master
-        And redis host "redis2" should be replica of "redis1"
-        And redis host "redis3" should be replica of "redis1"
-        And zookeeper node "/test/health/redis1" should match json within "30" seconds
+        And valkey host "valkey1" should be master
+        And valkey host "valkey2" should be replica of "valkey1"
+        And valkey host "valkey3" should be replica of "valkey1"
+        And zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -225,14 +225,14 @@ Feature: Cluster mode maintenance tests
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
diff --git a/tests/features/01_sentinel_maintenance.feature b/tests/features/01_sentinel_maintenance.feature
index e5e359b..4c709ff 100644
--- a/tests/features/01_sentinel_maintenance.feature
+++ b/tests/features/01_sentinel_maintenance.feature
@@ -4,11 +4,11 @@ Feature: Sentinel mode maintenance tests
         Given sentinel shard is up and running
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        And senticache host "redis1" should have master "redis1" within "30" seconds
-        And senticache host "redis2" should have master "redis1" within "30" seconds
-        And senticache host "redis3" should have master "redis1" within "30" seconds
+        And senticache host "valkey1" should have master "valkey1" within "30" seconds
+        And senticache host "valkey2" should have master "valkey1" within "30" seconds
+        And senticache host "valkey3" should have master "valkey1" within "30" seconds
         When I set zookeeper node "/test/maintenance" to
         """
         {
@@ -32,7 +32,7 @@ Feature: Sentinel mode maintenance tests
         Then zookeeper node "/test/maintenance" should not exist within "30" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When I set zookeeper node "/test/maintenance" to
         """
@@ -47,31 +47,31 @@ Feature: Sentinel mode maintenance tests
         }
         """
         And zookeeper node "/test/active_nodes" should not exist
-        And senticache host "redis1" should have master "redis1" within "30" seconds
-        And senticache host "redis2" should have master "redis1" within "30" seconds
-        And senticache host "redis3" should have master "redis1" within "30" seconds
+        And senticache host "valkey1" should have master "valkey1" within "30" seconds
+        And senticache host "valkey2" should have master "valkey1" within "30" seconds
+        And senticache host "valkey3" should have master "valkey1" within "30" seconds
         When I delete zookeeper node "/test/maintenance"
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
 
     Scenario: Sentinel mode maintenance enter sets quorum-replicas-to-write to 0 on master
         Given sentinel shard is up and running
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When I wait for "60" seconds
-        And I run command on redis host "redis1"
+        And I run command on valkey host "valkey1"
         """
             CONFIG GET quorum-replicas-to-write
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*quorum-replicas-to-write 1.*
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             rdsync maint on
         """
@@ -87,11 +87,11 @@ Feature: Sentinel mode maintenance tests
         }
         """
         And zookeeper node "/test/active_nodes" should not exist
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
             CONFIG GET quorum-replicas-to-write
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*quorum-replicas-to-write *0.*
         """
@@ -100,9 +100,9 @@ Feature: Sentinel mode maintenance tests
         Given sentinel shard is up and running
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             rdsync maint on
         """
@@ -118,35 +118,35 @@ Feature: Sentinel mode maintenance tests
         }
         """
         And zookeeper node "/test/active_nodes" should not exist
-        When I run command on redis host "redis3"
+        When I run command on valkey host "valkey3"
         """
-            REPLICAOF redis2 6379
+            REPLICAOF valkey2 6379
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*OK.*
         """
-        When I run command on redis host "redis2"
+        When I run command on valkey host "valkey2"
         """
             REPLICAOF NO ONE
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*OK.*
         """
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
-            REPLICAOF redis2 6379
+            REPLICAOF valkey2 6379
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*OK.*
         """
-        And redis host "redis1" should become replica of "redis2" within "15" seconds
-        And replication on redis host "redis1" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis2" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
-        When I run command on host "redis1"
+        And valkey host "valkey1" should become replica of "valkey2" within "15" seconds
+        And replication on valkey host "valkey1" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey2" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
+        When I run command on host "valkey1"
         """
             rdsync mnt off
         """
@@ -157,12 +157,12 @@ Feature: Sentinel mode maintenance tests
         """
         Then zookeeper node "/test/master" should match json within "30" seconds
         """
-            "redis2"
+            "valkey2"
         """
-        And senticache host "redis1" should have master "redis2" within "30" seconds
-        And senticache host "redis2" should have master "redis2" within "30" seconds
-        And senticache host "redis3" should have master "redis2" within "30" seconds
-        When I run command on host "redis1"
+        And senticache host "valkey1" should have master "valkey2" within "30" seconds
+        And senticache host "valkey2" should have master "valkey2" within "30" seconds
+        And senticache host "valkey3" should have master "valkey2" within "30" seconds
+        When I run command on host "valkey1"
         """
             rdsync maintenance
         """
@@ -176,12 +176,12 @@ Feature: Sentinel mode maintenance tests
         Given sentinel shard is up and running
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        And senticache host "redis1" should have master "redis1" within "30" seconds
-        And senticache host "redis2" should have master "redis1" within "30" seconds
-        And senticache host "redis3" should have master "redis1" within "30" seconds
-        When I run command on host "redis1"
+        And senticache host "valkey1" should have master "valkey1" within "30" seconds
+        And senticache host "valkey2" should have master "valkey1" within "30" seconds
+        And senticache host "valkey3" should have master "valkey1" within "30" seconds
+        When I run command on host "valkey1"
         """
             rdsync maint on
         """
@@ -200,55 +200,55 @@ Feature: Sentinel mode maintenance tests
         When host "zoo3" is detached from the network
         And host "zoo2" is detached from the network
         And host "zoo1" is detached from the network
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             rdsync info
         """
         Then command return code should be "1"
-        And senticache host "redis1" should have master "redis1" within "30" seconds
-        And senticache host "redis2" should have master "redis1" within "30" seconds
-        And senticache host "redis3" should have master "redis1" within "30" seconds
+        And senticache host "valkey1" should have master "valkey1" within "30" seconds
+        And senticache host "valkey2" should have master "valkey1" within "30" seconds
+        And senticache host "valkey3" should have master "valkey1" within "30" seconds
         When I wait for "30" seconds
-        Then redis host "redis1" should be master
-        And redis host "redis2" should be replica of "redis1"
-        And redis host "redis3" should be replica of "redis1"
-        When I run command on host "redis1" with timeout "20" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should be replica of "valkey1"
+        And valkey host "valkey3" should be replica of "valkey1"
+        When I run command on host "valkey1" with timeout "20" seconds
         """
             supervisorctl restart rdsync
         """
         Then command return code should be "0"
-        When I run command on host "redis2" with timeout "20" seconds
+        When I run command on host "valkey2" with timeout "20" seconds
         """
             supervisorctl restart rdsync
         """
         Then command return code should be "0"
-        When I run command on host "redis2" with timeout "20" seconds
+        When I run command on host "valkey2" with timeout "20" seconds
         """
             supervisorctl restart rdsync
         """
         Then command return code should be "0"
         When I wait for "30" seconds
-        Then redis host "redis1" should be master
-        And redis host "redis2" should be replica of "redis1"
-        And redis host "redis3" should be replica of "redis1"
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should be replica of "valkey1"
+        And valkey host "valkey3" should be replica of "valkey1"
         When host "zoo3" is attached to the network
         And host "zoo2" is attached to the network
         And host "zoo1" is attached to the network
         Then zookeeper node "/test/maintenance" should match json within "90" seconds
         """
         {
-            "initiated_by": "redis1"
+            "initiated_by": "valkey1"
         }
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             rdsync maint off
         """
         Then command return code should be "0"
-        And redis host "redis1" should be master
-        And redis host "redis2" should be replica of "redis1"
-        And redis host "redis3" should be replica of "redis1"
-        And zookeeper node "/test/health/redis1" should match json within "30" seconds
+        And valkey host "valkey1" should be master
+        And valkey host "valkey2" should be replica of "valkey1"
+        And valkey host "valkey3" should be replica of "valkey1"
+        And zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -256,20 +256,20 @@ Feature: Sentinel mode maintenance tests
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And senticache host "redis1" should have master "redis1" within "30" seconds
-        And senticache host "redis2" should have master "redis1" within "30" seconds
-        And senticache host "redis3" should have master "redis1" within "30" seconds
+        And senticache host "valkey1" should have master "valkey1" within "30" seconds
+        And senticache host "valkey2" should have master "valkey1" within "30" seconds
+        And senticache host "valkey3" should have master "valkey1" within "30" seconds
diff --git a/tests/features/02_cluster_switchover_from.feature b/tests/features/02_cluster_switchover_from.feature
index 81ffe6f..61c746c 100644
--- a/tests/features/02_cluster_switchover_from.feature
+++ b/tests/features/02_cluster_switchover_from.feature
@@ -2,7 +2,7 @@ Feature: Cluster mode switchover from old master
 
     Scenario: Cluster mode switchover (from) with healthy master works
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -10,23 +10,23 @@ Feature: Cluster mode switchover from old master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            rdsync switch --from redis1
+            rdsync switch --from valkey1
         """
         Then command return code should be "0"
         And command output should match regexp
@@ -36,7 +36,7 @@ Feature: Cluster mode switchover from old master
         And zookeeper node "/test/last_switch" should match json within "30" seconds
         """
         {
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -44,14 +44,14 @@ Feature: Cluster mode switchover from old master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
+        Then valkey host "{{.new_master}}" should be master
         When I wait for "30" seconds
-        Then path "/var/lib/redis/appendonlydir" exists on "redis1"
-        Then path "/var/lib/redis/appendonlydir" does not exist on "{{.new_master}}"
+        Then path "/var/lib/valkey/appendonlydir" exists on "valkey1"
+        Then path "/var/lib/valkey/appendonlydir" does not exist on "{{.new_master}}"
 
     Scenario: Cluster mode switchover (from) with unhealthy replicas is rejected
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -59,25 +59,25 @@ Feature: Cluster mode switchover from old master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When redis on host "redis3" is killed
-        And redis on host "redis2" is killed
-        And I run command on host "redis1"
+        When valkey on host "valkey3" is killed
+        And valkey on host "valkey2" is killed
+        And I run command on host "valkey1"
         """
-            rdsync switch --from redis1 --wait=0s
+            rdsync switch --from valkey1 --wait=0s
         """
         Then command return code should be "0"
         And command output should match regexp
@@ -87,10 +87,10 @@ Feature: Cluster mode switchover from old master
         And zookeeper node "/test/last_rejected_switch" should match json within "30" seconds
         """
         {
-            "from": "redis1",
+            "from": "valkey1",
             "to": "",
             "cause": "manual",
-            "initiated_by": "redis1",
+            "initiated_by": "valkey1",
             "result": {
                 "ok": false,
                 "error": "no quorum, have 0 replicas while 2 is required"
@@ -100,7 +100,7 @@ Feature: Cluster mode switchover from old master
 
     Scenario: Cluster mode switchover (from) with unhealthy replicas is not rejected if was approved before
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -108,43 +108,43 @@ Feature: Cluster mode switchover from old master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When redis on host "redis3" is stopped
-        And redis on host "redis2" is stopped
+        When valkey on host "valkey3" is stopped
+        And valkey on host "valkey2" is stopped
         And I set zookeeper node "/test/current_switch" to
         """
         {
-            "from": "redis1",
+            "from": "valkey1",
             "to": "",
             "cause": "manual",
-            "initiated_by": "redis1",
+            "initiated_by": "valkey1",
             "run_count": 1
         }
         """
         Then zookeeper node "/test/last_switch" should not exist within "30" seconds
         And zookeeper node "/test/last_rejected_switch" should not exist within "30" seconds
-        When redis on host "redis3" is started
-        And redis on host "redis2" is started
+        When valkey on host "valkey3" is started
+        And valkey on host "valkey2" is started
         Then zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
-            "from": "redis1",
+            "from": "valkey1",
             "to": "",
             "cause": "manual",
-            "initiated_by": "redis1",
+            "initiated_by": "valkey1",
             "result": {
                 "ok": true
             }
@@ -153,7 +153,7 @@ Feature: Cluster mode switchover from old master
 
     Scenario: Cluster mode switchover (from) works with dead replica
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -161,22 +161,22 @@ Feature: Cluster mode switchover from old master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When redis on host "redis3" is stopped
-        Then zookeeper node "/test/health/redis3" should match json within "30" seconds
+        When valkey on host "valkey3" is stopped
+        Then zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": false,
@@ -185,11 +185,11 @@ Feature: Cluster mode switchover from old master
         """
         And zookeeper node "/test/active_nodes" should match json_exactly within "60" seconds
         """
-            ["redis1","redis2"]
+            ["valkey1","valkey2"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            rdsync switch --from redis1 --wait=0s
+            rdsync switch --from valkey1 --wait=0s
         """
         Then command return code should be "0"
         And command output should match regexp
@@ -199,7 +199,7 @@ Feature: Cluster mode switchover from old master
         And zookeeper node "/test/last_switch" should match json within "30" seconds
         """
         {
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -207,11 +207,11 @@ Feature: Cluster mode switchover from old master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
+        Then valkey host "{{.new_master}}" should be master
 
     Scenario: Cluster mode switchover (from) with read-only fs master works
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -219,27 +219,27 @@ Feature: Cluster mode switchover from old master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            chattr +i /etc/redis/redis.conf
+            chattr +i /etc/valkey/valkey.conf
         """
-        And I run command on host "redis1"
+        And I run command on host "valkey1"
         """
-            rdsync switch --from redis1
+            rdsync switch --from valkey1
         """
         Then command return code should be "0"
         And command output should match regexp
@@ -249,7 +249,7 @@ Feature: Cluster mode switchover from old master
         And zookeeper node "/test/last_switch" should match json within "30" seconds
         """
         {
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -257,9 +257,9 @@ Feature: Cluster mode switchover from old master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
+        Then valkey host "{{.new_master}}" should be master
         # Just to make docker cleanup happy
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            chattr -i /etc/redis/redis.conf
+            chattr -i /etc/valkey/valkey.conf
         """
diff --git a/tests/features/02_sentinel_switchover_from.feature b/tests/features/02_sentinel_switchover_from.feature
index efcb5c7..daf674d 100644
--- a/tests/features/02_sentinel_switchover_from.feature
+++ b/tests/features/02_sentinel_switchover_from.feature
@@ -2,7 +2,7 @@ Feature: Sentinel mode switchover from old master
 
     Scenario: Sentinel mode switchover with healthy master works
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -10,23 +10,23 @@ Feature: Sentinel mode switchover from old master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            rdsync switch --from redis1
+            rdsync switch --from valkey1
         """
         Then command return code should be "0"
         And command output should match regexp
@@ -36,7 +36,7 @@ Feature: Sentinel mode switchover from old master
         And zookeeper node "/test/last_switch" should match json within "30" seconds
         """
         {
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -44,17 +44,17 @@ Feature: Sentinel mode switchover from old master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
-        And senticache host "redis1" should have master "{{.new_master}}" within "30" seconds
-        And senticache host "redis2" should have master "{{.new_master}}" within "30" seconds
-        And senticache host "redis3" should have master "{{.new_master}}" within "30" seconds
+        Then valkey host "{{.new_master}}" should be master
+        And senticache host "valkey1" should have master "{{.new_master}}" within "30" seconds
+        And senticache host "valkey2" should have master "{{.new_master}}" within "30" seconds
+        And senticache host "valkey3" should have master "{{.new_master}}" within "30" seconds
         When I wait for "30" seconds
-        Then path "/var/lib/redis/appendonlydir" exists on "redis1"
-        Then path "/var/lib/redis/appendonlydir" does not exist on "{{.new_master}}"
+        Then path "/var/lib/valkey/appendonlydir" exists on "valkey1"
+        Then path "/var/lib/valkey/appendonlydir" does not exist on "{{.new_master}}"
 
     Scenario: Sentinel mode switchover with unhealthy replicas is rejected
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -62,25 +62,25 @@ Feature: Sentinel mode switchover from old master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When redis on host "redis3" is killed
-        And redis on host "redis2" is killed
-        And I run command on host "redis1"
+        When valkey on host "valkey3" is killed
+        And valkey on host "valkey2" is killed
+        And I run command on host "valkey1"
         """
-            rdsync switch --from redis1 --wait=0s
+            rdsync switch --from valkey1 --wait=0s
         """
         Then command return code should be "0"
         And command output should match regexp
@@ -90,10 +90,10 @@ Feature: Sentinel mode switchover from old master
         And zookeeper node "/test/last_rejected_switch" should match json within "30" seconds
         """
         {
-            "from": "redis1",
+            "from": "valkey1",
             "to": "",
             "cause": "manual",
-            "initiated_by": "redis1",
+            "initiated_by": "valkey1",
             "result": {
                 "ok": false,
                 "error": "no quorum, have 0 replicas while 2 is required"
@@ -103,7 +103,7 @@ Feature: Sentinel mode switchover from old master
 
     Scenario: Sentinel mode switchover with unhealthy replicas is not rejected if was approved before
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -111,43 +111,43 @@ Feature: Sentinel mode switchover from old master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When redis on host "redis3" is stopped
-        And redis on host "redis2" is stopped
+        When valkey on host "valkey3" is stopped
+        And valkey on host "valkey2" is stopped
         And I set zookeeper node "/test/current_switch" to
         """
         {
-            "from": "redis1",
+            "from": "valkey1",
             "to": "",
             "cause": "manual",
-            "initiated_by": "redis1",
+            "initiated_by": "valkey1",
             "run_count": 1
         }
         """
         Then zookeeper node "/test/last_switch" should not exist within "30" seconds
         And zookeeper node "/test/last_rejected_switch" should not exist within "30" seconds
-        When redis on host "redis3" is started
-        And redis on host "redis2" is started
+        When valkey on host "valkey3" is started
+        And valkey on host "valkey2" is started
         Then zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
-            "from": "redis1",
+            "from": "valkey1",
             "to": "",
             "cause": "manual",
-            "initiated_by": "redis1",
+            "initiated_by": "valkey1",
             "result": {
                 "ok": true
             }
@@ -155,14 +155,14 @@ Feature: Sentinel mode switchover from old master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
-        And senticache host "redis1" should have master "{{.new_master}}" within "30" seconds
-        And senticache host "redis2" should have master "{{.new_master}}" within "30" seconds
-        And senticache host "redis3" should have master "{{.new_master}}" within "30" seconds
+        Then valkey host "{{.new_master}}" should be master
+        And senticache host "valkey1" should have master "{{.new_master}}" within "30" seconds
+        And senticache host "valkey2" should have master "{{.new_master}}" within "30" seconds
+        And senticache host "valkey3" should have master "{{.new_master}}" within "30" seconds
 
     Scenario: Sentinel mode switchover works with dead replica
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -170,22 +170,22 @@ Feature: Sentinel mode switchover from old master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When redis on host "redis3" is stopped
-        Then zookeeper node "/test/health/redis3" should match json within "30" seconds
+        When valkey on host "valkey3" is stopped
+        Then zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": false,
@@ -194,11 +194,11 @@ Feature: Sentinel mode switchover from old master
         """
         And zookeeper node "/test/active_nodes" should match json_exactly within "60" seconds
         """
-            ["redis1","redis2"]
+            ["valkey1","valkey2"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            rdsync switch --from redis1 --wait=0s
+            rdsync switch --from valkey1 --wait=0s
         """
         Then command return code should be "0"
         And command output should match regexp
@@ -208,7 +208,7 @@ Feature: Sentinel mode switchover from old master
         And zookeeper node "/test/last_switch" should match json within "30" seconds
         """
         {
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -216,14 +216,14 @@ Feature: Sentinel mode switchover from old master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
-        And senticache host "redis1" should have master "{{.new_master}}" within "30" seconds
-        And senticache host "redis2" should have master "{{.new_master}}" within "30" seconds
-        And senticache host "redis3" should have master "{{.new_master}}" within "30" seconds
+        Then valkey host "{{.new_master}}" should be master
+        And senticache host "valkey1" should have master "{{.new_master}}" within "30" seconds
+        And senticache host "valkey2" should have master "{{.new_master}}" within "30" seconds
+        And senticache host "valkey3" should have master "{{.new_master}}" within "30" seconds
 
     Scenario: Sentinel mode switchover (from) with read-only fs master works
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -231,27 +231,27 @@ Feature: Sentinel mode switchover from old master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            chattr +i /etc/redis/redis.conf
+            chattr +i /etc/valkey/valkey.conf
         """
-        And I run command on host "redis1"
+        And I run command on host "valkey1"
         """
-            rdsync switch --from redis1
+            rdsync switch --from valkey1
         """
         Then command return code should be "0"
         And command output should match regexp
@@ -261,7 +261,7 @@ Feature: Sentinel mode switchover from old master
         And zookeeper node "/test/last_switch" should match json within "30" seconds
         """
         {
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -269,12 +269,12 @@ Feature: Sentinel mode switchover from old master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
-        And senticache host "redis1" should have master "{{.new_master}}" within "30" seconds
-        And senticache host "redis2" should have master "{{.new_master}}" within "30" seconds
-        And senticache host "redis3" should have master "{{.new_master}}" within "30" seconds
+        Then valkey host "{{.new_master}}" should be master
+        And senticache host "valkey1" should have master "{{.new_master}}" within "30" seconds
+        And senticache host "valkey2" should have master "{{.new_master}}" within "30" seconds
+        And senticache host "valkey3" should have master "{{.new_master}}" within "30" seconds
         # Just to make docker cleanup happy
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            chattr -i /etc/redis/redis.conf
+            chattr -i /etc/valkey/valkey.conf
         """
diff --git a/tests/features/03_cluster_switchover_to.feature b/tests/features/03_cluster_switchover_to.feature
index 22407a7..8a720b3 100644
--- a/tests/features/03_cluster_switchover_to.feature
+++ b/tests/features/03_cluster_switchover_to.feature
@@ -2,7 +2,7 @@ Feature: Cluster mode switchover to specified host
 
     Scenario: Cluster mode switchover (to) with healthy master works
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -10,23 +10,23 @@ Feature: Cluster mode switchover to specified host
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            rdsync switch --to redis2
+            rdsync switch --to valkey2
         """
         Then command return code should be "0"
         And command output should match regexp
@@ -36,7 +36,7 @@ Feature: Cluster mode switchover to specified host
         And zookeeper node "/test/last_switch" should match json within "30" seconds
         """
         {
-            "to": "redis2",
+            "to": "valkey2",
             "result": {
                 "ok": true
             }
@@ -44,13 +44,13 @@ Feature: Cluster mode switchover to specified host
         """
         And zookeeper node "/test/master" should match regexp within "30" seconds
         """
-            redis2
+            valkey2
         """
-        And redis host "redis2" should be master
+        And valkey host "valkey2" should be master
 
     Scenario: Cluster mode switchover (to) works with dead replica
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -58,22 +58,22 @@ Feature: Cluster mode switchover to specified host
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When redis on host "redis3" is stopped
-        Then zookeeper node "/test/health/redis3" should match json within "30" seconds
+        When valkey on host "valkey3" is stopped
+        Then zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": false,
@@ -82,11 +82,11 @@ Feature: Cluster mode switchover to specified host
         """
         And zookeeper node "/test/active_nodes" should match json_exactly within "60" seconds
         """
-            ["redis1","redis2"]
+            ["valkey1","valkey2"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            rdsync switch --to redis2 --wait=0s
+            rdsync switch --to valkey2 --wait=0s
         """
         Then command return code should be "0"
         And command output should match regexp
@@ -96,7 +96,7 @@ Feature: Cluster mode switchover to specified host
         And zookeeper node "/test/last_switch" should match json within "30" seconds
         """
         {
-            "to": "redis2",
+            "to": "valkey2",
             "result": {
                 "ok": true
             }
@@ -104,13 +104,13 @@ Feature: Cluster mode switchover to specified host
         """
         And zookeeper node "/test/master" should match regexp within "30" seconds
         """
-            redis2
+            valkey2
         """
-        And redis host "redis2" should be master
+        And valkey host "valkey2" should be master
 
     Scenario: Cluster mode switchover to non-active host fails
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -118,22 +118,22 @@ Feature: Cluster mode switchover to specified host
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When redis on host "redis3" is stopped
-        Then zookeeper node "/test/health/redis3" should match json within "30" seconds
+        When valkey on host "valkey3" is stopped
+        Then zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": false,
@@ -142,14 +142,14 @@ Feature: Cluster mode switchover to specified host
         """
         And zookeeper node "/test/active_nodes" should match json_exactly within "60" seconds
         """
-            ["redis1","redis2"]
+            ["valkey1","valkey2"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-        rdsync switch --to redis3 --wait=0s
+        rdsync switch --to valkey3 --wait=0s
         """
         Then command return code should be "1"
         And command output should match regexp
         """
-        redis3 is not active
+        valkey3 is not active
         """
diff --git a/tests/features/03_sentinel_switchover_to.feature b/tests/features/03_sentinel_switchover_to.feature
index 0ad08f2..8205963 100644
--- a/tests/features/03_sentinel_switchover_to.feature
+++ b/tests/features/03_sentinel_switchover_to.feature
@@ -2,7 +2,7 @@ Feature: Sentinel mode switchover to specified host
 
     Scenario: Sentinel mode switchover (to) with healthy master works
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -10,23 +10,23 @@ Feature: Sentinel mode switchover to specified host
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            rdsync switch --to redis2
+            rdsync switch --to valkey2
         """
         Then command return code should be "0"
         And command output should match regexp
@@ -36,7 +36,7 @@ Feature: Sentinel mode switchover to specified host
         And zookeeper node "/test/last_switch" should match json within "30" seconds
         """
         {
-            "to": "redis2",
+            "to": "valkey2",
             "result": {
                 "ok": true
             }
@@ -44,16 +44,16 @@ Feature: Sentinel mode switchover to specified host
         """
         And zookeeper node "/test/master" should match regexp within "30" seconds
         """
-            redis2
+            valkey2
         """
-        And redis host "redis2" should be master
-        And senticache host "redis1" should have master "redis2" within "30" seconds
-        And senticache host "redis2" should have master "redis2" within "30" seconds
-        And senticache host "redis3" should have master "redis2" within "30" seconds
+        And valkey host "valkey2" should be master
+        And senticache host "valkey1" should have master "valkey2" within "30" seconds
+        And senticache host "valkey2" should have master "valkey2" within "30" seconds
+        And senticache host "valkey3" should have master "valkey2" within "30" seconds
 
     Scenario: Sentinel mode switchover (to) works with dead replica
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -61,22 +61,22 @@ Feature: Sentinel mode switchover to specified host
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When redis on host "redis3" is stopped
-        Then zookeeper node "/test/health/redis3" should match json within "30" seconds
+        When valkey on host "valkey3" is stopped
+        Then zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": false,
@@ -85,11 +85,11 @@ Feature: Sentinel mode switchover to specified host
         """
         And zookeeper node "/test/active_nodes" should match json_exactly within "60" seconds
         """
-            ["redis1","redis2"]
+            ["valkey1","valkey2"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            rdsync switch --to redis2 --wait=0s
+            rdsync switch --to valkey2 --wait=0s
         """
         Then command return code should be "0"
         And command output should match regexp
@@ -99,7 +99,7 @@ Feature: Sentinel mode switchover to specified host
         And zookeeper node "/test/last_switch" should match json within "30" seconds
         """
         {
-            "to": "redis2",
+            "to": "valkey2",
             "result": {
                 "ok": true
             }
@@ -107,16 +107,16 @@ Feature: Sentinel mode switchover to specified host
         """
         And zookeeper node "/test/master" should match regexp within "30" seconds
         """
-            redis2
+            valkey2
         """
-        And redis host "redis2" should be master
-        And senticache host "redis1" should have master "redis2" within "30" seconds
-        And senticache host "redis2" should have master "redis2" within "30" seconds
-        And senticache host "redis3" should have master "redis2" within "30" seconds
+        And valkey host "valkey2" should be master
+        And senticache host "valkey1" should have master "valkey2" within "30" seconds
+        And senticache host "valkey2" should have master "valkey2" within "30" seconds
+        And senticache host "valkey3" should have master "valkey2" within "30" seconds
 
     Scenario: Sentinel mode switchover to non-active host fails
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -124,22 +124,22 @@ Feature: Sentinel mode switchover to specified host
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When redis on host "redis3" is stopped
-        Then zookeeper node "/test/health/redis3" should match json within "30" seconds
+        When valkey on host "valkey3" is stopped
+        Then zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": false,
@@ -148,14 +148,14 @@ Feature: Sentinel mode switchover to specified host
         """
         And zookeeper node "/test/active_nodes" should match json_exactly within "60" seconds
         """
-            ["redis1","redis2"]
+            ["valkey1","valkey2"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-        rdsync switch --to redis3 --wait=0s
+        rdsync switch --to valkey3 --wait=0s
         """
         Then command return code should be "1"
         And command output should match regexp
         """
-        redis3 is not active
+        valkey3 is not active
         """
diff --git a/tests/features/04_cluster_failover.feature b/tests/features/04_cluster_failover.feature
index 22afc50..37ca58b 100644
--- a/tests/features/04_cluster_failover.feature
+++ b/tests/features/04_cluster_failover.feature
@@ -2,7 +2,7 @@ Feature: Cluster mode failover from dead master
 
     Scenario: Cluster mode failover from dead master works
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -10,31 +10,31 @@ Feature: Cluster mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When host "redis1" is stopped
-        Then redis host "redis1" should become unavailable within "10" seconds
+        When host "valkey1" is stopped
+        Then valkey host "valkey1" should become unavailable within "10" seconds
         And  zookeeper node "/test/manager" should match regexp within "30" seconds
         """
-            .*redis[23].*
+            .*valkey[23].*
         """
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -42,14 +42,14 @@ Feature: Cluster mode failover from dead master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
-        When host "redis1" is started
-        Then redis host "redis1" should become available within "20" seconds
-        And redis host "redis1" should become replica of "{{.new_master}}" within "30" seconds
+        Then valkey host "{{.new_master}}" should be master
+        When host "valkey1" is started
+        Then valkey host "valkey1" should become available within "20" seconds
+        And valkey host "valkey1" should become replica of "{{.new_master}}" within "30" seconds
 
     Scenario: Cluster mode failover does not work in absence of quorum
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -57,35 +57,35 @@ Feature: Cluster mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When redis on host "redis1" is killed
-        And redis on host "redis2" is killed
-        Then redis host "redis1" should become unavailable within "10" seconds
-        And redis host "redis2" should become unavailable within "10" seconds
+        When valkey on host "valkey1" is killed
+        And valkey on host "valkey2" is killed
+        Then valkey host "valkey1" should become unavailable within "10" seconds
+        And valkey host "valkey2" should become unavailable within "10" seconds
         When I wait for "60" seconds
-        Then redis host "redis3" should be replica of "redis1"
+        Then valkey host "valkey3" should be replica of "valkey1"
         And zookeeper node "/test/master" should match regexp
         """
-            redis1
+            valkey1
         """
         And zookeeper node "/test/manager" should match regexp
         """
-            redis1
+            valkey1
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             grep Failover /var/log/rdsync.log
         """
@@ -96,7 +96,7 @@ Feature: Cluster mode failover from dead master
 
     Scenario: Cluster mode failover selects active replica based on priority
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -104,45 +104,45 @@ Feature: Cluster mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            rdsync host add redis2 --priority 200
+            rdsync host add valkey2 --priority 200
         """
         Then command return code should be "0"
         And command output should match regexp
         """
             host has been added
         """
-        When redis on host "redis1" is killed
+        When valkey on host "valkey1" is killed
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
         }
         """
-        Then redis host "redis2" should be master
+        Then valkey host "valkey2" should be master
 
     Scenario: Cluster mode failover works with dynamic quorum
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -150,31 +150,31 @@ Feature: Cluster mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When host "redis1" is stopped
-        Then redis host "redis1" should become unavailable within "10" seconds
+        When host "valkey1" is stopped
+        Then valkey host "valkey1" should become unavailable within "10" seconds
         And  zookeeper node "/test/manager" should match regexp within "30" seconds
         """
-            .*redis[23].*
+            .*valkey[23].*
         """
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -182,14 +182,14 @@ Feature: Cluster mode failover from dead master
         """
         And zookeeper node "/test/active_nodes" should match json_exactly within "20" seconds
         """
-        ["redis2","redis3"]
+        ["valkey2","valkey3"]
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
+        Then valkey host "{{.new_master}}" should be master
         When I delete zookeeper node "/test/last_switch"
         When host "{{.new_master}}" is stopped
-        Then redis host "{{.new_master}}" should become unavailable within "10" seconds
+        Then valkey host "{{.new_master}}" should become unavailable within "10" seconds
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
@@ -201,11 +201,11 @@ Feature: Cluster mode failover from dead master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
+        Then valkey host "{{.new_master}}" should be master
 
     Scenario: Cluster mode failover cooldown is respected
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -213,31 +213,31 @@ Feature: Cluster mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When host "redis1" is stopped
-        Then redis host "redis1" should become unavailable within "10" seconds
+        When host "valkey1" is stopped
+        Then valkey host "valkey1" should become unavailable within "10" seconds
         And  zookeeper node "/test/manager" should match regexp within "30" seconds
         """
-            .*redis[23].*
+            .*valkey[23].*
         """
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -245,16 +245,16 @@ Feature: Cluster mode failover from dead master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
-        When host "redis1" is started
-        Then redis host "redis1" should become available within "20" seconds
-        And redis host "redis1" should become replica of "{{.new_master}}" within "30" seconds
+        Then valkey host "{{.new_master}}" should be master
+        When host "valkey1" is started
+        Then valkey host "valkey1" should become available within "20" seconds
+        And valkey host "valkey1" should become replica of "{{.new_master}}" within "30" seconds
         When host "{{.new_master}}" is stopped
-        Then redis host "{{.new_master}}" should become unavailable within "10" seconds
-        And redis host "redis1" should become replica of "{{.new_master}}" within "30" seconds
+        Then valkey host "{{.new_master}}" should become unavailable within "10" seconds
+        And valkey host "valkey1" should become replica of "{{.new_master}}" within "30" seconds
         And zookeeper node "/test/manager" should match regexp within "10" seconds
         """
-            .*redis.*
+            .*valkey.*
         """
         When I get zookeeper node "/test/manager"
         And I save zookeeper query result as "new_manager"
@@ -270,7 +270,7 @@ Feature: Cluster mode failover from dead master
 
     Scenario: Cluster mode failover delay is respected
         Given clustered shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -278,25 +278,25 @@ Feature: Cluster mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When host "redis1" is stopped
-        Then redis host "redis1" should become unavailable within "10" seconds
+        When host "valkey1" is stopped
+        Then valkey host "valkey1" should become unavailable within "10" seconds
         When I wait for "10" seconds
-        Then redis host "redis2" should be replica of "redis1"
-        Then redis host "redis3" should be replica of "redis1"
+        Then valkey host "valkey2" should be replica of "valkey1"
+        Then valkey host "valkey3" should be replica of "valkey1"
         When I get zookeeper node "/test/manager"
         And I save zookeeper query result as "new_manager"
         And I run command on host "{{.new_manager.hostname}}"
@@ -310,71 +310,71 @@ Feature: Cluster mode failover from dead master
 
     Scenario: Cluster mode failover works for 2 node shard
         Given clustered shard is up and running
-        When host "redis3" is deleted
-        Then redis host "redis3" should become unavailable within "10" seconds
+        When host "valkey3" is deleted
+        Then valkey host "valkey3" should become unavailable within "10" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2"]
+            ["valkey1","valkey2"]
         """
-        When host "redis1" is stopped
-        Then redis host "redis1" should become unavailable within "10" seconds
+        When host "valkey1" is stopped
+        Then valkey host "valkey1" should become unavailable within "10" seconds
         And  zookeeper node "/test/manager" should match regexp within "30" seconds
         """
-            .*redis2.*
+            .*valkey2.*
         """
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
         }
         """
-        Then redis host "redis2" should be master
-        When host "redis1" is started
-        Then redis host "redis1" should become available within "20" seconds
-        And redis host "redis1" should become replica of "redis2" within "30" seconds
+        Then valkey host "valkey2" should be master
+        When host "valkey1" is started
+        Then valkey host "valkey1" should become available within "20" seconds
+        And valkey host "valkey1" should become replica of "valkey2" within "30" seconds
 
     Scenario: Cluster mode failover fails for 2 node shard with lagging replica
         Given clustered shard is up and running
-        When host "redis3" is deleted
-        Then redis host "redis3" should become unavailable within "10" seconds
+        When host "valkey3" is deleted
+        Then valkey host "valkey3" should become unavailable within "10" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2"]
+            ["valkey1","valkey2"]
         """
-        When host "redis2" is stopped
-        Then redis host "redis2" should become unavailable within "10" seconds
+        When host "valkey2" is stopped
+        Then valkey host "valkey2" should become unavailable within "10" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "60" seconds
         """
-            ["redis1"]
+            ["valkey1"]
         """
         When I wait for "30" seconds
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
             SET MYKEY TESTVALUE
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             OK
         """
         When I wait for "30" seconds
-        And host "redis1" is stopped
-        Then redis host "redis1" should become unavailable within "10" seconds
-        When host "redis2" is started
-        Then redis host "redis2" should become available within "10" seconds
+        And host "valkey1" is stopped
+        Then valkey host "valkey1" should become unavailable within "10" seconds
+        When host "valkey2" is started
+        Then valkey host "valkey2" should become available within "10" seconds
         Then zookeeper node "/test/manager" should match regexp within "10" seconds
         """
-            .*redis2.*
+            .*valkey2.*
         """
         Then zookeeper node "/test/master" should match regexp
         """
-            .*redis1.*
+            .*valkey1.*
         """
         When I wait for "60" seconds
-        When I run command on host "redis2"
+        When I run command on host "valkey2"
         """
             grep Failover /var/log/rdsync.log
         """
@@ -386,7 +386,7 @@ Feature: Cluster mode failover from dead master
     Scenario: Cluster mode master restart with disabled persistence causes failover
         Given clustered shard is up and running
         And persistence is disabled
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -394,31 +394,31 @@ Feature: Cluster mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
             SET very-important-key foo
         """
         And I wait for "1" seconds
-        And redis on host "redis1" is restarted
+        And valkey on host "valkey1" is restarted
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -426,14 +426,14 @@ Feature: Cluster mode failover from dead master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
-        And redis host "redis1" should become available within "20" seconds
-        And redis host "redis1" should become replica of "{{.new_master}}" within "30" seconds
-        When I run command on redis host "{{.new_master}}"
+        Then valkey host "{{.new_master}}" should be master
+        And valkey host "valkey1" should become available within "20" seconds
+        And valkey host "valkey1" should become replica of "{{.new_master}}" within "30" seconds
+        When I run command on valkey host "{{.new_master}}"
         """
             GET very-important-key
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*foo.*
         """
diff --git a/tests/features/04_sentinel_failover.feature b/tests/features/04_sentinel_failover.feature
index 8a0e424..204be3d 100644
--- a/tests/features/04_sentinel_failover.feature
+++ b/tests/features/04_sentinel_failover.feature
@@ -2,7 +2,7 @@ Feature: Sentinel mode failover from dead master
 
     Scenario: Sentinel mode failover from dead master works
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -10,31 +10,31 @@ Feature: Sentinel mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When host "redis1" is stopped
-        Then redis host "redis1" should become unavailable within "10" seconds
+        When host "valkey1" is stopped
+        Then valkey host "valkey1" should become unavailable within "10" seconds
         And  zookeeper node "/test/manager" should match regexp within "30" seconds
         """
-            .*redis[23].*
+            .*valkey[23].*
         """
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -42,17 +42,17 @@ Feature: Sentinel mode failover from dead master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
-        When host "redis1" is started
-        Then redis host "redis1" should become available within "20" seconds
-        And redis host "redis1" should become replica of "{{.new_master}}" within "30" seconds
-        And senticache host "redis1" should have master "{{.new_master}}" within "30" seconds
-        And senticache host "redis2" should have master "{{.new_master}}" within "30" seconds
-        And senticache host "redis3" should have master "{{.new_master}}" within "30" seconds
+        Then valkey host "{{.new_master}}" should be master
+        When host "valkey1" is started
+        Then valkey host "valkey1" should become available within "20" seconds
+        And valkey host "valkey1" should become replica of "{{.new_master}}" within "30" seconds
+        And senticache host "valkey1" should have master "{{.new_master}}" within "30" seconds
+        And senticache host "valkey2" should have master "{{.new_master}}" within "30" seconds
+        And senticache host "valkey3" should have master "{{.new_master}}" within "30" seconds
 
     Scenario: Sentinel mode failover does not work in absence of quorum
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -60,35 +60,35 @@ Feature: Sentinel mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When redis on host "redis1" is killed
-        And redis on host "redis2" is killed
-        Then redis host "redis1" should become unavailable within "10" seconds
-        And redis host "redis2" should become unavailable within "10" seconds
+        When valkey on host "valkey1" is killed
+        And valkey on host "valkey2" is killed
+        Then valkey host "valkey1" should become unavailable within "10" seconds
+        And valkey host "valkey2" should become unavailable within "10" seconds
         When I wait for "60" seconds
-        Then redis host "redis3" should be replica of "redis1"
+        Then valkey host "valkey3" should be replica of "valkey1"
         And zookeeper node "/test/master" should match regexp
         """
-            redis1
+            valkey1
         """
         And zookeeper node "/test/manager" should match regexp
         """
-            redis1
+            valkey1
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             grep Failover /var/log/rdsync.log
         """
@@ -99,7 +99,7 @@ Feature: Sentinel mode failover from dead master
 
     Scenario: Sentinel mode failover selects active replica based on priority
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -107,45 +107,45 @@ Feature: Sentinel mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
-            rdsync host add redis2 --priority 200
+            rdsync host add valkey2 --priority 200
         """
         Then command return code should be "0"
         And command output should match regexp
         """
             host has been added
         """
-        When redis on host "redis1" is killed
+        When valkey on host "valkey1" is killed
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
         }
         """
-        Then redis host "redis2" should be master
+        Then valkey host "valkey2" should be master
 
     Scenario: Sentinel mode failover works with dynamic quorum
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -153,31 +153,31 @@ Feature: Sentinel mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When host "redis1" is stopped
-        Then redis host "redis1" should become unavailable within "10" seconds
+        When host "valkey1" is stopped
+        Then valkey host "valkey1" should become unavailable within "10" seconds
         And  zookeeper node "/test/manager" should match regexp within "30" seconds
         """
-            .*redis[23].*
+            .*valkey[23].*
         """
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -185,14 +185,14 @@ Feature: Sentinel mode failover from dead master
         """
         And zookeeper node "/test/active_nodes" should match json_exactly within "20" seconds
         """
-        ["redis2","redis3"]
+        ["valkey2","valkey3"]
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
+        Then valkey host "{{.new_master}}" should be master
         When I delete zookeeper node "/test/last_switch"
         When host "{{.new_master}}" is stopped
-        Then redis host "{{.new_master}}" should become unavailable within "10" seconds
+        Then valkey host "{{.new_master}}" should become unavailable within "10" seconds
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
@@ -204,12 +204,12 @@ Feature: Sentinel mode failover from dead master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
+        Then valkey host "{{.new_master}}" should be master
         And senticache host "{{.new_master}}" should have master "{{.new_master}}" within "30" seconds
 
     Scenario: Sentinel mode failover cooldown is respected
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -217,31 +217,31 @@ Feature: Sentinel mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When host "redis1" is stopped
-        Then redis host "redis1" should become unavailable within "10" seconds
+        When host "valkey1" is stopped
+        Then valkey host "valkey1" should become unavailable within "10" seconds
         And  zookeeper node "/test/manager" should match regexp within "30" seconds
         """
-            .*redis[23].*
+            .*valkey[23].*
         """
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -249,16 +249,16 @@ Feature: Sentinel mode failover from dead master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
-        When host "redis1" is started
-        Then redis host "redis1" should become available within "20" seconds
-        And redis host "redis1" should become replica of "{{.new_master}}" within "30" seconds
+        Then valkey host "{{.new_master}}" should be master
+        When host "valkey1" is started
+        Then valkey host "valkey1" should become available within "20" seconds
+        And valkey host "valkey1" should become replica of "{{.new_master}}" within "30" seconds
         When host "{{.new_master}}" is stopped
-        Then redis host "{{.new_master}}" should become unavailable within "10" seconds
-        And redis host "redis1" should become replica of "{{.new_master}}" within "30" seconds
+        Then valkey host "{{.new_master}}" should become unavailable within "10" seconds
+        And valkey host "valkey1" should become replica of "{{.new_master}}" within "30" seconds
         And zookeeper node "/test/manager" should match regexp within "10" seconds
         """
-            .*redis.*
+            .*valkey.*
         """
         When I get zookeeper node "/test/manager"
         And I save zookeeper query result as "new_manager"
@@ -274,7 +274,7 @@ Feature: Sentinel mode failover from dead master
 
     Scenario: Sentinel mode failover delay is respected
         Given sentinel shard is up and running
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -282,25 +282,25 @@ Feature: Sentinel mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When host "redis1" is stopped
-        Then redis host "redis1" should become unavailable within "10" seconds
+        When host "valkey1" is stopped
+        Then valkey host "valkey1" should become unavailable within "10" seconds
         When I wait for "10" seconds
-        Then redis host "redis2" should be replica of "redis1"
-        Then redis host "redis3" should be replica of "redis1"
+        Then valkey host "valkey2" should be replica of "valkey1"
+        Then valkey host "valkey3" should be replica of "valkey1"
         When I get zookeeper node "/test/manager"
         And I save zookeeper query result as "new_manager"
         And I run command on host "{{.new_manager.hostname}}"
@@ -314,73 +314,73 @@ Feature: Sentinel mode failover from dead master
 
     Scenario: Sentinel mode failover works for 2 node shard
         Given sentinel shard is up and running
-        When host "redis3" is deleted
-        Then redis host "redis3" should become unavailable within "10" seconds
+        When host "valkey3" is deleted
+        Then valkey host "valkey3" should become unavailable within "10" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2"]
+            ["valkey1","valkey2"]
         """
-        When host "redis1" is stopped
-        Then redis host "redis1" should become unavailable within "10" seconds
+        When host "valkey1" is stopped
+        Then valkey host "valkey1" should become unavailable within "10" seconds
         And  zookeeper node "/test/manager" should match regexp within "30" seconds
         """
-            .*redis2.*
+            .*valkey2.*
         """
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
         }
         """
-        Then redis host "redis2" should be master
-        When host "redis1" is started
-        Then redis host "redis1" should become available within "20" seconds
-        And redis host "redis1" should become replica of "redis2" within "30" seconds
-        And senticache host "redis1" should have master "redis2" within "30" seconds
-        And senticache host "redis2" should have master "redis2" within "30" seconds
+        Then valkey host "valkey2" should be master
+        When host "valkey1" is started
+        Then valkey host "valkey1" should become available within "20" seconds
+        And valkey host "valkey1" should become replica of "valkey2" within "30" seconds
+        And senticache host "valkey1" should have master "valkey2" within "30" seconds
+        And senticache host "valkey2" should have master "valkey2" within "30" seconds
 
     Scenario: Sentinel mode failover fails for 2 node shard with lagging replica
         Given sentinel shard is up and running
-        When host "redis3" is deleted
-        Then redis host "redis3" should become unavailable within "10" seconds
+        When host "valkey3" is deleted
+        Then valkey host "valkey3" should become unavailable within "10" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2"]
+            ["valkey1","valkey2"]
         """
-        When host "redis2" is stopped
-        Then redis host "redis2" should become unavailable within "10" seconds
+        When host "valkey2" is stopped
+        Then valkey host "valkey2" should become unavailable within "10" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "60" seconds
         """
-            ["redis1"]
+            ["valkey1"]
         """
         When I wait for "30" seconds
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
             SET MYKEY TESTVALUE
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             OK
         """
         When I wait for "30" seconds
-        And host "redis1" is stopped
-        Then redis host "redis1" should become unavailable within "10" seconds
-        When host "redis2" is started
-        Then redis host "redis2" should become available within "10" seconds
+        And host "valkey1" is stopped
+        Then valkey host "valkey1" should become unavailable within "10" seconds
+        When host "valkey2" is started
+        Then valkey host "valkey2" should become available within "10" seconds
         Then zookeeper node "/test/manager" should match regexp within "10" seconds
         """
-            .*redis2.*
+            .*valkey2.*
         """
         Then zookeeper node "/test/master" should match regexp
         """
-            .*redis1.*
+            .*valkey1.*
         """
         When I wait for "60" seconds
-        When I run command on host "redis2"
+        When I run command on host "valkey2"
         """
             grep Failover /var/log/rdsync.log
         """
@@ -392,7 +392,7 @@ Feature: Sentinel mode failover from dead master
     Scenario: Sentinel mode master restart with disabled persistence causes failover
         Given sentinel shard is up and running
         And persistence is disabled
-        Then zookeeper node "/test/health/redis1" should match json within "30" seconds
+        Then zookeeper node "/test/health/valkey1" should match json within "30" seconds
         """
         {
             "ping_ok": true,
@@ -400,31 +400,31 @@ Feature: Sentinel mode failover from dead master
             "is_read_only": false
         }
         """
-        And zookeeper node "/test/health/redis2" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey2" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        And zookeeper node "/test/health/redis3" should match json within "30" seconds
+        And zookeeper node "/test/health/valkey3" should match json within "30" seconds
         """
         {
             "ping_ok": true,
             "is_master": false
         }
         """
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
             SET very-important-key foo
         """
         And I wait for "1" seconds
-        And redis on host "redis1" is restarted
+        And valkey on host "valkey1" is restarted
         And zookeeper node "/test/last_switch" should match json within "60" seconds
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -432,17 +432,17 @@ Feature: Sentinel mode failover from dead master
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
-        And redis host "redis1" should become available within "20" seconds
-        And redis host "redis1" should become replica of "{{.new_master}}" within "30" seconds
-        And senticache host "redis1" should have master "{{.new_master}}" within "30" seconds
-        And senticache host "redis2" should have master "{{.new_master}}" within "30" seconds
-        And senticache host "redis3" should have master "{{.new_master}}" within "30" seconds
-        When I run command on redis host "{{.new_master}}"
+        Then valkey host "{{.new_master}}" should be master
+        And valkey host "valkey1" should become available within "20" seconds
+        And valkey host "valkey1" should become replica of "{{.new_master}}" within "30" seconds
+        And senticache host "valkey1" should have master "{{.new_master}}" within "30" seconds
+        And senticache host "valkey2" should have master "{{.new_master}}" within "30" seconds
+        And senticache host "valkey3" should have master "{{.new_master}}" within "30" seconds
+        When I run command on valkey host "{{.new_master}}"
         """
             GET very-important-key
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*foo.*
         """
diff --git a/tests/features/05_cluster_replication_fix.feature b/tests/features/05_cluster_replication_fix.feature
index 7a870b1..64bc272 100644
--- a/tests/features/05_cluster_replication_fix.feature
+++ b/tests/features/05_cluster_replication_fix.feature
@@ -1,116 +1,116 @@
 Feature: Cluster mode broken replication fix
 
-    Scenario: Cluster mode broken shard with divergence in DCS and redis is fixed
+    Scenario: Cluster mode broken shard with divergence in DCS and valkey is fixed
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             supervisorctl signal STOP rdsync
         """
-        And I run command on host "redis2"
+        And I run command on host "valkey2"
         """
             supervisorctl signal STOP rdsync
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
             supervisorctl signal STOP rdsync
         """
-        When I run command on redis host "redis2"
+        When I run command on valkey host "valkey2"
         """
             CLUSTER FAILOVER
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*OK.*
         """
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
             CONFIG SET repl-paused yes
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*OK.*
         """
-        When I run command on redis host "redis3"
+        When I run command on valkey host "valkey3"
         """
             CONFIG SET repl-paused yes
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*OK.*
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             supervisorctl signal CONT rdsync
         """
-        And I run command on host "redis2"
+        And I run command on host "valkey2"
         """
             supervisorctl signal CONT rdsync
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
             supervisorctl signal CONT rdsync
         """
         Then zookeeper node "/test/master" should match json_exactly within "30" seconds
         """
-            "redis2"
+            "valkey2"
         """
         When I wait for "30" seconds
-        And I run command on redis host "redis1"
+        And I run command on valkey host "valkey1"
         """
             CONFIG GET repl-paused
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*no.*
         """
-        When I run command on redis host "redis3"
+        When I run command on valkey host "valkey3"
         """
             CONFIG GET repl-paused
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*no.*
         """
 
-    Scenario: Cluster mode master info divergence in DCS and redis is fixed
+    Scenario: Cluster mode master info divergence in DCS and valkey is fixed
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When I set zookeeper node "/test/master" to
         """
-            "redis3"
+            "valkey3"
         """
         Then zookeeper node "/test/master" should match json_exactly within "30" seconds
         """
-            "redis1"
+            "valkey1"
         """
 
     Scenario: Cluster mode nonexistent master info in DCS is fixed
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When I set zookeeper node "/test/master" to
         """
@@ -118,148 +118,148 @@ Feature: Cluster mode broken replication fix
         """
         Then zookeeper node "/test/master" should match json_exactly within "30" seconds
         """
-            "redis1"
+            "valkey1"
         """
 
     Scenario: Cluster mode accidental cascade replication is fixed
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on host "redis3"
+        When I run command on host "valkey3"
         """
-            setup_cluster.sh redis2
+            setup_cluster.sh valkey2
         """
         Then command return code should be "0"
-        And redis host "redis3" should become replica of "redis1" within "60" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "60" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
 
     Scenario: Cluster mode replication pause on replica is fixed
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I break replication on host "redis3"
-        Then redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "60" seconds
+        When I break replication on host "valkey3"
+        Then valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "60" seconds
 
     Scenario: Cluster lone node is joined in cluster back
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on host "redis3"
+        When I run command on host "valkey3"
         """
-            rm -f /etc/redis/cluster.conf
+            rm -f /etc/valkey/cluster.conf
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
-            sed -i -e 's/offline yes/offline no/' /etc/redis/redis.conf
+            sed -i -e 's/offline yes/offline no/' /etc/valkey/valkey.conf
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
-            supervisorctl signal KILL redis
+            supervisorctl signal KILL valkey
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
-            supervisorctl start redis
+            supervisorctl start valkey
         """
-        Then redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
 
     Scenario: Cluster splitbrain is fixed in favor of node with slots
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             supervisorctl signal STOP rdsync
         """
-        And I run command on host "redis2"
+        And I run command on host "valkey2"
         """
             supervisorctl signal STOP rdsync
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
             supervisorctl signal STOP rdsync
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
-            rm -f /etc/redis/cluster.conf
+            rm -f /etc/valkey/cluster.conf
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
-            sed -i -e 's/offline yes/offline no/' /etc/redis/redis.conf
+            sed -i -e 's/offline yes/offline no/' /etc/valkey/valkey.conf
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
-            supervisorctl signal KILL redis
+            supervisorctl signal KILL valkey
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
-            supervisorctl start redis
+            supervisorctl start valkey
         """
-        Then redis host "redis3" should become available within "60" seconds
-        When I run command on redis host "redis1"
+        Then valkey host "valkey3" should become available within "60" seconds
+        When I run command on valkey host "valkey1"
         """
             SET very-important-key foo
         """
         And I set zookeeper node "/test/master" to
         """
-            "redis3"
+            "valkey3"
         """
-        And I run command on host "redis1"
+        And I run command on host "valkey1"
         """
             supervisorctl signal CONT rdsync
         """
-        And I run command on host "redis2"
+        And I run command on host "valkey2"
         """
             supervisorctl signal CONT rdsync
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
             supervisorctl signal CONT rdsync
         """
-        Then redis host "redis3" should become replica of "redis1" within "60" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey3" should become replica of "valkey1" within "60" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
             GET very-important-key
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*foo.*
         """
diff --git a/tests/features/05_sentinel_replication_fix.feature b/tests/features/05_sentinel_replication_fix.feature
index dcb58d5..fe065aa 100644
--- a/tests/features/05_sentinel_replication_fix.feature
+++ b/tests/features/05_sentinel_replication_fix.feature
@@ -1,132 +1,132 @@
 Feature: Sentinel mode broken replication fix
 
-    Scenario: Sentinel mode broken shard with divergence in DCS and redis is fixed
+    Scenario: Sentinel mode broken shard with divergence in DCS and valkey is fixed
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             supervisorctl signal STOP rdsync
         """
-        And I run command on host "redis2"
+        And I run command on host "valkey2"
         """
             supervisorctl signal STOP rdsync
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
             supervisorctl signal STOP rdsync
         """
-        When I run command on redis host "redis3"
+        When I run command on valkey host "valkey3"
         """
-            REPLICAOF redis2 6379
+            REPLICAOF valkey2 6379
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*OK.*
         """
-        When I run command on redis host "redis2"
+        When I run command on valkey host "valkey2"
         """
             REPLICAOF NO ONE
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*OK.*
         """
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
-            REPLICAOF redis2 6379
+            REPLICAOF valkey2 6379
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*OK.*
         """
-        When I run command on redis host "redis1"
+        When I run command on valkey host "valkey1"
         """
             CONFIG SET repl-paused yes
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*OK.*
         """
-        When I run command on redis host "redis3"
+        When I run command on valkey host "valkey3"
         """
             CONFIG SET repl-paused yes
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*OK.*
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             supervisorctl signal CONT rdsync
         """
-        And I run command on host "redis2"
+        And I run command on host "valkey2"
         """
             supervisorctl signal CONT rdsync
         """
-        And I run command on host "redis3"
+        And I run command on host "valkey3"
         """
             supervisorctl signal CONT rdsync
         """
         Then zookeeper node "/test/master" should match json_exactly within "30" seconds
         """
-            "redis2"
+            "valkey2"
         """
         When I wait for "30" seconds
-        And I run command on redis host "redis1"
+        And I run command on valkey host "valkey1"
         """
             CONFIG GET repl-paused
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*no.*
         """
-        When I run command on redis host "redis3"
+        When I run command on valkey host "valkey3"
         """
             CONFIG GET repl-paused
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             .*no.*
         """
 
-    Scenario: Sentinel mode master info divergence in DCS and redis is fixed
+    Scenario: Sentinel mode master info divergence in DCS and valkey is fixed
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When I set zookeeper node "/test/master" to
         """
-            "redis3"
+            "valkey3"
         """
         Then zookeeper node "/test/master" should match json_exactly within "30" seconds
         """
-            "redis1"
+            "valkey1"
         """
 
     Scenario: Sentinel mode nonexistent master info in DCS is fixed
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When I set zookeeper node "/test/master" to
         """
@@ -134,42 +134,42 @@ Feature: Sentinel mode broken replication fix
         """
         Then zookeeper node "/test/master" should match json_exactly within "30" seconds
         """
-            "redis1"
+            "valkey1"
         """
 
     Scenario: Sentinel mode accidental cascade replication is fixed
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on redis host "redis3"
+        When I run command on valkey host "valkey3"
         """
-            REPLICAOF redis2 6379
+            REPLICAOF valkey2 6379
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             OK
         """
-        And redis host "redis3" should become replica of "redis1" within "60" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "60" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
 
     Scenario: Sentinel mode replication pause on replica is fixed
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I break replication on host "redis3"
-        Then redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "60" seconds
+        When I break replication on host "valkey3"
+        Then valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "60" seconds
diff --git a/tests/features/06_cluster_lost.feature b/tests/features/06_cluster_lost.feature
index 96dbec7..446253e 100644
--- a/tests/features/06_cluster_lost.feature
+++ b/tests/features/06_cluster_lost.feature
@@ -2,101 +2,101 @@ Feature: Cluster mode survives dcs conn loss
 
     Scenario: Cluster mode survives dcs conn loss
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When host "zoo3" is detached from the network
         And host "zoo2" is detached from the network
         And host "zoo1" is detached from the network
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
-        When I run command on redis host "redis1"
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
+        When I run command on valkey host "valkey1"
         """
             SET MYKEY TESTVALUE
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             OK
         """
 
     Scenario: Cluster mode partitioned master goes offline
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When host "zoo3" is detached from the network
         And host "zoo2" is detached from the network
         And host "zoo1" is detached from the network
-        And host "redis2" is detached from the network
-        And host "redis3" is detached from the network
-        Then redis host "redis1" should become unavailable within "30" seconds
+        And host "valkey2" is detached from the network
+        And host "valkey3" is detached from the network
+        Then valkey host "valkey1" should become unavailable within "30" seconds
         When host "zoo3" is attached to the network
         And host "zoo2" is attached to the network
         And host "zoo1" is attached to the network
-        And host "redis2" is attached to the network
-        And host "redis3" is attached to the network
-        Then redis host "redis1" should become available within "60" seconds
+        And host "valkey2" is attached to the network
+        And host "valkey3" is attached to the network
+        Then valkey host "valkey1" should become available within "60" seconds
 
     Scenario: Cluster mode partitioned replica goes offline
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When host "zoo3" is detached from the network
         And host "zoo2" is detached from the network
         And host "zoo1" is detached from the network
-        And host "redis1" is detached from the network
-        And host "redis3" is detached from the network
-        Then redis host "redis2" should become unavailable within "30" seconds
+        And host "valkey1" is detached from the network
+        And host "valkey3" is detached from the network
+        Then valkey host "valkey2" should become unavailable within "30" seconds
         When host "zoo3" is attached to the network
         And host "zoo2" is attached to the network
         And host "zoo1" is attached to the network
-        And host "redis1" is attached to the network
-        And host "redis3" is attached to the network
-        Then redis host "redis2" should become available within "60" seconds
+        And host "valkey1" is attached to the network
+        And host "valkey3" is attached to the network
+        Then valkey host "valkey2" should become available within "60" seconds
 
     Scenario: Cluster mode partially partitioned manager gives up on manager role
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on host "redis1" with timeout "20" seconds
+        When I run command on host "valkey1" with timeout "20" seconds
         """
             supervisorctl stop rdsync
         """
         Then command return code should be "0"
         And  zookeeper node "/test/manager" should match regexp within "30" seconds
         """
-            .*redis[23].*
+            .*valkey[23].*
         """
-        When I run command on host "redis1" with timeout "20" seconds
+        When I run command on host "valkey1" with timeout "20" seconds
         """
             supervisorctl start rdsync
         """
@@ -104,7 +104,7 @@ Feature: Cluster mode survives dcs conn loss
         And I save zookeeper query result as "new_manager"
         And port "6379" on host "{{.new_manager.hostname}}" is blocked
         And I wait for "60" seconds
-        Then redis host "redis1" should be master
+        Then valkey host "valkey1" should be master
         When I run command on host "{{.new_manager.hostname}}"
         """
             grep ERROR /var/log/rdsync.log
@@ -124,23 +124,23 @@ Feature: Cluster mode survives dcs conn loss
         When port "6379" on host "{{.new_manager.hostname}}" is unblocked
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
 
     Scenario: Cluster mode partially partitioned manager gives up on manager role and triggers failover on master
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When port "6379" on host "redis1" is blocked
+        When port "6379" on host "valkey1" is blocked
         And I wait for "180" seconds
-        And I run command on host "redis1"
+        And I run command on host "valkey1"
         """
             grep ERROR /var/log/rdsync.log
         """
@@ -152,7 +152,7 @@ Feature: Cluster mode survives dcs conn loss
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -160,9 +160,9 @@ Feature: Cluster mode survives dcs conn loss
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
-        When port "6379" on host "redis1" is unblocked
+        Then valkey host "{{.new_master}}" should be master
+        When port "6379" on host "valkey1" is unblocked
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
diff --git a/tests/features/06_sentinel_lost.feature b/tests/features/06_sentinel_lost.feature
index f65653f..286ddb7 100644
--- a/tests/features/06_sentinel_lost.feature
+++ b/tests/features/06_sentinel_lost.feature
@@ -2,101 +2,101 @@ Feature: Sentinel mode survives dcs conn loss
 
     Scenario: Sentinel mode survives dcs conn loss
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When host "zoo3" is detached from the network
         And host "zoo2" is detached from the network
         And host "zoo1" is detached from the network
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
-        When I run command on redis host "redis1"
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
+        When I run command on valkey host "valkey1"
         """
             SET MYKEY TESTVALUE
         """
-        Then redis cmd result should match regexp
+        Then valkey cmd result should match regexp
         """
             OK
         """
 
     Scenario: Sentinel mode partitioned master goes offline
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When host "zoo3" is detached from the network
         And host "zoo2" is detached from the network
         And host "zoo1" is detached from the network
-        And host "redis2" is detached from the network
-        And host "redis3" is detached from the network
-        Then redis host "redis1" should become unavailable within "30" seconds
+        And host "valkey2" is detached from the network
+        And host "valkey3" is detached from the network
+        Then valkey host "valkey1" should become unavailable within "30" seconds
         When host "zoo3" is attached to the network
         And host "zoo2" is attached to the network
         And host "zoo1" is attached to the network
-        And host "redis2" is attached to the network
-        And host "redis3" is attached to the network
-        Then redis host "redis1" should become available within "60" seconds
+        And host "valkey2" is attached to the network
+        And host "valkey3" is attached to the network
+        Then valkey host "valkey1" should become available within "60" seconds
 
     Scenario: Sentinel mode partitioned replica goes offline
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
         When host "zoo3" is detached from the network
         And host "zoo2" is detached from the network
         And host "zoo1" is detached from the network
-        And host "redis1" is detached from the network
-        And host "redis3" is detached from the network
-        Then redis host "redis2" should become unavailable within "30" seconds
+        And host "valkey1" is detached from the network
+        And host "valkey3" is detached from the network
+        Then valkey host "valkey2" should become unavailable within "30" seconds
         When host "zoo3" is attached to the network
         And host "zoo2" is attached to the network
         And host "zoo1" is attached to the network
-        And host "redis1" is attached to the network
-        And host "redis3" is attached to the network
-        Then redis host "redis2" should become available within "60" seconds
+        And host "valkey1" is attached to the network
+        And host "valkey3" is attached to the network
+        Then valkey host "valkey2" should become available within "60" seconds
 
     Scenario: Sentinel mode partially partitioned manager gives up on manager role
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on host "redis1" with timeout "20" seconds
+        When I run command on host "valkey1" with timeout "20" seconds
         """
             supervisorctl stop rdsync
         """
         Then command return code should be "0"
         And  zookeeper node "/test/manager" should match regexp within "30" seconds
         """
-            .*redis[23].*
+            .*valkey[23].*
         """
-        When I run command on host "redis1" with timeout "20" seconds
+        When I run command on host "valkey1" with timeout "20" seconds
         """
             supervisorctl start rdsync
         """
@@ -104,7 +104,7 @@ Feature: Sentinel mode survives dcs conn loss
         And I save zookeeper query result as "new_manager"
         And port "6379" on host "{{.new_manager.hostname}}" is blocked
         And I wait for "60" seconds
-        Then redis host "redis1" should be master
+        Then valkey host "valkey1" should be master
         When I run command on host "{{.new_manager.hostname}}"
         """
             grep ERROR /var/log/rdsync.log
@@ -124,23 +124,23 @@ Feature: Sentinel mode survives dcs conn loss
         When port "6379" on host "{{.new_manager.hostname}}" is unblocked
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
 
     Scenario: Sentinel mode partially partitioned manager gives up on manager role and triggers failover on master
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When port "6379" on host "redis1" is blocked
+        When port "6379" on host "valkey1" is blocked
         And I wait for "180" seconds
-        And I run command on host "redis1"
+        And I run command on host "valkey1"
         """
             grep ERROR /var/log/rdsync.log
         """
@@ -152,7 +152,7 @@ Feature: Sentinel mode survives dcs conn loss
         """
         {
             "cause": "auto",
-            "from": "redis1",
+            "from": "valkey1",
             "result": {
                 "ok": true
             }
@@ -160,9 +160,9 @@ Feature: Sentinel mode survives dcs conn loss
         """
         When I get zookeeper node "/test/master"
         And I save zookeeper query result as "new_master"
-        Then redis host "{{.new_master}}" should be master
-        When port "6379" on host "redis1" is unblocked
+        Then valkey host "{{.new_master}}" should be master
+        When port "6379" on host "valkey1" is unblocked
         Then zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
diff --git a/tests/features/07_cluster_local_repair.feature b/tests/features/07_cluster_local_repair.feature
index 0822a83..c79135c 100644
--- a/tests/features/07_cluster_local_repair.feature
+++ b/tests/features/07_cluster_local_repair.feature
@@ -2,35 +2,35 @@ Feature: Cluster mode local node repair
 
     Scenario: Cluster mode replica is restarted after OOM
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When redis on host "redis2" is killed
+        When valkey on host "valkey2" is killed
         And I wait for "300" seconds
-        Then redis host "redis2" should become available within "120" seconds
-        And redis host "redis2" should become replica of "redis1" within "60" seconds
-        And replication on redis host "redis2" should run fine within "60" seconds
+        Then valkey host "valkey2" should become available within "120" seconds
+        And valkey host "valkey2" should become replica of "valkey1" within "60" seconds
+        And replication on valkey host "valkey2" should run fine within "60" seconds
 
     Scenario: Cluster mode master is restarted after hanging
         Given clustered shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run async command on host "redis1"
+        When I run async command on host "valkey1"
         """
-            redis-cli -a functestpassword DEBUG SLEEP 600
+            valkey-cli -a functestpassword DEBUG SLEEP 600
         """
         And I wait for "420" seconds
-        Then redis host "redis1" should become available within "60" seconds
+        Then valkey host "valkey1" should become available within "60" seconds
diff --git a/tests/features/07_sentinel_local_repair.feature b/tests/features/07_sentinel_local_repair.feature
index 27b1b9b..db136a9 100644
--- a/tests/features/07_sentinel_local_repair.feature
+++ b/tests/features/07_sentinel_local_repair.feature
@@ -2,52 +2,52 @@ Feature: Sentinel mode local node repair
 
     Scenario: Sentinel mode senticache is restarted after OOM
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run command on host "redis1"
+        When I run command on host "valkey1"
         """
             supervisorctl stop senticache
         """
-        Then senticache host "redis1" should have master "redis1" within "30" seconds
+        Then senticache host "valkey1" should have master "valkey1" within "30" seconds
 
     Scenario: Sentinel mode replica is restarted after OOM
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When redis on host "redis2" is killed
+        When valkey on host "valkey2" is killed
         And I wait for "300" seconds
-        Then redis host "redis2" should become available within "120" seconds
-        And redis host "redis2" should become replica of "redis1" within "60" seconds
-        And replication on redis host "redis2" should run fine within "60" seconds
+        Then valkey host "valkey2" should become available within "120" seconds
+        And valkey host "valkey2" should become replica of "valkey1" within "60" seconds
+        And replication on valkey host "valkey2" should run fine within "60" seconds
 
     Scenario: Sentinel mode master is restarted after hanging
         Given sentinel shard is up and running
-        Then redis host "redis1" should be master
-        And redis host "redis2" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis2" should run fine within "15" seconds
-        And redis host "redis3" should become replica of "redis1" within "15" seconds
-        And replication on redis host "redis3" should run fine within "15" seconds
+        Then valkey host "valkey1" should be master
+        And valkey host "valkey2" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey2" should run fine within "15" seconds
+        And valkey host "valkey3" should become replica of "valkey1" within "15" seconds
+        And replication on valkey host "valkey3" should run fine within "15" seconds
         And zookeeper node "/test/active_nodes" should match json_exactly within "30" seconds
         """
-            ["redis1","redis2","redis3"]
+            ["valkey1","valkey2","valkey3"]
         """
-        When I run async command on host "redis1"
+        When I run async command on host "valkey1"
         """
-            redis-cli -a functestpassword DEBUG SLEEP 600
+            valkey-cli -a functestpassword DEBUG SLEEP 600
         """
         And I wait for "420" seconds
-        Then redis host "redis1" should become available within "60" seconds
+        Then valkey host "valkey1" should become available within "60" seconds
diff --git a/tests/images/docker-compose.yaml b/tests/images/docker-compose.yaml
index 5c93294..ac53036 100644
--- a/tests/images/docker-compose.yaml
+++ b/tests/images/docker-compose.yaml
@@ -49,10 +49,10 @@ services:
       rdsync_net:
         ipv4_address: 192.168.234.12
 
-  redis1:
+  valkey1:
     build:
-      context: ./redis
-    hostname: redis1
+      context: ./valkey
+    hostname: valkey1
     ports:
       - 6379
       - 26379
@@ -61,10 +61,10 @@ services:
       rdsync_net:
         ipv4_address: 192.168.234.13
 
-  redis2:
+  valkey2:
     build:
-      context: ./redis
-    hostname: redis2
+      context: ./valkey
+    hostname: valkey2
     ports:
       - 6379
       - 26379
@@ -73,10 +73,10 @@ services:
       rdsync_net:
         ipv4_address: 192.168.234.14
 
-  redis3:
+  valkey3:
     build:
-      context: ./redis
-    hostname: redis3
+      context: ./valkey
+    hostname: valkey3
     ports:
       - 6379
       - 26379
diff --git a/tests/images/jepsen-compose.yaml b/tests/images/jepsen-compose.yaml
index d31554f..fae9e85 100644
--- a/tests/images/jepsen-compose.yaml
+++ b/tests/images/jepsen-compose.yaml
@@ -49,10 +49,10 @@ services:
       rdsync_net:
         ipv4_address: 192.168.234.12
 
-  redis1:
+  valkey1:
     build:
-      context: ./redis
-    hostname: redis1
+      context: ./valkey
+    hostname: valkey1
     ports:
       - 6379
       - 26379
@@ -62,10 +62,10 @@ services:
       rdsync_net:
         ipv4_address: 192.168.234.13
 
-  redis2:
+  valkey2:
     build:
-      context: ./redis
-    hostname: redis2
+      context: ./valkey
+    hostname: valkey2
     ports:
       - 6379
       - 26379
@@ -75,10 +75,10 @@ services:
       rdsync_net:
         ipv4_address: 192.168.234.14
 
-  redis3:
+  valkey3:
     build:
-      context: ./redis
-    hostname: redis3
+      context: ./valkey
+    hostname: valkey3
     ports:
       - 6379
       - 26379
diff --git a/tests/images/jepsen/jepsen/src/jepsen/rdsync.clj b/tests/images/jepsen/jepsen/src/jepsen/rdsync.clj
index be8d192..1b6ae99 100644
--- a/tests/images/jepsen/jepsen/src/jepsen/rdsync.clj
+++ b/tests/images/jepsen/jepsen/src/jepsen/rdsync.clj
@@ -33,15 +33,15 @@
     client/Reusable
     (reusable? [_ test] true)))
 
-(defn redis-client
-  "Redis client"
+(defn valkey-client
+  "Valkey client"
   [node]
   (reify client/Client
     (setup! [_ test]
-      (info "redis-client setup"))
+      (info "valkey-client setup"))
     (open! [_ test node]
       (cond (not (string/includes? (name node) "zoo"))
-            (redis-client node)
+            (valkey-client node)
             true
             (noop-client)))
 
@@ -70,7 +70,7 @@
     (reusable? [_ test] true)))
 
 (defn db
-  "Redis database"
+  "Valkey database"
   []
   (reify db/DB
     (setup! [_ test node]
@@ -140,7 +140,7 @@
              (case (:f op)
                :switch (assoc op :value
                           (try
-                              (let [node (rand-nth (filter (fn [x] (string/includes? (name x) "redis"))
+                              (let [node (rand-nth (filter (fn [x] (string/includes? (name x) "valkey"))
                                                            (:nodes test)))]
                                 (control/on node
                                   (control/exec :timeout :10 :rdsync :switch :--to node))
@@ -165,7 +165,7 @@
              (case (:f op)
                :kill (assoc op :value
                       (try
-                          (let [node (rand-nth (filter (fn [x] (string/includes? (name x) "redis"))
+                          (let [node (rand-nth (filter (fn [x] (string/includes? (name x) "valkey"))
                                                        (:nodes test)))]
                             (control/on node
                               (control/exec :supervisorctl :signal :KILL :rdsync))
@@ -183,14 +183,14 @@
 (def nemesis-starts [:start-halves :start-ring :start-one :switch :kill])
 
 (defn rdsync-test
-  [redis-nodes zookeeper-nodes]
-  {:nodes     (concat redis-nodes zookeeper-nodes)
+  [valkey-nodes zookeeper-nodes]
+  {:nodes     (concat valkey-nodes zookeeper-nodes)
    :name      "rdsync"
    :os        os/noop
    :db        (db)
    :ssh       {:private-key-path "/root/.ssh/id_rsa" :strict-host-key-checking :no :password ""}
    :net       net/iptables
-   :client    (redis-client nil)
+   :client    (valkey-client nil)
    :nemesis   (nemesis/compose {{:start-halves :start} (nemesis/partition-random-halves)
                                 {:start-ring   :start} (nemesis/partition-majorities-ring)
                                 {:start-one    :start
diff --git a/tests/images/jepsen/jepsen/test/jepsen/rdsync_test.clj b/tests/images/jepsen/jepsen/test/jepsen/rdsync_test.clj
index 7e44445..1886ba1 100644
--- a/tests/images/jepsen/jepsen/test/jepsen/rdsync_test.clj
+++ b/tests/images/jepsen/jepsen/test/jepsen/rdsync_test.clj
@@ -3,9 +3,9 @@
             [jepsen.core :as jepsen]
             [jepsen.rdsync :as rdsync]))
 
-(def redis_nodes ["redis1" "redis2" "redis3"])
+(def valkey_nodes ["valkey1" "valkey2" "valkey3"])
 
 (def zk_nodes ["zoo1" "zoo2" "zoo3"])
 
 (deftest rdsync-test
-  (is (:valid? (:results (jepsen/run! (rdsync/rdsync-test redis_nodes zk_nodes))))))
+  (is (:valid? (:results (jepsen/run! (rdsync/rdsync-test valkey_nodes zk_nodes))))))
diff --git a/tests/images/jepsen/save_logs.sh b/tests/images/jepsen/save_logs.sh
index 5f95ac8..e75b56f 100755
--- a/tests/images/jepsen/save_logs.sh
+++ b/tests/images/jepsen/save_logs.sh
@@ -2,13 +2,13 @@
 
 for i in 1 2 3
 do
-    mkdir -p tests/logs/redis${i}
+    mkdir -p tests/logs/valkey${i}
     mkdir -p tests/logs/zookeeper${i}
 
-    for logfile in /var/log/rdsync.log /var/log/redis/server.log /var/log/redis/senticache.log /var/log/supervisor.log
+    for logfile in /var/log/rdsync.log /var/log/valkey/server.log /var/log/valkey/senticache.log /var/log/supervisor.log
     do
         logname=$(echo "${logfile}" | rev | cut -d/ -f1 | rev)
-        docker exec rdsync-redis${i}-1 cat "${logfile}" > "tests/logs/redis${i}/${logname}"
+        docker exec rdsync-valkey${i}-1 cat "${logfile}" > "tests/logs/valkey${i}/${logname}"
     done
 
     docker exec rdsync-zoo${i}-1 cat /var/log/zookeeper/zookeeper.log > tests/logs/zookeeper${i}/zookeeper.log 2>&1
diff --git a/tests/images/redis/Dockerfile b/tests/images/redis/Dockerfile
deleted file mode 100644
index 2527d9f..0000000
--- a/tests/images/redis/Dockerfile
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM rdsync-base:latest
-COPY . /var/lib/dist/redis
-COPY ./rdsync /usr/bin/rdsync
-COPY ./redis-server /usr/bin/redis-server
-COPY ./redis-cli /usr/bin/redis-cli
-COPY ./redis-senticache /usr/bin/redis-senticache
-RUN bash /var/lib/dist/redis/setup.sh
diff --git a/tests/images/redis/setup.sh b/tests/images/redis/setup.sh
deleted file mode 100755
index 3d25e7b..0000000
--- a/tests/images/redis/setup.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -xe
-
-mkdir -p /var/lib/redis /var/log/redis /etc/redis
-touch /var/log/redis/senticache.log
-cp /var/lib/dist/redis/default.conf /etc/redis/redis.conf
-cp /var/lib/dist/redis/supervisor_redis.conf /etc/supervisor/conf.d
-
-cp /var/lib/dist/redis/setup_*.sh /usr/local/bin
diff --git a/tests/images/redis/setup_cluster.sh b/tests/images/redis/setup_cluster.sh
deleted file mode 100755
index 77ff33c..0000000
--- a/tests/images/redis/setup_cluster.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/bash
-
-set -xe
-
-MASTER=${1}
-
-supervisorctl stop redis
-
-cat >>/etc/redis/redis.conf <<EOF
-cluster-enabled yes
-cluster-config-file "/etc/redis/cluster.conf"
-cluster-slave-no-failover yes
-cluster-allow-replica-migration no
-EOF
-
-supervisorctl start redis
-
-if [ "${MASTER}" != "" ]
-then
-    redis-cli -e -a functestpassword -p 6379 config set offline no
-    master_addr=$(host ${MASTER} | awk '{print $NF}')
-    redis-cli -e -a functestpassword -p 6379 cluster meet ${master_addr} 6379
-    master_id=$(redis-cli -e -a functestpassword -h ${master_addr} -p 6379 cluster myid)
-    tries=0
-    ok=0
-    while [ ${tries} -le 60 ]
-    do
-        if redis-cli -e -a functestpassword -p 6379 cluster nodes | grep -q ${master_id}
-        then
-            ok=1
-            break
-        else
-            tries=$(( tries + 1 ))
-            sleep 1
-        fi
-    done
-    if [ "${ok}" != "1" ]
-    then
-        echo "Cluster meet failed"
-        exit 1
-    fi
-    redis-cli -e -a functestpassword -p 6379 cluster replicate ${master_id}
-    tries=0
-    ok=0
-    while [ ${tries} -le 60 ]
-    do
-        if redis-cli -e -a functestpassword -p 6379 cluster nodes | grep -q myself,slave
-        then
-            ok=1
-            break
-        else
-            tries=$(( tries + 1 ))
-            sleep 1
-        fi
-    done
-    if [ "${ok}" != "1" ]
-    then
-        echo "Cluster replication init failed"
-        exit 1
-    fi
-else
-    redis-cli -e -a functestpassword -p 6379 config set offline no
-    redis-cli -e -a functestpassword -p 6379 cluster addslotsrange 0 16383
-fi
-
-cp /var/lib/dist/redis/supervisor_rdsync.conf /etc/supervisor/conf.d/rdsync.conf
-cp /var/lib/dist/redis/rdsync_cluster.yaml /etc/rdsync.yaml
-
-/var/lib/dist/base/generate_certs.sh
-
-supervisorctl update
diff --git a/tests/images/redis/setup_sentinel.sh b/tests/images/redis/setup_sentinel.sh
deleted file mode 100755
index 6ac7f76..0000000
--- a/tests/images/redis/setup_sentinel.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-
-set -xe
-
-MASTER=${1}
-
-if [ "${MASTER}" != "" ]
-then
-    redis-cli -e -a functestpassword -p 6379 config set offline no
-    master_addr=$(host ${MASTER} | awk '{print $NF}')
-    redis-cli -e -a functestpassword -p 6379 replicaof ${master_addr} 6379
-    redis-cli -e -a functestpassword -p 6379 config rewrite
-    tries=0
-    ok=0
-    while [ ${tries} -le 60 ]
-    do
-        if redis-cli -e -a functestpassword -p 6379 info replication | grep -q master_link_status:up
-        then
-            ok=1
-            break
-        else
-            tries=$(( tries + 1 ))
-            sleep 1
-        fi
-    done
-    if [ "${ok}" != "1" ]
-    then
-        echo "Cluster meet failed"
-        exit 1
-    fi
-else
-    redis-cli -e -a functestpassword -p 6379 config set offline no
-fi
-
-cp /var/lib/dist/redis/supervisor_rdsync.conf /etc/supervisor/conf.d/rdsync.conf
-cp /var/lib/dist/redis/rdsync_sentinel.yaml /etc/rdsync.yaml
-
-cp /var/lib/dist/redis/supervisor_senticache.conf /etc/supervisor/conf.d/senticache.conf
-cp /var/lib/dist/redis/senticache.conf /etc/redis/senticache.conf
-
-/var/lib/dist/base/generate_certs.sh
-
-supervisorctl update
diff --git a/tests/images/redis/supervisor_redis.conf b/tests/images/redis/supervisor_redis.conf
deleted file mode 100644
index 54722b7..0000000
--- a/tests/images/redis/supervisor_redis.conf
+++ /dev/null
@@ -1,9 +0,0 @@
-[program:redis]
-command=/usr/bin/redis-server /etc/redis/redis.conf
-process_name=%(program_name)s
-autostart=true
-autorestart=false
-stopsignal=TERM
-priority=5
-redirect_stderr=true
-stdout_logfile=/var/log/redis/server.log
diff --git a/tests/images/valkey/Dockerfile b/tests/images/valkey/Dockerfile
new file mode 100644
index 0000000..56bde13
--- /dev/null
+++ b/tests/images/valkey/Dockerfile
@@ -0,0 +1,7 @@
+FROM rdsync-base:latest
+COPY . /var/lib/dist/valkey
+COPY ./rdsync /usr/bin/rdsync
+COPY ./valkey-server /usr/bin/valkey-server
+COPY ./valkey-cli /usr/bin/valkey-cli
+COPY ./valkey-senticache /usr/bin/valkey-senticache
+RUN bash /var/lib/dist/valkey/setup.sh
diff --git a/tests/images/redis/default.conf b/tests/images/valkey/default.conf
similarity index 95%
rename from tests/images/redis/default.conf
rename to tests/images/valkey/default.conf
index ac78a1d..5beb5af 100644
--- a/tests/images/redis/default.conf
+++ b/tests/images/valkey/default.conf
@@ -13,7 +13,7 @@ maxclients 65000
 repl-backlog-size 128mb
 maxmemory 1gb
 save ""
-dir "/var/lib/redis"
+dir "/var/lib/valkey"
 client-output-buffer-limit normal 2mb 1mb 60
 client-output-buffer-limit replica 16mb 1mb 60
 client-output-buffer-limit pubsub 2mb 1mb 60
diff --git a/tests/images/redis/rdsync_cluster.yaml b/tests/images/valkey/rdsync_cluster.yaml
similarity index 84%
rename from tests/images/redis/rdsync_cluster.yaml
rename to tests/images/valkey/rdsync_cluster.yaml
index 784c758..89e51c3 100644
--- a/tests/images/redis/rdsync_cluster.yaml
+++ b/tests/images/valkey/rdsync_cluster.yaml
@@ -5,10 +5,10 @@ pprof_addr: ":8081"
 info_file: /var/run/rdsync.info
 maintenance_file: /var/run/rdsync.maintenance
 daemon_lock_file: /var/run/rdsync.lock
-redis:
+valkey:
   auth_password: functestpassword
-  restart_command: supervisorctl restart redis
-  aof_path: /var/lib/redis/appendonlydir
+  restart_command: supervisorctl restart valkey
+  aof_path: /var/lib/valkey/appendonlydir
 zookeeper:
   session_timeout: 3s
   namespace: /test
diff --git a/tests/images/redis/rdsync_sentinel.yaml b/tests/images/valkey/rdsync_sentinel.yaml
similarity index 87%
rename from tests/images/redis/rdsync_sentinel.yaml
rename to tests/images/valkey/rdsync_sentinel.yaml
index dea1170..f0a79a7 100644
--- a/tests/images/redis/rdsync_sentinel.yaml
+++ b/tests/images/valkey/rdsync_sentinel.yaml
@@ -5,10 +5,10 @@ pprof_addr: ":8081"
 info_file: /var/run/rdsync.info
 maintenance_file: /var/run/rdsync.maintenance
 daemon_lock_file: /var/run/rdsync.lock
-redis:
+valkey:
   auth_password: functestpassword
-  restart_command: supervisorctl restart redis
-  aof_path: /var/lib/redis/appendonlydir
+  restart_command: supervisorctl restart valkey
+  aof_path: /var/lib/valkey/appendonlydir
 sentinel_mode:
   announce_hostname: true
   cluster_name: functest
diff --git a/tests/images/redis/senticache.conf b/tests/images/valkey/senticache.conf
similarity index 100%
rename from tests/images/redis/senticache.conf
rename to tests/images/valkey/senticache.conf
diff --git a/tests/images/valkey/setup.sh b/tests/images/valkey/setup.sh
new file mode 100755
index 0000000..5bb69bc
--- /dev/null
+++ b/tests/images/valkey/setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -xe
+
+mkdir -p /var/lib/valkey /var/log/valkey /etc/valkey
+touch /var/log/valkey/senticache.log
+cp /var/lib/dist/valkey/default.conf /etc/valkey/valkey.conf
+cp /var/lib/dist/valkey/supervisor_valkey.conf /etc/supervisor/conf.d
+
+cp /var/lib/dist/valkey/setup_*.sh /usr/local/bin
diff --git a/tests/images/valkey/setup_cluster.sh b/tests/images/valkey/setup_cluster.sh
new file mode 100755
index 0000000..e615e39
--- /dev/null
+++ b/tests/images/valkey/setup_cluster.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+set -xe
+
+MASTER=${1}
+
+supervisorctl stop valkey
+
+cat >>/etc/valkey/valkey.conf <<EOF
+cluster-enabled yes
+cluster-config-file "/etc/valkey/cluster.conf"
+cluster-slave-no-failover yes
+cluster-allow-replica-migration no
+EOF
+
+supervisorctl start valkey
+
+if [ "${MASTER}" != "" ]
+then
+    valkey-cli -e -a functestpassword -p 6379 config set offline no
+    master_addr=$(host ${MASTER} | awk '{print $NF}')
+    valkey-cli -e -a functestpassword -p 6379 cluster meet ${master_addr} 6379
+    master_id=$(valkey-cli -e -a functestpassword -h ${master_addr} -p 6379 cluster myid)
+    tries=0
+    ok=0
+    while [ ${tries} -le 60 ]
+    do
+        if valkey-cli -e -a functestpassword -p 6379 cluster nodes | grep -q ${master_id}
+        then
+            ok=1
+            break
+        else
+            tries=$(( tries + 1 ))
+            sleep 1
+        fi
+    done
+    if [ "${ok}" != "1" ]
+    then
+        echo "Cluster meet failed"
+        exit 1
+    fi
+    valkey-cli -e -a functestpassword -p 6379 cluster replicate ${master_id}
+    tries=0
+    ok=0
+    while [ ${tries} -le 60 ]
+    do
+        if valkey-cli -e -a functestpassword -p 6379 cluster nodes | grep -q myself,slave
+        then
+            ok=1
+            break
+        else
+            tries=$(( tries + 1 ))
+            sleep 1
+        fi
+    done
+    if [ "${ok}" != "1" ]
+    then
+        echo "Cluster replication init failed"
+        exit 1
+    fi
+else
+    valkey-cli -e -a functestpassword -p 6379 config set offline no
+    valkey-cli -e -a functestpassword -p 6379 cluster addslotsrange 0 16383
+fi
+
+cp /var/lib/dist/valkey/supervisor_rdsync.conf /etc/supervisor/conf.d/rdsync.conf
+cp /var/lib/dist/valkey/rdsync_cluster.yaml /etc/rdsync.yaml
+
+/var/lib/dist/base/generate_certs.sh
+
+supervisorctl update
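
Note (illustration, not part of the patch): the retry loops in the script above poll CLUSTER NODES with valkey-cli until the node reports itself as a replica. For comparison only, a hedged Go sketch of the same wait using the valkey-go client introduced by this patch; the address, password, and timeout are assumptions rather than values from the patch.

// Illustrative sketch: the same "wait until this node is a replica" loop as
// the shell script above, expressed with the valkey-go client. Address,
// password and timeout are assumptions, not values from the patch.
package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	client "github.com/valkey-io/valkey-go"
)

func waitForReplica(c client.Client, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		out, err := c.Do(ctx, c.B().Arbitrary("CLUSTER", "NODES").Build()).ToString()
		cancel()
		if err == nil && strings.Contains(out, "myself,slave") {
			return nil
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("cluster replication init failed")
}

func main() {
	c, err := client.NewClient(client.ClientOption{
		InitAddress:       []string{"localhost:6379"},
		Password:          "functestpassword",
		ForceSingleClient: true, // inspect this node only, like valkey-cli -p 6379
	})
	if err != nil {
		panic(err)
	}
	defer c.Close()

	if err := waitForReplica(c, 60*time.Second); err != nil {
		panic(err)
	}
	fmt.Println("node joined as replica")
}
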
diff --git a/tests/images/valkey/setup_sentinel.sh b/tests/images/valkey/setup_sentinel.sh
new file mode 100755
index 0000000..221bfd3
--- /dev/null
+++ b/tests/images/valkey/setup_sentinel.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+set -xe
+
+MASTER=${1}
+
+if [ "${MASTER}" != "" ]
+then
+    valkey-cli -e -a functestpassword -p 6379 config set offline no
+    master_addr=$(host ${MASTER} | awk '{print $NF}')
+    valkey-cli -e -a functestpassword -p 6379 replicaof ${master_addr} 6379
+    valkey-cli -e -a functestpassword -p 6379 config rewrite
+    tries=0
+    ok=0
+    while [ ${tries} -le 60 ]
+    do
+        if valkey-cli -e -a functestpassword -p 6379 info replication | grep -q master_link_status:up
+        then
+            ok=1
+            break
+        else
+            tries=$(( tries + 1 ))
+            sleep 1
+        fi
+    done
+    if [ "${ok}" != "1" ]
+    then
+        echo "Cluster meet failed"
+        exit 1
+    fi
+else
+    valkey-cli -e -a functestpassword -p 6379 config set offline no
+fi
+
+cp /var/lib/dist/valkey/supervisor_rdsync.conf /etc/supervisor/conf.d/rdsync.conf
+cp /var/lib/dist/valkey/rdsync_sentinel.yaml /etc/rdsync.yaml
+
+cp /var/lib/dist/valkey/supervisor_senticache.conf /etc/supervisor/conf.d/senticache.conf
+cp /var/lib/dist/valkey/senticache.conf /etc/valkey/senticache.conf
+
+/var/lib/dist/base/generate_certs.sh
+
+supervisorctl update
diff --git a/tests/images/redis/supervisor_rdsync.conf b/tests/images/valkey/supervisor_rdsync.conf
similarity index 100%
rename from tests/images/redis/supervisor_rdsync.conf
rename to tests/images/valkey/supervisor_rdsync.conf
diff --git a/tests/images/redis/supervisor_senticache.conf b/tests/images/valkey/supervisor_senticache.conf
similarity index 54%
rename from tests/images/redis/supervisor_senticache.conf
rename to tests/images/valkey/supervisor_senticache.conf
index e3789d1..1bca6f0 100644
--- a/tests/images/redis/supervisor_senticache.conf
+++ b/tests/images/valkey/supervisor_senticache.conf
@@ -1,9 +1,9 @@
 [program:senticache]
-command=/usr/bin/redis-senticache /etc/redis/senticache.conf
+command=/usr/bin/valkey-senticache /etc/valkey/senticache.conf
 process_name=%(program_name)s
 autostart=true
 autorestart=true
 stopsignal=TERM
 priority=5
 redirect_stderr=true
-stdout_logfile=/var/log/redis/senticache.log
+stdout_logfile=/var/log/valkey/senticache.log
diff --git a/tests/images/valkey/supervisor_valkey.conf b/tests/images/valkey/supervisor_valkey.conf
new file mode 100644
index 0000000..2fb14b9
--- /dev/null
+++ b/tests/images/valkey/supervisor_valkey.conf
@@ -0,0 +1,9 @@
+[program:valkey]
+command=/usr/bin/valkey-server /etc/valkey/valkey.conf
+process_name=%(program_name)s
+autostart=true
+autorestart=false
+stopsignal=TERM
+priority=5
+redirect_stderr=true
+stdout_logfile=/var/log/valkey/server.log
diff --git a/tests/images/zookeeper/setup_zk.sh b/tests/images/zookeeper/setup_zk.sh
index a576f64..de37e53 100755
--- a/tests/images/zookeeper/setup_zk.sh
+++ b/tests/images/zookeeper/setup_zk.sh
@@ -4,6 +4,6 @@ set -xe
 
 retriable_path_create.sh /test
 retriable_path_create.sh /test/ha_nodes
-retriable_path_create.sh /test/ha_nodes/redis1 set_priority
-retriable_path_create.sh /test/ha_nodes/redis2 set_priority
-retriable_path_create.sh /test/ha_nodes/redis3 set_priority
+retriable_path_create.sh /test/ha_nodes/valkey1 set_priority
+retriable_path_create.sh /test/ha_nodes/valkey2 set_priority
+retriable_path_create.sh /test/ha_nodes/valkey3 set_priority
diff --git a/tests/rdsync_test.go b/tests/rdsync_test.go
index 69ebdee..85bf438 100644
--- a/tests/rdsync_test.go
+++ b/tests/rdsync_test.go
@@ -7,6 +7,7 @@ import (
 	"html/template"
 	"io"
 	"log"
+	"net"
 	"os"
 	"path/filepath"
 	"strings"
@@ -15,7 +16,7 @@ import (
 
 	"github.com/cucumber/godog"
 	"github.com/go-zookeeper/zk"
-	"github.com/redis/go-redis/v9"
+	client "github.com/valkey-io/valkey-go"
 
 	"github.com/yandex/rdsync/internal/dcs"
 	"github.com/yandex/rdsync/tests/testutil"
@@ -23,25 +24,25 @@ import (
 )
 
 const (
-	zkName                     = "zoo"
-	zkPort                     = 2181
-	zkConnectTimeout           = 5 * time.Second
-	redisName                  = "redis"
-	redisPort                  = 6379
-	senticachePort             = 26379
-	redisPassword              = "functestpassword"
-	redisConnectTimeout        = 30 * time.Second
-	redisInitialConnectTimeout = 2 * time.Minute
-	redisCmdTimeout            = 15 * time.Second
-	testUser                   = "testuser"
-	testPassword               = "testpassword123"
+	zkName                      = "zoo"
+	zkPort                      = 2181
+	zkConnectTimeout            = 5 * time.Second
+	valkeyName                  = "valkey"
+	valkeyPort                  = 6379
+	senticachePort              = 26379
+	valkeyPassword              = "functestpassword"
+	valkeyConnectTimeout        = 30 * time.Second
+	valkeyInitialConnectTimeout = 2 * time.Minute
+	valkeyCmdTimeout            = 15 * time.Second
+	testUser                    = "testuser"
+	testPassword                = "testpassword123"
 )
 
-var redisLogsToSave = map[string]string{
-	"/var/log/supervisor.log":       "supervisor.log",
-	"/var/log/rdsync.log":           "rdsync.log",
-	"/var/log/redis/server.log":     "redis.log",
-	"/var/log/redis/senticache.log": "senticache.log",
+var valkeyLogsToSave = map[string]string{
+	"/var/log/supervisor.log":        "supervisor.log",
+	"/var/log/rdsync.log":            "rdsync.log",
+	"/var/log/valkey/server.log":     "valkey.log",
+	"/var/log/valkey/senticache.log": "senticache.log",
 }
 
 var zkLogsToSave = map[string]string{
@@ -60,10 +61,10 @@ type testContext struct {
 	composer            testutil.Composer
 	composerEnv         []string
 	zk                  *zk.Conn
-	conns               map[string]*redis.Client
-	senticaches         map[string]*redis.Client
+	conns               map[string]client.Client
+	senticaches         map[string]client.Client
 	zkQueryResult       string
-	redisCmdResult      string
+	valkeyCmdResult     string
 	senticacheCmdResult string
 	commandRetcode      int
 	commandOutput       string
@@ -77,8 +78,8 @@ func newTestContext() (*testContext, error) {
 	if err != nil {
 		return nil, err
 	}
-	tctx.conns = make(map[string]*redis.Client)
-	tctx.senticaches = make(map[string]*redis.Client)
+	tctx.conns = make(map[string]client.Client)
+	tctx.senticaches = make(map[string]client.Client)
 	tctx.acl = zk.DigestACL(zk.PermAll, testUser, testPassword)
 	return tctx, nil
 }
@@ -87,8 +88,8 @@ func (tctx *testContext) saveLogs(scenario string) error {
 	for _, service := range tctx.composer.Services() {
 		var logsToSave map[string]string
 		switch {
-		case strings.HasPrefix(service, redisName):
-			logsToSave = redisLogsToSave
+		case strings.HasPrefix(service, valkeyName):
+			logsToSave = valkeyLogsToSave
 		case strings.HasPrefix(service, zkName):
 			logsToSave = zkLogsToSave
 		default:
@@ -172,17 +173,13 @@ func (tctx *testContext) cleanup() {
 		tctx.zk = nil
 	}
 	for _, conn := range tctx.conns {
-		if err := conn.Close(); err != nil {
-			log.Printf("failed to close redis connection: %s", err)
-		}
+		conn.Close()
 	}
-	tctx.conns = make(map[string]*redis.Client)
+	tctx.conns = make(map[string]client.Client)
 	for _, conn := range tctx.senticaches {
-		if err := conn.Close(); err != nil {
-			log.Printf("failed to close senticache connection: %s", err)
-		}
+		conn.Close()
 	}
-	tctx.senticaches = make(map[string]*redis.Client)
+	tctx.senticaches = make(map[string]client.Client)
 	if err := tctx.composer.Down(); err != nil {
 		log.Printf("failed to tear down compose: %s", err)
 	}
@@ -190,7 +187,7 @@ func (tctx *testContext) cleanup() {
 	tctx.variables = make(map[string]interface{})
 	tctx.composerEnv = make([]string, 0)
 	tctx.zkQueryResult = ""
-	tctx.redisCmdResult = ""
+	tctx.valkeyCmdResult = ""
 	tctx.senticacheCmdResult = ""
 	tctx.commandRetcode = 0
 	tctx.commandOutput = ""
@@ -215,83 +212,102 @@ func (tctx *testContext) connectZookeeper(addrs []string, timeout time.Duration)
 	return conn, nil
 }
 
-func (tctx *testContext) connectRedis(addr string, timeout time.Duration) (*redis.Client, error) {
-	opts := redis.Options{
-		Addr:         addr,
-		Password:     redisPassword,
-		DialTimeout:  time.Second,
-		ReadTimeout:  time.Second,
-		PoolSize:     1,
-		MinIdleConns: 1,
-		Protocol:     2,
-	}
-	conn := redis.NewClient(&opts)
-	// redis connection is lazy, so we need ping it
+func (tctx *testContext) connectValkey(addr string, timeout time.Duration) (client.Client, error) {
+	opts := client.ClientOption{
+		InitAddress:           []string{addr},
+		AlwaysRESP2:           true,
+		ForceSingleClient:     true,
+		DisableAutoPipelining: true,
+		DisableCache:          true,
+		BlockingPoolMinSize:   1,
+		BlockingPoolCleanup:   time.Second,
+		Password:              valkeyPassword,
+		Dialer:                net.Dialer{Timeout: time.Second},
+		ConnWriteTimeout:      time.Second,
+	}
+	var conn client.Client
 	var err error
 	testutil.Retry(func() bool {
-		ctx, cancel := context.WithTimeout(context.Background(), redisCmdTimeout)
+		conn, err = client.NewClient(opts)
+		if err != nil {
+			conn = nil
+			return false
+		}
+		ctx, cancel := context.WithTimeout(context.Background(), valkeyCmdTimeout)
 		defer cancel()
-		err = conn.Ping(ctx).Err()
+		err = conn.Do(ctx, conn.B().Ping().Build()).Error()
 		return err == nil
 	}, timeout, time.Second)
 	if err != nil {
-		_ = conn.Close()
+		if conn != nil {
+			conn.Close()
+		}
 		return nil, err
 	}
 	return conn, nil
 }
 
-func (tctx *testContext) connectSenticache(addr string, timeout time.Duration) (*redis.Client, error) {
-	opts := redis.Options{
-		Addr:         addr,
-		DialTimeout:  time.Second,
-		ReadTimeout:  time.Second,
-		PoolSize:     1,
-		MinIdleConns: 1,
+func (tctx *testContext) connectSenticache(addr string, timeout time.Duration) (client.Client, error) {
+	opts := client.ClientOption{
+		InitAddress:           []string{addr},
+		AlwaysRESP2:           true,
+		ForceSingleClient:     true,
+		DisableAutoPipelining: true,
+		DisableCache:          true,
+		BlockingPoolMinSize:   1,
+		BlockingPoolCleanup:   time.Second,
+		Dialer:                net.Dialer{Timeout: time.Second},
+		ConnWriteTimeout:      time.Second,
 	}
-	conn := redis.NewClient(&opts)
-	// redis connection is lazy, so we need ping it
+	var conn client.Client
 	var err error
 	testutil.Retry(func() bool {
-		ctx, cancel := context.WithTimeout(context.Background(), redisCmdTimeout)
+		conn, err = client.NewClient(opts)
+		if err != nil {
+			conn = nil
+			return false
+		}
+		ctx, cancel := context.WithTimeout(context.Background(), valkeyCmdTimeout)
 		defer cancel()
-		err = conn.Ping(ctx).Err()
+		err = conn.Do(ctx, conn.B().Ping().Build()).Error()
 		return err == nil
 	}, timeout, time.Second)
 	if err != nil {
-		_ = conn.Close()
+		if conn != nil {
+			conn.Close()
+		}
 		return nil, err
 	}
 	return conn, nil
 }
 
-func (tctx *testContext) getRedisConnection(host string) (*redis.Client, error) {
+func (tctx *testContext) getValkeyConnection(host string) (client.Client, error) {
 	conn, ok := tctx.conns[host]
 	if !ok {
-		return nil, fmt.Errorf("redis %s is not in our host list", host)
+		return nil, fmt.Errorf("valkey %s is not in our host list", host)
 	}
-	err := conn.Ping(context.Background()).Err()
+	err := conn.Do(context.Background(), conn.B().Ping().Build()).Error()
 	if err == nil {
 		return conn, nil
 	}
-	addr, err := tctx.composer.GetAddr(host, redisPort)
+	addr, err := tctx.composer.GetAddr(host, valkeyPort)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get redis addr %s: %s", host, err)
+		return nil, fmt.Errorf("failed to get valkey addr %s: %s", host, err)
 	}
-	conn, err = tctx.connectRedis(addr, redisConnectTimeout)
+	conn, err = tctx.connectValkey(addr, valkeyConnectTimeout)
 	if err != nil {
-		return nil, fmt.Errorf("failed to connect to redis %s: %s", host, err)
+		return nil, fmt.Errorf("failed to connect to valkey %s: %s", host, err)
 	}
 	tctx.conns[host] = conn
 	return conn, nil
 }
 
-func (tctx *testContext) getSenticacheConnection(host string) (*redis.Client, error) {
+func (tctx *testContext) getSenticacheConnection(host string) (client.Client, error) {
 	conn, ok := tctx.senticaches[host]
 	if !ok {
 		return nil, fmt.Errorf("senticache %s is not in our host list", host)
 	}
-	err := conn.Ping(context.Background()).Err()
+	err := conn.Do(context.Background(), conn.B().Ping().Build()).Error()
 	if err == nil {
 		return conn, nil
 	}
@@ -299,7 +315,7 @@ func (tctx *testContext) getSenticacheConnection(host string) (*redis.Client, er
 	if err != nil {
 		return nil, fmt.Errorf("failed to get senticache addr %s: %s", host, err)
 	}
-	conn, err = tctx.connectSenticache(addr, redisConnectTimeout)
+	conn, err = tctx.connectSenticache(addr, valkeyConnectTimeout)
 	if err != nil {
 		return nil, fmt.Errorf("failed to connect to senticache %s: %s", host, err)
 	}
@@ -307,29 +323,39 @@ func (tctx *testContext) getSenticacheConnection(host string) (*redis.Client, er
 	return conn, nil
 }
 
-func (tctx *testContext) runRedisCmd(host string, cmd []string) (string, error) {
-	conn, err := tctx.getRedisConnection(host)
+func (tctx *testContext) runValkeyCmd(host string, cmd []string) (string, error) {
+	conn, err := tctx.getValkeyConnection(host)
 	if err != nil {
 		return "", err
 	}
 
-	tctx.redisCmdResult = ""
-	ctx, cancel := context.WithTimeout(context.Background(), redisCmdTimeout)
+	tctx.valkeyCmdResult = ""
+	ctx, cancel := context.WithTimeout(context.Background(), valkeyCmdTimeout)
 	defer cancel()
-	var iargs []interface{}
-	for _, arg := range cmd {
-		iargs = append(iargs, arg)
-	}
-	result := conn.Do(ctx, iargs...)
+	result := conn.Do(ctx, conn.B().Arbitrary(cmd...).Build())
 
-	err = result.Err()
+	err = result.Error()
 	if err != nil {
-		tctx.redisCmdResult = err.Error()
+		tctx.valkeyCmdResult = err.Error()
 	} else {
-		tctx.redisCmdResult = result.String()
+		message, err := result.ToMessage()
+		if err != nil {
+			tctx.valkeyCmdResult = err.Error()
+			return tctx.valkeyCmdResult, err
+		}
+		if message.IsArray() {
+			strSlice, err := message.AsStrSlice()
+			if err != nil {
+				tctx.valkeyCmdResult = err.Error()
+			} else {
+				tctx.valkeyCmdResult = strings.Join(strSlice, " ")
+			}
+		} else {
+			tctx.valkeyCmdResult = message.String()
+		}
 	}
 
-	return tctx.redisCmdResult, err
+	return tctx.valkeyCmdResult, err
 }
 
 func (tctx *testContext) runSenticacheCmd(host string, cmd []string) (string, error) {
@@ -339,15 +365,11 @@ func (tctx *testContext) runSenticacheCmd(host string, cmd []string) (string, er
 	}
 
 	tctx.senticacheCmdResult = ""
-	ctx, cancel := context.WithTimeout(context.Background(), redisCmdTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), valkeyCmdTimeout)
 	defer cancel()
-	var iargs []interface{}
-	for _, arg := range cmd {
-		iargs = append(iargs, arg)
-	}
-	result := conn.Do(ctx, iargs...)
+	result := conn.Do(ctx, conn.B().Arbitrary(cmd...).Build())
 
-	err = result.Err()
+	err = result.Error()
 	if err != nil {
 		tctx.senticacheCmdResult = err.Error()
 	} else {
@@ -387,10 +409,10 @@ func (tctx *testContext) baseShardIsUpAndRunning() error {
 	}
 
 	err = tctx.composer.RunCommandAtHosts("/var/lib/dist/base/generate_certs.sh && supervisorctl restart rdsync",
-		"redis",
+		"valkey",
 		time.Minute)
 	if err != nil {
-		return fmt.Errorf("failed to generate certs in redis hosts: %s", err)
+		return fmt.Errorf("failed to generate certs in valkey hosts: %s", err)
 	}
 
 	if err = tctx.createZookeeperNode("/test"); err != nil {
@@ -400,9 +422,9 @@ func (tctx *testContext) baseShardIsUpAndRunning() error {
 		return fmt.Errorf("failed to create path prefix zk node due %s", err)
 	}
 
-	// prepare redis nodes
+	// prepare valkey nodes
 	for _, service := range tctx.composer.Services() {
-		if strings.HasPrefix(service, redisName) {
+		if strings.HasPrefix(service, valkeyName) {
 			if err = tctx.createZookeeperNode(dcs.JoinPath("/test", dcs.PathHANodesPrefix, service)); err != nil {
 				return fmt.Errorf("failed to create %s zk node due %s", service, err)
 			}
@@ -416,29 +438,29 @@ func (tctx *testContext) stepClusteredShardIsUpAndRunning() error {
 	if err != nil {
 		return err
 	}
-	_, _, err = tctx.composer.RunCommand("redis1", "setup_cluster.sh", 1*time.Minute)
+	_, _, err = tctx.composer.RunCommand("valkey1", "setup_cluster.sh", 1*time.Minute)
 	if err != nil {
 		return err
 	}
-	_, _, err = tctx.composer.RunCommand("redis2", "setup_cluster.sh redis1", 1*time.Minute)
+	_, _, err = tctx.composer.RunCommand("valkey2", "setup_cluster.sh valkey1", 1*time.Minute)
 	if err != nil {
 		return err
 	}
-	_, _, err = tctx.composer.RunCommand("redis3", "setup_cluster.sh redis1", 1*time.Minute)
+	_, _, err = tctx.composer.RunCommand("valkey3", "setup_cluster.sh valkey1", 1*time.Minute)
 	if err != nil {
 		return err
 	}
 
-	// check redis nodes
+	// check valkey nodes
 	for _, service := range tctx.composer.Services() {
-		if strings.HasPrefix(service, redisName) {
-			addr, err := tctx.composer.GetAddr(service, redisPort)
+		if strings.HasPrefix(service, valkeyName) {
+			addr, err := tctx.composer.GetAddr(service, valkeyPort)
 			if err != nil {
-				return fmt.Errorf("failed to get redis addr %s: %s", service, err)
+				return fmt.Errorf("failed to get valkey addr %s: %s", service, err)
 			}
-			conn, err := tctx.connectRedis(addr, redisInitialConnectTimeout)
+			conn, err := tctx.connectValkey(addr, valkeyInitialConnectTimeout)
 			if err != nil {
-				return fmt.Errorf("failed to connect to redis %s: %s", service, err)
+				return fmt.Errorf("failed to connect to valkey %s: %s", service, err)
 			}
 			tctx.conns[service] = conn
 		}
@@ -451,35 +473,35 @@ func (tctx *testContext) stepSentinelShardIsUpAndRunning() error {
 	if err != nil {
 		return err
 	}
-	_, _, err = tctx.composer.RunCommand("redis1", "setup_sentinel.sh", 1*time.Minute)
+	_, _, err = tctx.composer.RunCommand("valkey1", "setup_sentinel.sh", 1*time.Minute)
 	if err != nil {
 		return err
 	}
-	_, _, err = tctx.composer.RunCommand("redis2", "setup_sentinel.sh redis1", 1*time.Minute)
+	_, _, err = tctx.composer.RunCommand("valkey2", "setup_sentinel.sh valkey1", 1*time.Minute)
 	if err != nil {
 		return err
 	}
-	_, _, err = tctx.composer.RunCommand("redis3", "setup_sentinel.sh redis1", 1*time.Minute)
+	_, _, err = tctx.composer.RunCommand("valkey3", "setup_sentinel.sh valkey1", 1*time.Minute)
 	if err != nil {
 		return err
 	}
-	// check redis nodes
+	// check valkey nodes
 	for _, service := range tctx.composer.Services() {
-		if strings.HasPrefix(service, redisName) {
-			addr, err := tctx.composer.GetAddr(service, redisPort)
+		if strings.HasPrefix(service, valkeyName) {
+			addr, err := tctx.composer.GetAddr(service, valkeyPort)
 			if err != nil {
-				return fmt.Errorf("failed to get redis addr %s: %s", service, err)
+				return fmt.Errorf("failed to get valkey addr %s: %s", service, err)
 			}
-			conn, err := tctx.connectRedis(addr, redisInitialConnectTimeout)
+			conn, err := tctx.connectValkey(addr, valkeyInitialConnectTimeout)
 			if err != nil {
-				return fmt.Errorf("failed to connect to redis %s: %s", service, err)
+				return fmt.Errorf("failed to connect to valkey %s: %s", service, err)
 			}
 			tctx.conns[service] = conn
 			saddr, err2 := tctx.composer.GetAddr(service, senticachePort)
 			if err2 != nil {
 				return fmt.Errorf("failed to get senticache addr %s: %s", service, err2)
 			}
-			sconn, err2 := tctx.connectSenticache(saddr, redisInitialConnectTimeout)
+			sconn, err2 := tctx.connectSenticache(saddr, valkeyInitialConnectTimeout)
 			if err2 != nil {
 				return fmt.Errorf("failed to connect to senticache %s: %s", service, err2)
 			}
@@ -491,7 +513,7 @@ func (tctx *testContext) stepSentinelShardIsUpAndRunning() error {
 
 func (tctx *testContext) stepPersistenceDisabled() error {
 	for _, service := range tctx.composer.Services() {
-		if strings.HasPrefix(service, redisName) {
+		if strings.HasPrefix(service, valkeyName) {
 			_, _, err := tctx.composer.RunCommand(service, "sed -i /OnReplicas/d /etc/rdsync.yaml", 10*time.Second)
 			if err != nil {
 				return err
@@ -500,19 +522,19 @@ func (tctx *testContext) stepPersistenceDisabled() error {
 			if err != nil {
 				return err
 			}
-			_, err = tctx.runRedisCmd(service, []string{"CONFIG", "SET", "appendonly", "no"})
+			_, err = tctx.runValkeyCmd(service, []string{"CONFIG", "SET", "appendonly", "no"})
 			if err != nil {
 				return err
 			}
-			_, err = tctx.runRedisCmd(service, []string{"CONFIG", "SET", "save", ""})
+			_, err = tctx.runValkeyCmd(service, []string{"CONFIG", "SET", "save", ""})
 			if err != nil {
 				return err
 			}
-			_, _, err = tctx.composer.RunCommand(service, "echo 'appendonly no' >> /etc/redis/redis.conf", 10*time.Second)
+			_, _, err = tctx.composer.RunCommand(service, "echo 'appendonly no' >> /etc/valkey/valkey.conf", 10*time.Second)
 			if err != nil {
 				return err
 			}
-			_, _, err = tctx.composer.RunCommand(service, "echo 'save \\'\\'' >> /etc/redis/redis.conf", 10*time.Second)
+			_, _, err = tctx.composer.RunCommand(service, "echo 'save \\'\\'' >> /etc/valkey/valkey.conf", 10*time.Second)
 			if err != nil {
 				return err
 			}
@@ -561,26 +583,26 @@ func (tctx *testContext) stepHostIsDeleted(host string) error {
 	return tctx.stepIDeleteZookeeperNode(dcs.JoinPath("/test", dcs.PathHANodesPrefix, host))
 }
 
-func (tctx *testContext) stepRedisOnHostKilled(host string) error {
-	cmd := "supervisorctl signal KILL redis"
+func (tctx *testContext) stepValkeyOnHostKilled(host string) error {
+	cmd := "supervisorctl signal KILL valkey"
 	_, _, err := tctx.composer.RunCommand(host, cmd, 10*time.Second)
 	return err
 }
 
-func (tctx *testContext) stepRedisOnHostStarted(host string) error {
-	cmd := "supervisorctl start redis"
+func (tctx *testContext) stepValkeyOnHostStarted(host string) error {
+	cmd := "supervisorctl start valkey"
 	_, _, err := tctx.composer.RunCommand(host, cmd, 10*time.Second)
 	return err
 }
 
-func (tctx *testContext) stepRedisOnHostRestarted(host string) error {
-	cmd := "supervisorctl restart redis"
+func (tctx *testContext) stepValkeyOnHostRestarted(host string) error {
+	cmd := "supervisorctl restart valkey"
 	_, _, err := tctx.composer.RunCommand(host, cmd, 30*time.Second)
 	return err
 }
 
-func (tctx *testContext) stepRedisOnHostStopped(host string) error {
-	cmd := "supervisorctl signal TERM redis"
+func (tctx *testContext) stepValkeyOnHostStopped(host string) error {
+	cmd := "supervisorctl signal TERM valkey"
 	_, _, err := tctx.composer.RunCommand(host, cmd, 10*time.Second)
 	return err
 }
@@ -694,16 +716,16 @@ func (tctx *testContext) stepIRunCmdOnHost(host string, body *godog.DocString) e
 			args = append(args, strings.Split(strings.TrimSpace(arg), " ")...)
 		}
 	}
-	_, err := tctx.runRedisCmd(host, args)
+	_, err := tctx.runValkeyCmd(host, args)
 	return err
 }
 
-func (tctx *testContext) stepRedisCmdResultShouldMatch(matcher string, body *godog.DocString) error {
+func (tctx *testContext) stepValkeyCmdResultShouldMatch(matcher string, body *godog.DocString) error {
 	m, err := matchers.GetMatcher(matcher)
 	if err != nil {
 		return err
 	}
-	return m(tctx.redisCmdResult, strings.TrimSpace(body.Content))
+	return m(tctx.valkeyCmdResult, strings.TrimSpace(body.Content))
 }
 
 func (tctx *testContext) stepIRunSenticacheCmdOnHost(host string, body *godog.DocString) error {
@@ -729,7 +751,7 @@ func (tctx *testContext) stepSenticacheCmdResultShouldMatch(matcher string, body
 }
 
 func (tctx *testContext) stepBreakReplicationOnHost(host string) error {
-	if _, err := tctx.runRedisCmd(host, []string{"CONFIG", "SET", "repl-paused", "yes"}); err != nil {
+	if _, err := tctx.runValkeyCmd(host, []string{"CONFIG", "SET", "repl-paused", "yes"}); err != nil {
 		return err
 	}
 	return nil
@@ -839,8 +861,8 @@ func (tctx *testContext) stepZookeeperNodeShouldNotExistWithin(node string, time
 	return err
 }
 
-func (tctx *testContext) stepRedisHostShouldBeMaster(host string) error {
-	res, err := tctx.runRedisCmd(host, []string{"ROLE"})
+func (tctx *testContext) stepValkeyHostShouldBeMaster(host string) error {
+	res, err := tctx.runValkeyCmd(host, []string{"ROLE"})
 	if err != nil {
 		return err
 	}
@@ -848,8 +870,8 @@ func (tctx *testContext) stepRedisHostShouldBeMaster(host string) error {
 	return m(res, ".*master.*")
 }
 
-func (tctx *testContext) stepRedisHostShouldBeReplicaOf(host, master string) error {
-	res, err := tctx.runRedisCmd(host, []string{"INFO", "replication"})
+func (tctx *testContext) stepValkeyHostShouldBeReplicaOf(host, master string) error {
+	res, err := tctx.runValkeyCmd(host, []string{"INFO", "replication"})
 	if err != nil {
 		return err
 	}
@@ -861,17 +883,17 @@ func (tctx *testContext) stepRedisHostShouldBeReplicaOf(host, master string) err
 	return m(res, fmt.Sprintf(".*master_host:(%s|%s).*", master, ip))
 }
 
-func (tctx *testContext) stepRedisHostShouldBecomeReplicaOfWithin(host, master string, timeout int) error {
+func (tctx *testContext) stepValkeyHostShouldBecomeReplicaOfWithin(host, master string, timeout int) error {
 	var err error
 	testutil.Retry(func() bool {
-		err = tctx.stepRedisHostShouldBeReplicaOf(host, master)
+		err = tctx.stepValkeyHostShouldBeReplicaOf(host, master)
 		return err == nil
 	}, time.Duration(timeout*int(time.Second)), time.Second)
 	return err
 }
 
-func (tctx *testContext) stepReplicationOnRedisHostShouldRunFine(host string) error {
-	res, err := tctx.runRedisCmd(host, []string{"INFO", "replication"})
+func (tctx *testContext) stepReplicationOnValkeyHostShouldRunFine(host string) error {
+	res, err := tctx.runValkeyCmd(host, []string{"INFO", "replication"})
 	if err != nil {
 		return err
 	}
@@ -879,45 +901,45 @@ func (tctx *testContext) stepReplicationOnRedisHostShouldRunFine(host string) er
 	return m(res, ".*master_link_status:up.*")
 }
 
-func (tctx *testContext) stepReplicationOnRedisHostShouldRunFineWithin(host string, timeout int) error {
+func (tctx *testContext) stepReplicationOnValkeyHostShouldRunFineWithin(host string, timeout int) error {
 	var err error
 	testutil.Retry(func() bool {
-		err = tctx.stepReplicationOnRedisHostShouldRunFine(host)
+		err = tctx.stepReplicationOnValkeyHostShouldRunFine(host)
 		return err == nil
 	}, time.Duration(timeout*int(time.Second)), time.Second)
 	return err
 }
 
-func (tctx *testContext) stepRedisHostShouldBecomeUnavailableWithin(host string, timeout int) error {
-	addr, err := tctx.composer.GetAddr(host, redisPort)
+func (tctx *testContext) stepValkeyHostShouldBecomeUnavailableWithin(host string, timeout int) error {
+	addr, err := tctx.composer.GetAddr(host, valkeyPort)
 	if err != nil {
-		return fmt.Errorf("failed to get redis addr %s: %s", host, err)
+		return fmt.Errorf("failed to get valkey addr %s: %s", host, err)
 	}
 	testutil.Retry(func() bool {
-		var conn *redis.Client
-		conn, err = tctx.connectRedis(addr, time.Second)
+		var conn client.Client
+		conn, err = tctx.connectValkey(addr, time.Second)
 		if err == nil {
-			_ = conn.Close()
+			conn.Close()
 			return false
 		}
 		return true
 	}, time.Duration(timeout*int(time.Second)), time.Second)
 	if err == nil {
-		return fmt.Errorf("redis host %s is still available", host)
+		return fmt.Errorf("valkey host %s is still available", host)
 	}
 	return nil
 }
 
-func (tctx *testContext) stepRedisHostShouldBecomeAvailableWithin(host string, timeout int) error {
-	addr, err := tctx.composer.GetAddr(host, redisPort)
+func (tctx *testContext) stepValkeyHostShouldBecomeAvailableWithin(host string, timeout int) error {
+	addr, err := tctx.composer.GetAddr(host, valkeyPort)
 	if err != nil {
-		return fmt.Errorf("failed to get redis addr %s: %s", host, err)
+		return fmt.Errorf("failed to get valkey addr %s: %s", host, err)
 	}
 	testutil.Retry(func() bool {
-		var conn *redis.Client
-		conn, err = tctx.connectRedis(addr, redisConnectTimeout)
+		var conn client.Client
+		conn, err = tctx.connectValkey(addr, valkeyConnectTimeout)
 		if err == nil {
-			_ = conn.Close()
+			conn.Close()
 			return true
 		}
 		return false
@@ -958,8 +980,8 @@ func (tctx *testContext) stepISaveZookeeperQueryResultAs(varname string) error {
 	return nil
 }
 
-func (tctx *testContext) stepISaveRedisCmdResultAs(varname string) error {
-	tctx.variables[varname] = tctx.redisCmdResult
+func (tctx *testContext) stepISaveValkeyCmdResultAs(varname string) error {
+	tctx.variables[varname] = tctx.valkeyCmdResult
 	return nil
 }
 
@@ -1029,7 +1051,7 @@ func InitializeScenario(s *godog.ScenarioContext) {
 	s.After(func(ctx context.Context, scenario *godog.Scenario, err error) (context.Context, error) {
 		if err != nil {
 			name := scenario.Name
-			name = strings.Replace(name, " ", "_", -1)
+			name = strings.ReplaceAll(name, " ", "_")
 			err2 := tctx.saveLogs(name)
 			if err2 != nil {
 				log.Printf("failed to save logs: %v", err2)
@@ -1070,8 +1092,8 @@ func InitializeScenario(s *godog.ScenarioContext) {
 	s.Step(`^I run command on host "([^"]*)" until result match regexp "([^"]*)" with timeout "(\d+)" seconds$`, tctx.stepIRunCommandOnHostUntilResultMatch)
 	s.Step(`^command return code should be "(\d+)"$`, tctx.stepCommandReturnCodeShouldBe)
 	s.Step(`^command output should match (\w+)$`, tctx.stepCommandOutputShouldMatch)
-	s.Step(`^I run command on redis host "([^"]*)"$`, tctx.stepIRunCmdOnHost)
-	s.Step(`^redis cmd result should match (\w+)$`, tctx.stepRedisCmdResultShouldMatch)
+	s.Step(`^I run command on valkey host "([^"]*)"$`, tctx.stepIRunCmdOnHost)
+	s.Step(`^valkey cmd result should match (\w+)$`, tctx.stepValkeyCmdResultShouldMatch)
 	s.Step(`^I run command on senticache host "([^"]*)"$`, tctx.stepIRunSenticacheCmdOnHost)
 	s.Step(`^senticache cmd result should match (\w+)$`, tctx.stepSenticacheCmdResultShouldMatch)
 
@@ -1088,31 +1110,31 @@ func InitializeScenario(s *godog.ScenarioContext) {
 	s.Step(`^zookeeper node "([^"]*)" should not exist$`, tctx.stepZookeeperNodeShouldNotExist)
 	s.Step(`^zookeeper node "([^"]*)" should not exist within "(\d+)" seconds$`, tctx.stepZookeeperNodeShouldNotExistWithin)
 
-	// redis checking
-	s.Step(`^redis host "([^"]*)" should be master$`, tctx.stepRedisHostShouldBeMaster)
-	s.Step(`^redis host "([^"]*)" should be replica of "([^"]*)"$`, tctx.stepRedisHostShouldBeReplicaOf)
-	s.Step(`^redis host "([^"]*)" should become replica of "([^"]*)" within "(\d+)" seconds$`, tctx.stepRedisHostShouldBecomeReplicaOfWithin)
-	s.Step(`^replication on redis host "([^"]*)" should run fine$`, tctx.stepReplicationOnRedisHostShouldRunFine)
-	s.Step(`^replication on redis host "([^"]*)" should run fine within "(\d+)" seconds$`, tctx.stepReplicationOnRedisHostShouldRunFineWithin)
+	// valkey checking
+	s.Step(`^valkey host "([^"]*)" should be master$`, tctx.stepValkeyHostShouldBeMaster)
+	s.Step(`^valkey host "([^"]*)" should be replica of "([^"]*)"$`, tctx.stepValkeyHostShouldBeReplicaOf)
+	s.Step(`^valkey host "([^"]*)" should become replica of "([^"]*)" within "(\d+)" seconds$`, tctx.stepValkeyHostShouldBecomeReplicaOfWithin)
+	s.Step(`^replication on valkey host "([^"]*)" should run fine$`, tctx.stepReplicationOnValkeyHostShouldRunFine)
+	s.Step(`^replication on valkey host "([^"]*)" should run fine within "(\d+)" seconds$`, tctx.stepReplicationOnValkeyHostShouldRunFineWithin)
 
-	s.Step(`^redis host "([^"]*)" should become unavailable within "(\d+)" seconds$`, tctx.stepRedisHostShouldBecomeUnavailableWithin)
-	s.Step(`^redis host "([^"]*)" should become available within "(\d+)" seconds$`, tctx.stepRedisHostShouldBecomeAvailableWithin)
+	s.Step(`^valkey host "([^"]*)" should become unavailable within "(\d+)" seconds$`, tctx.stepValkeyHostShouldBecomeUnavailableWithin)
+	s.Step(`^valkey host "([^"]*)" should become available within "(\d+)" seconds$`, tctx.stepValkeyHostShouldBecomeAvailableWithin)
 
 	// senticache checking
 	s.Step(`^senticache host "([^"]*)" should have master "([^"]*)"$`, tctx.stepSenticacheHostShouldHaveMaster)
 	s.Step(`^senticache host "([^"]*)" should have master "([^"]*)" within "(\d+)" seconds$`, tctx.stepSenticacheHostShouldHaveMasterWithin)
 
-	// redis manipulation
-	s.Step(`^redis on host "([^"]*)" is killed$`, tctx.stepRedisOnHostKilled)
-	s.Step(`^redis on host "([^"]*)" is started$`, tctx.stepRedisOnHostStarted)
-	s.Step(`^redis on host "([^"]*)" is restarted$`, tctx.stepRedisOnHostRestarted)
-	s.Step(`^redis on host "([^"]*)" is stopped$`, tctx.stepRedisOnHostStopped)
+	// valkey manipulation
+	s.Step(`^valkey on host "([^"]*)" is killed$`, tctx.stepValkeyOnHostKilled)
+	s.Step(`^valkey on host "([^"]*)" is started$`, tctx.stepValkeyOnHostStarted)
+	s.Step(`^valkey on host "([^"]*)" is restarted$`, tctx.stepValkeyOnHostRestarted)
+	s.Step(`^valkey on host "([^"]*)" is stopped$`, tctx.stepValkeyOnHostStopped)
 	s.Step(`^I break replication on host "([^"]*)"$`, tctx.stepBreakReplicationOnHost)
 
 	// variables
 	s.Step(`^I save zookeeper query result as "([^"]*)"$`, tctx.stepISaveZookeeperQueryResultAs)
 	s.Step(`^I save command output as "([^"]*)"$`, tctx.stepISaveCommandOutputAs)
-	s.Step(`^I save redis cmd result as "([^"]*)"$`, tctx.stepISaveRedisCmdResultAs)
+	s.Step(`^I save valkey cmd result as "([^"]*)"$`, tctx.stepISaveValkeyCmdResultAs)
 	s.Step(`^I save "([^"]*)" as "([^"]*)"$`, tctx.stepISaveValAs)
 
 	// misc
diff --git a/redis_patches/0001_Add_replication_pause.patch b/valkey_patches/0001_Add_replication_pause.patch
similarity index 64%
rename from redis_patches/0001_Add_replication_pause.patch
rename to valkey_patches/0001_Add_replication_pause.patch
index 9ea28b6..45c67f8 100644
--- a/redis_patches/0001_Add_replication_pause.patch
+++ b/valkey_patches/0001_Add_replication_pause.patch
@@ -1,8 +1,8 @@
 diff --git a/src/config.c b/src/config.c
-index b26704283..1d720132a 100644
+index 2d4e703d9..c1351d74f 100644
 --- a/src/config.c
 +++ b/src/config.c
-@@ -2525,6 +2525,25 @@ static int updateAofAutoGCEnabled(const char **err) {
+@@ -2526,6 +2526,25 @@ static int updateExtendedRedisCompat(const char **err) {
      return 1;
  }
  
@@ -10,13 +10,13 @@ index b26704283..1d720132a 100644
 +    UNUSED(err);
 +
 +    if (server.repl_paused) {
-+        if (server.masterhost) {
-+            if (server.repl_state == REPL_STATE_CONNECTING || slaveIsInHandshakeState() ||
-+                    server.repl_state == REPL_STATE_TRANSFER) {
++        if (server.primary_host) {
++            if (server.repl_state == REPL_STATE_CONNECTING || replicaIsInHandshakeState() ||
++                server.repl_state == REPL_STATE_TRANSFER) {
 +                cancelReplicationHandshake(0);
 +            }
-+            if (server.master) {
-+                freeClient(server.master);
++            if (server.primary) {
++                freeClient(server.primary);
 +            }
 +            server.repl_state = REPL_STATE_CONNECT;
 +        }
@@ -28,65 +28,65 @@ index b26704283..1d720132a 100644
  static int updateSighandlerEnabled(const char **err) {
      UNUSED(err);
      if (server.crashlog_enabled)
-@@ -3095,6 +3114,7 @@ standardConfig static_configs[] = {
-     createBoolConfig("latency-tracking", NULL, MODIFIABLE_CONFIG, server.latency_tracking_enabled, 1, NULL, NULL),
-     createBoolConfig("aof-disable-auto-gc", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, server.aof_disable_auto_gc, 0, NULL, updateAofAutoGCEnabled),
-     createBoolConfig("replica-ignore-disk-write-errors", NULL, MODIFIABLE_CONFIG, server.repl_ignore_disk_write_error, 0, NULL, NULL),
+@@ -3107,6 +3126,7 @@ standardConfig static_configs[] = {
+     createBoolConfig("enable-debug-assert", NULL, IMMUTABLE_CONFIG | HIDDEN_CONFIG, server.enable_debug_assert, 0, NULL, NULL),
+     createBoolConfig("cluster-slot-stats-enabled", NULL, MODIFIABLE_CONFIG, server.cluster_slot_stats_enabled, 0, NULL, NULL),
+     createBoolConfig("hide-user-data-from-log", NULL, MODIFIABLE_CONFIG, server.hide_user_data_from_log, 1, NULL, NULL),
 +    createBoolConfig("repl-paused", NULL, MODIFIABLE_CONFIG, server.repl_paused, 0, NULL, updateReplPaused),
  
      /* String Configs */
      createStringConfig("aclfile", NULL, IMMUTABLE_CONFIG, ALLOW_EMPTY_STRING, server.acl_filename, "", NULL, NULL),
 diff --git a/src/replication.c b/src/replication.c
-index fbd9b0a99..47172dba3 100644
+index 249c190ea..a1ec23f38 100644
 --- a/src/replication.c
 +++ b/src/replication.c
-@@ -47,7 +47,6 @@ void replicationResurrectCachedMaster(connection *conn);
+@@ -51,7 +51,6 @@ void replicationResurrectProvisionalPrimary(void);
  void replicationSendAck(void);
- int replicaPutOnline(client *slave);
- void replicaStartCommandStream(client *slave);
+ int replicaPutOnline(client *replica);
+ void replicaStartCommandStream(client *replica);
 -int cancelReplicationHandshake(int reconnect);
+ void replicationSteadyStateInit(void);
+ void setupMainConnForPsync(connection *conn);
+ void dualChannelSyncHandleRdbLoadCompletion(void);
+@@ -1067,7 +1066,7 @@ void syncCommand(client *c) {
  
- /* We take a global flag to remember if this instance generated an RDB
-  * because of replication, so that we can remove the RDB file in case
-@@ -970,7 +969,7 @@ void syncCommand(client *c) {
- 
-     /* Refuse SYNC requests if we are a slave but the link with our master
+     /* Refuse SYNC requests if we are a replica but the link with our primary
       * is not ok... */
--    if (server.masterhost && server.repl_state != REPL_STATE_CONNECTED) {
-+    if (server.masterhost && server.repl_state != REPL_STATE_CONNECTED && !server.repl_paused) {
-         addReplyError(c,"-NOMASTERLINK Can't SYNC while not connected with my master");
+-    if (server.primary_host && server.repl_state != REPL_STATE_CONNECTED) {
++    if (server.primary_host && server.repl_state != REPL_STATE_CONNECTED && !server.repl_paused) {
+         addReplyError(c, "-NOMASTERLINK Can't SYNC while not connected with my master");
          return;
      }
-@@ -3743,7 +3742,7 @@ void replicationCron(void) {
+@@ -4520,7 +4519,7 @@ void replicationCron(void) {
      }
  
-     /* Check if we should connect to a MASTER */
+     /* Check if we should connect to a PRIMARY */
 -    if (server.repl_state == REPL_STATE_CONNECT) {
 +    if (server.repl_state == REPL_STATE_CONNECT && !server.repl_paused) {
-         serverLog(LL_NOTICE,"Connecting to MASTER %s:%d",
-             server.masterhost, server.masterport);
-         connectWithMaster();
+         serverLog(LL_NOTICE, "Connecting to PRIMARY %s:%d", server.primary_host, server.primary_port);
+         connectWithPrimary();
+     }
 diff --git a/src/server.h b/src/server.h
-index cb555031e..be2405170 100644
+index fceb2894a..d6dcd98fe 100644
 --- a/src/server.h
 +++ b/src/server.h
-@@ -1907,6 +1907,7 @@ struct redisServer {
+@@ -2095,6 +2095,7 @@ struct valkeyServer {
      /* Synchronous replication. */
-     list *clients_waiting_acks;         /* Clients waiting in WAIT or WAITAOF. */
-     int get_ack_from_slaves;            /* If true we send REPLCONF GETACK. */
-+    int repl_paused;                    /* If true we don't try to connect to master */
+     list *clients_waiting_acks; /* Clients waiting in WAIT or WAITAOF. */
+     int get_ack_from_replicas;  /* If true we send REPLCONF GETACK. */
++    int repl_paused;            /* If true we don't try to connect to the primary */
      /* Limits */
-     unsigned int maxclients;            /* Max number of simultaneous clients */
-     unsigned long long maxmemory;   /* Max number of memory bytes to use */
-@@ -2826,6 +2827,8 @@ void clearFailoverState(void);
+     unsigned int maxclients;                    /* Max number of simultaneous clients */
+     unsigned long long maxmemory;               /* Max number of memory bytes to use */
+@@ -3082,6 +3083,8 @@ void clearFailoverState(void);
  void updateFailoverStatus(void);
  void abortFailover(const char *err);
  const char *getFailoverStateString(void);
 +int cancelReplicationHandshake(int reconnect);
-+int slaveIsInHandshakeState(void);
++int replicaIsInHandshakeState(void);
+ int sendCurrentOffsetToReplica(client *replica);
+ void addRdbReplicaToPsyncWait(client *replica);
  
- /* Generic persistence functions */
- void startLoadingFile(size_t size, char* filename, int rdbflags);
 diff --git a/tests/cluster/tests/99-yandex-cloud-patches.tcl b/tests/cluster/tests/99-yandex-cloud-patches.tcl
 new file mode 100644
 index 000000000..6d0c1007b
@@ -203,15 +203,6 @@ index 000000000..a2c9bb949
 +        }
 +    }
 +}
-diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl
-index 21fa35d4d..ad7707a29 100644
---- a/tests/test_helper.tcl
-+++ b/tests/test_helper.tcl
-@@ -103,6 +103,7 @@ set ::all_tests {
-     unit/cluster/slot-ownership
-     unit/cluster/links
-     unit/cluster/cluster-response-tls
-+    integration/yandex-cloud-patches
- }
- # Index to the next test to run in the ::all_tests list.
- set ::next_test 0
+-- 
+2.48.0
+
diff --git a/redis_patches/0002_Allow_explicit_cluster_replication_cascades.patch b/valkey_patches/0002_Allow_explicit_cluster_replication_cascades.patch
similarity index 59%
rename from redis_patches/0002_Allow_explicit_cluster_replication_cascades.patch
rename to valkey_patches/0002_Allow_explicit_cluster_replication_cascades.patch
index bd7e448..98c0026 100644
--- a/redis_patches/0002_Allow_explicit_cluster_replication_cascades.patch
+++ b/valkey_patches/0002_Allow_explicit_cluster_replication_cascades.patch
@@ -1,22 +1,18 @@
-diff --git a/src/cluster.c b/src/cluster.c
-index 6b34b72c2..b01695db7 100644
---- a/src/cluster.c
-+++ b/src/cluster.c
-@@ -6282,12 +6282,6 @@ NULL
-             return;
+diff --git a/src/cluster_legacy.c b/src/cluster_legacy.c
+index 46b63c6cd..0449ca0c9 100644
+--- a/src/cluster_legacy.c
++++ b/src/cluster_legacy.c
+@@ -6642,7 +6642,7 @@ int clusterCommandSpecial(client *c) {
          }
  
--        /* Can't replicate a slave. */
--        if (nodeIsSlave(n)) {
--            addReplyError(c,"I can only replicate a master, not a replica.");
--            return;
--        }
--
-         /* If the instance is currently a master, it should have no assigned
-          * slots nor keys to accept to replicate some other node.
-          * Slaves can switch to another master without issues. */
+         /* Can't replicate a replica. */
+-        if (nodeIsReplica(n)) {
++        if (nodeIsReplica(n) && !server.cluster_replica_no_failover) {
+             addReplyError(c, "I can only replicate a master, not a replica.");
+             return 1;
+         }
 diff --git a/tests/cluster/tests/99-yandex-cloud-patches.tcl b/tests/cluster/tests/99-yandex-cloud-patches.tcl
-index 6d0c1007b..3a0d580d9 100644
+index 6d0c1007b..04b644128 100644
 --- a/tests/cluster/tests/99-yandex-cloud-patches.tcl
 +++ b/tests/cluster/tests/99-yandex-cloud-patches.tcl
 @@ -13,8 +13,8 @@ proc kill_clustered_redis {id} {
@@ -30,11 +26,12 @@ index 6d0c1007b..3a0d580d9 100644
  }
  
  test "Cluster is up" {
-@@ -38,6 +38,15 @@ test "Replication pause on instance #1 works" {
+@@ -38,6 +38,16 @@ test "Replication pause on instance #1 works" {
      }
  }
  
 +test "Replication cascade with paused instance works" {
++    assert {[R 2 config set cluster-slave-no-failover yes] eq {OK}}
 +    assert {[R 2 cluster replicate [R 1 CLUSTER MYID]] eq {OK}}
 +    wait_for_condition 1000 50 {
 +        [RI 2 master_link_status] eq {up}
@@ -46,3 +43,6 @@ index 6d0c1007b..3a0d580d9 100644
  test "Replication resume on instance #1 works" {
      assert {[R 1 config set repl-paused no] eq {OK}}
      wait_for_condition 1000 50 {
+-- 
+2.48.0
+
diff --git a/redis_patches/0003_Add_offline_mode.patch b/valkey_patches/0003_Add_offline_mode.patch
similarity index 63%
rename from redis_patches/0003_Add_offline_mode.patch
rename to valkey_patches/0003_Add_offline_mode.patch
index 4575e82..c2573f2 100644
--- a/redis_patches/0003_Add_offline_mode.patch
+++ b/valkey_patches/0003_Add_offline_mode.patch
@@ -1,17 +1,17 @@
 diff --git a/src/config.c b/src/config.c
-index 1d720132a..a7231868c 100644
+index c1351d74f..a53cb6992 100644
 --- a/src/config.c
 +++ b/src/config.c
-@@ -628,6 +628,8 @@ void loadServerConfigFromString(char *config) {
-     if (server.config_hz < CONFIG_MIN_HZ) server.config_hz = CONFIG_MIN_HZ;
-     if (server.config_hz > CONFIG_MAX_HZ) server.config_hz = CONFIG_MAX_HZ;
+@@ -594,6 +594,8 @@ void loadServerConfigFromString(char *config) {
+     /* To ensure backward compatibility when io_threads_num is according to the previous maximum of 128. */
+     if (server.io_threads_num > IO_THREADS_MAX_NUM) server.io_threads_num = IO_THREADS_MAX_NUM;
  
 +    server.offline_initial = server.offline;
 +
-     sdsfreesplitres(lines,totlines);
+     sdsfreesplitres(lines, totlines);
      reading_config_file = 0;
      return;
-@@ -3066,6 +3068,31 @@ static int applyClientMaxMemoryUsage(const char **err) {
+@@ -3072,6 +3074,31 @@ static int applyClientMaxMemoryUsage(const char **err) {
      return 1;
  }
  
@@ -41,9 +41,9 @@ index 1d720132a..a7231868c 100644
 +}
 +
  standardConfig static_configs[] = {
+     /* clang-format off */
      /* Bool configs */
-     createBoolConfig("rdbchecksum", NULL, IMMUTABLE_CONFIG, server.rdb_checksum, 1, NULL, NULL),
-@@ -3281,6 +3308,7 @@ standardConfig static_configs[] = {
+@@ -3304,6 +3331,7 @@ standardConfig static_configs[] = {
      createSpecialConfig("bind", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigBindOption, getConfigBindOption, rewriteConfigBindOption, applyBind),
      createSpecialConfig("replicaof", "slaveof", IMMUTABLE_CONFIG | MULTI_ARG_CONFIG, setConfigReplicaOfOption, getConfigReplicaOfOption, rewriteConfigReplicaOfOption, NULL),
      createSpecialConfig("latency-tracking-info-percentiles", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigLatencyTrackingInfoPercentilesOutputOption, getConfigLatencyTrackingInfoPercentilesOutputOption, rewriteConfigLatencyTrackingInfoPercentilesOutputOption, NULL),
@@ -52,10 +52,10 @@ index 1d720132a..a7231868c 100644
      /* NULL Terminator, this is dropped when we convert to the runtime array. */
      {NULL}
 diff --git a/src/server.c b/src/server.c
-index 438325f28..2aa63df77 100644
+index 1bdf2dd9e..f63c1f463 100644
 --- a/src/server.c
 +++ b/src/server.c
-@@ -2447,11 +2447,17 @@ int listenToPort(connListener *sfd) {
+@@ -2455,9 +2455,15 @@ int listenToPort(connListener *sfd) {
      int j;
      int port = sfd->port;
      char **bindaddr = sfd->bindaddr;
@@ -70,37 +70,42 @@ index 438325f28..2aa63df77 100644
 -    if (sfd->bindaddr_count == 0) return C_OK;
 +    if (bindaddr_count == 0) return C_OK;
  
--    for (j = 0; j < sfd->bindaddr_count; j++) {
-+    for (j = 0; j < bindaddr_count; j++) {
-         char* addr = bindaddr[j];
-         int optional = *addr == '-';
-         if (optional) addr++;
+     for (j = 0; j < sfd->bindaddr_count; j++) {
+         char *addr = bindaddr[j];
+@@ -6435,8 +6441,8 @@ void dismissMemoryInChild(void) {
+     /* madvise(MADV_DONTNEED) may not work if Transparent Huge Pages is enabled. */
+     if (server.thp_enabled) return;
+ 
+-        /* Currently we use zmadvise_dontneed only when we use jemalloc with Linux.
+-         * so we avoid these pointless loops when they're not going to do anything. */
++    /* Currently we use zmadvise_dontneed only when we use jemalloc with Linux.
++     * so we avoid these pointless loops when they're not going to do anything. */
+ #if defined(USE_JEMALLOC) && defined(__linux__)
+     listIter li;
+     listNode *ln;
+@@ -6948,7 +6954,7 @@ int main(int argc, char **argv) {
+     }
+     if (server.sentinel_mode) sentinelCheckConfigFile();
+ 
+-        /* Do system checks */
++    /* Do system checks */
+ #ifdef __linux__
+     linuxMemoryWarnings();
+     sds err_msg = NULL;
 diff --git a/src/server.h b/src/server.h
-index be2405170..05bdf5c0c 100644
+index d6dcd98fe..14e9e1fa9 100644
 --- a/src/server.h
 +++ b/src/server.h
-@@ -1908,6 +1908,9 @@ struct redisServer {
-     list *clients_waiting_acks;         /* Clients waiting in WAIT or WAITAOF. */
-     int get_ack_from_slaves;            /* If true we send REPLCONF GETACK. */
-     int repl_paused;                    /* If true we don't try to connect to master */
+@@ -2096,6 +2096,9 @@ struct valkeyServer {
+     list *clients_waiting_acks; /* Clients waiting in WAIT or WAITAOF. */
+     int get_ack_from_replicas;  /* If true we send REPLCONF GETACK. */
+     int repl_paused;            /* If true we don't try to connect to the primary */
 +    /* Offline mode */
-+    int offline;                        /* If true only localhost connections are accepted */
-+    int offline_initial;                /* Initial state of offline mode (from config) */
++    int offline;         /* If true only localhost connections are accepted */
++    int offline_initial; /* Initial state of offline mode (from config) */
      /* Limits */
-     unsigned int maxclients;            /* Max number of simultaneous clients */
-     unsigned long long maxmemory;   /* Max number of memory bytes to use */
-diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl
-index ad7707a29..a5447701e 100644
---- a/tests/test_helper.tcl
-+++ b/tests/test_helper.tcl
-@@ -104,6 +104,7 @@ set ::all_tests {
-     unit/cluster/links
-     unit/cluster/cluster-response-tls
-     integration/yandex-cloud-patches
-+    unit/yandex-cloud-patches
- }
- # Index to the next test to run in the ::all_tests list.
- set ::next_test 0
+     unsigned int maxclients;                    /* Max number of simultaneous clients */
+     unsigned long long maxmemory;               /* Max number of memory bytes to use */
 diff --git a/tests/unit/yandex-cloud-patches.tcl b/tests/unit/yandex-cloud-patches.tcl
 new file mode 100644
 index 000000000..b8c3ba453
@@ -130,3 +135,6 @@ index 000000000..b8c3ba453
 +        }
 +    }
 +}
+-- 
+2.48.0
+
diff --git a/redis_patches/0004_Add_waitquorum_command.patch b/valkey_patches/0004_Add_waitquorum_command.patch
similarity index 67%
rename from redis_patches/0004_Add_waitquorum_command.patch
rename to valkey_patches/0004_Add_waitquorum_command.patch
index 583f4e7..52240f1 100644
--- a/redis_patches/0004_Add_waitquorum_command.patch
+++ b/valkey_patches/0004_Add_waitquorum_command.patch
@@ -1,8 +1,8 @@
 diff --git a/src/commands.def b/src/commands.def
-index 10fbd9f4a..ce071e85a 100644
+index cd9f8e298..6deb5a6cf 100644
 --- a/src/commands.def
 +++ b/src/commands.def
-@@ -2703,6 +2703,26 @@ struct COMMAND_ARG WAITAOF_Args[] = {
+@@ -2791,6 +2791,26 @@ struct COMMAND_ARG WAITAOF_Args[] = {
  {MAKE_ARG("timeout",ARG_TYPE_INTEGER,-1,NULL,NULL,NULL,CMD_ARG_NONE,0,NULL)},
  };
  
@@ -29,17 +29,17 @@ index 10fbd9f4a..ce071e85a 100644
  /********** GEOADD ********************/
  
  #ifndef SKIP_CMD_HISTORY_TABLE
-@@ -10688,6 +10708,7 @@ struct COMMAND_STRUCT redisCommandTable[] = {
+@@ -10908,6 +10928,7 @@ struct COMMAND_STRUCT serverCommandTable[] = {
  {MAKE_CMD("unlink","Asynchronously deletes one or more keys.","O(1) for each key removed regardless of its size. Then the command does O(N) work in a different thread in order to reclaim memory, where N is the number of allocations the deleted objects where composed of.","4.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,UNLINK_History,0,UNLINK_Tips,2,unlinkCommand,-2,CMD_WRITE|CMD_FAST,ACL_CATEGORY_KEYSPACE,UNLINK_Keyspecs,1,NULL,1),.args=UNLINK_Args},
- {MAKE_CMD("wait","Blocks until the asynchronous replication of all preceding write commands sent by the connection is completed.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,WAIT_History,0,WAIT_Tips,2,waitCommand,3,0,ACL_CATEGORY_CONNECTION,WAIT_Keyspecs,0,NULL,2),.args=WAIT_Args},
- {MAKE_CMD("waitaof","Blocks until all of the preceding write commands sent by the connection are written to the append-only file of the master and/or replicas.","O(1)","7.2.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,WAITAOF_History,0,WAITAOF_Tips,2,waitaofCommand,4,CMD_NOSCRIPT,ACL_CATEGORY_CONNECTION,WAITAOF_Keyspecs,0,NULL,3),.args=WAITAOF_Args},
-+{MAKE_CMD("waitquorum","Blocks until the asynchronous replication of all preceding write commands sent by the connection is completed on replicas quorum.","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,WAITQUORUM_History,0,WAITQUORUM_Tips,2,waitquorumCommand,1,0,ACL_CATEGORY_CONNECTION,WAITQUORUM_Keyspecs,0,NULL,0)},
+ {MAKE_CMD("wait","Blocks until the asynchronous replication of all preceding write commands sent by the connection is completed.","O(1)","3.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,WAIT_History,0,WAIT_Tips,2,waitCommand,3,CMD_BLOCKING,ACL_CATEGORY_CONNECTION,WAIT_Keyspecs,0,NULL,2),.args=WAIT_Args},
+ {MAKE_CMD("waitaof","Blocks until all of the preceding write commands sent by the connection are written to the append-only file of the primary and/or replicas.","O(1)","7.2.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,WAITAOF_History,0,WAITAOF_Tips,2,waitaofCommand,4,CMD_BLOCKING,ACL_CATEGORY_CONNECTION,WAITAOF_Keyspecs,0,NULL,3),.args=WAITAOF_Args},
++{MAKE_CMD("waitquorum","Blocks until the asynchronous replication of all preceding write commands sent by the connection is completed on replicas quorum.","O(1)","8.0.0",CMD_DOC_NONE,NULL,NULL,"generic",COMMAND_GROUP_GENERIC,WAITQUORUM_History,0,WAITQUORUM_Tips,2,waitquorumCommand,1,0,ACL_CATEGORY_CONNECTION,WAITQUORUM_Keyspecs,0,NULL,0)},
  /* geo */
  {MAKE_CMD("geoadd","Adds one or more members to a geospatial index. The key is created if it doesn't exist.","O(log(N)) for each item added, where N is the number of elements in the sorted set.","3.2.0",CMD_DOC_NONE,NULL,NULL,"geo",COMMAND_GROUP_GEO,GEOADD_History,1,GEOADD_Tips,0,geoaddCommand,-5,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_GEO,GEOADD_Keyspecs,1,NULL,4),.args=GEOADD_Args},
  {MAKE_CMD("geodist","Returns the distance between two members of a geospatial index.","O(1)","3.2.0",CMD_DOC_NONE,NULL,NULL,"geo",COMMAND_GROUP_GEO,GEODIST_History,0,GEODIST_Tips,0,geodistCommand,-4,CMD_READONLY,ACL_CATEGORY_GEO,GEODIST_Keyspecs,1,NULL,4),.args=GEODIST_Args},
 diff --git a/src/commands/waitquorum.json b/src/commands/waitquorum.json
 new file mode 100644
-index 000000000..dca11ab67
+index 000000000..18b21012d
 --- /dev/null
 +++ b/src/commands/waitquorum.json
 @@ -0,0 +1,24 @@
@@ -48,7 +48,7 @@ index 000000000..dca11ab67
 +        "summary": "Blocks until the asynchronous replication of all preceding write commands sent by the connection is completed on replicas quorum.",
 +        "complexity": "O(1)",
 +        "group": "generic",
-+        "since": "6.2.0",
++        "since": "8.0.0",
 +        "arity": 1,
 +        "function": "waitquorumCommand",
 +        "command_flags": [
@@ -68,10 +68,10 @@ index 000000000..dca11ab67
 +    }
 +}
 diff --git a/src/config.c b/src/config.c
-index a7231868c..6939dc893 100644
+index a53cb6992..35524be4e 100644
 --- a/src/config.c
 +++ b/src/config.c
-@@ -3093,6 +3093,78 @@ static void rewriteConfigOfflineMode(standardConfig *config, const char *name, s
+@@ -3099,6 +3099,79 @@ static void rewriteConfigOfflineMode(standardConfig *config, const char *name, s
      rewriteConfigYesNoOption(state, name, server.offline_initial, 0);
  }
  
@@ -115,7 +115,8 @@ index a7231868c..6939dc893 100644
 +    return ret;
 +}
 +
-+static void rewriteConfigQuorumReplicasOption(standardConfig *config, const char *name, struct rewriteConfigState *state) {
++static void
++rewriteConfigQuorumReplicasOption(standardConfig *config, const char *name, struct rewriteConfigState *state) {
 +    UNUSED(config);
 +    sds line;
 +    sds replicas = sdsempty();
@@ -148,9 +149,9 @@ index a7231868c..6939dc893 100644
 +}
 +
  standardConfig static_configs[] = {
+     /* clang-format off */
      /* Bool configs */
-     createBoolConfig("rdbchecksum", NULL, IMMUTABLE_CONFIG, server.rdb_checksum, 1, NULL, NULL),
-@@ -3230,6 +3302,7 @@ standardConfig static_configs[] = {
+@@ -3249,6 +3322,7 @@ standardConfig static_configs[] = {
      createIntConfig("watchdog-period", NULL, MODIFIABLE_CONFIG | HIDDEN_CONFIG, 0, INT_MAX, server.watchdog_period, 0, INTEGER_CONFIG, NULL, updateWatchdogPeriod),
      createIntConfig("shutdown-timeout", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.shutdown_timeout, 10, INTEGER_CONFIG, NULL, NULL),
      createIntConfig("repl-diskless-sync-max-replicas", NULL, MODIFIABLE_CONFIG, 0, INT_MAX, server.repl_diskless_sync_max_replicas, 0, INTEGER_CONFIG, NULL, NULL),
@@ -158,7 +159,7 @@ index a7231868c..6939dc893 100644
  
      /* Unsigned int configs */
      createUIntConfig("maxclients", NULL, MODIFIABLE_CONFIG, 1, UINT_MAX, server.maxclients, 10000, INTEGER_CONFIG, NULL, updateMaxclients),
-@@ -3309,6 +3382,7 @@ standardConfig static_configs[] = {
+@@ -3332,6 +3406,7 @@ standardConfig static_configs[] = {
      createSpecialConfig("replicaof", "slaveof", IMMUTABLE_CONFIG | MULTI_ARG_CONFIG, setConfigReplicaOfOption, getConfigReplicaOfOption, rewriteConfigReplicaOfOption, NULL),
      createSpecialConfig("latency-tracking-info-percentiles", NULL, MODIFIABLE_CONFIG | MULTI_ARG_CONFIG, setConfigLatencyTrackingInfoPercentilesOutputOption, getConfigLatencyTrackingInfoPercentilesOutputOption, rewriteConfigLatencyTrackingInfoPercentilesOutputOption, NULL),
      createSpecialConfig("offline", NULL, MODIFIABLE_CONFIG, setOfflineMode, getOfflineMode, rewriteConfigOfflineMode, applyBind),
@@ -167,24 +168,24 @@ index a7231868c..6939dc893 100644
      /* NULL Terminator, this is dropped when we convert to the runtime array. */
      {NULL}
 diff --git a/src/networking.c b/src/networking.c
-index 7696e8c28..82c6f2b97 100644
+index ed4f3582b..27c4d6a92 100644
 --- a/src/networking.c
 +++ b/src/networking.c
-@@ -171,6 +171,7 @@ client *createClient(connection *conn) {
+@@ -175,6 +175,7 @@ client *createClient(connection *conn) {
      c->duration = 0;
      clientSetDefaultAuth(c);
-     c->replstate = REPL_STATE_NONE;
+     c->repl_state = REPL_STATE_NONE;
 +    c->quorum = 0;
      c->repl_start_cmd_stream_on_ack = 0;
      c->reploff = 0;
      c->read_reploff = 0;
 diff --git a/src/replication.c b/src/replication.c
-index 47172dba3..04f0c29bb 100644
+index a1ec23f38..05fe5b542 100644
 --- a/src/replication.c
 +++ b/src/replication.c
-@@ -1285,7 +1285,8 @@ void replconfCommand(client *c) {
+@@ -1430,7 +1430,8 @@ void replconfCommand(client *c) {
   * It does a few things:
-  * 1) Put the slave in ONLINE state.
+  * 1) Put the replica in ONLINE state.
   * 2) Update the count of "good replicas".
 - * 3) Trigger the module event.
 + * 3) Update the quorum replicas state
@@ -192,15 +193,15 @@ index 47172dba3..04f0c29bb 100644
   *
   * the return value indicates that the replica should be disconnected.
   * */
-@@ -1302,6 +1303,7 @@ int replicaPutOnline(client *slave) {
-     slave->repl_ack_time = server.unixtime; /* Prevent false timeout. */
+@@ -1446,6 +1447,7 @@ int replicaPutOnline(client *replica) {
+     replica->repl_ack_time = server.unixtime; /* Prevent false timeout. */
  
-     refreshGoodSlavesCount();
+     refreshGoodReplicasCount();
 +    updateQuorumReplicas();
      /* Fire the replica change modules event. */
-     moduleFireServerEvent(REDISMODULE_EVENT_REPLICA_CHANGE,
-                           REDISMODULE_SUBEVENT_REPLICA_CHANGE_ONLINE,
-@@ -3523,6 +3525,23 @@ int replicationCountAOFAcksByOffset(long long offset) {
+     moduleFireServerEvent(VALKEYMODULE_EVENT_REPLICA_CHANGE, VALKEYMODULE_SUBEVENT_REPLICA_CHANGE_ONLINE, NULL);
+     serverLog(LL_NOTICE, "Synchronization with replica %s succeeded", replicationGetReplicaName(replica));
+@@ -4305,6 +4307,23 @@ int replicationCountAOFAcksByOffset(long long offset) {
      return count;
  }
  
@@ -211,12 +212,12 @@ index 47172dba3..04f0c29bb 100644
 +    listNode *ln;
 +    int count = 0;
 +
-+    listRewind(server.slaves,&li);
-+    while((ln = listNext(&li))) {
-+        client *slave = ln->value;
++    listRewind(server.replicas, &li);
++    while ((ln = listNext(&li))) {
++        client *replica = ln->value;
 +
-+        if (slave->replstate != SLAVE_STATE_ONLINE || slave->quorum != 1) continue;
-+        if (slave->repl_ack_off >= offset) count++;
++        if (replica->repl_state != REPLICA_STATE_ONLINE || replica->quorum != 1) continue;
++        if (replica->repl_ack_off >= offset) count++;
 +    }
 +    return count;
 +}
@@ -224,8 +225,8 @@ index 47172dba3..04f0c29bb 100644
  /* WAIT for N replicas to acknowledge the processing of our latest
   * write command (and all the previous commands). */
  void waitCommand(client *c) {
-@@ -3557,6 +3576,62 @@ void waitCommand(client *c) {
-     replicationRequestAckFromSlaves();
+@@ -4339,6 +4358,64 @@ void waitCommand(client *c) {
+     replicationRequestAckFromReplicas();
  }
  
 +/* WAITQUORUM for quorum-replicas-to-write quorum replicas to acknowledge the processing of our latest
@@ -234,26 +235,28 @@ index 47172dba3..04f0c29bb 100644
 +    long ackreplicas;
 +    long long offset = c->woff;
 +
-+    if (server.masterhost) {
-+        addReplyError(c,"WAITQUORUM cannot be used with replica instances. Please also note that since Redis 4.0 if a replica is configured to be writable (which is not the default) writes to replicas are just local and are not propagated.");
++    if (server.primary_host) {
++        addReplyError(c, "WAITQUORUM cannot be used with replica instances. Please also note that since Redis 4.0 if a "
++                         "replica is configured to be writable (which is not the default) writes to replicas are just "
++                         "local and are not propagated.");
 +        return;
 +    }
 +
 +    /* First try without blocking at all. */
 +    ackreplicas = replicationCountQuorumAcksByOffset(c->woff);
-+    if (ackreplicas >= server.quorum_replicas_to_write || c->flags & CLIENT_DENY_BLOCKING) {
-+        addReplyLongLong(c,ackreplicas);
++    if (ackreplicas >= server.quorum_replicas_to_write || c->flag.deny_blocking) {
++        addReplyLongLong(c, ackreplicas);
 +        return;
 +    }
 +
 +    /* Otherwise block the client and put it into our list of clients
 +     * waiting for ack from slaves. */
-+    blockForReplication(c,0,offset,server.quorum_replicas_to_write);
++    blockClientForReplicaAck(c, 0, offset, server.quorum_replicas_to_write, 0);
 +    c->bstate.quorum = 1;
 +
 +    /* Make sure that the server will send an ACK request to all the slaves
 +     * before returning to the event loop. */
-+    replicationRequestAckFromSlaves();
++    replicationRequestAckFromReplicas();
 +}
 +
 +/* This function updates connected replicas state to mark quorum ones
@@ -262,32 +265,32 @@ index 47172dba3..04f0c29bb 100644
 +    listIter li;
 +    listNode *ln;
 +
-+    if (server.slaves == NULL || listLength(server.slaves) == 0) {
++    if (server.replicas == NULL || listLength(server.replicas) == 0) {
 +        return;
 +    }
 +
-+    listRewind(server.slaves,&li);
-+    while((ln = listNext(&li))) {
-+        client *slave = ln->value;
++    listRewind(server.replicas, &li);
++    while ((ln = listNext(&li))) {
++        client *replica = ln->value;
 +        int config = 0;
-+        char *name = replicationGetSlaveName(slave);
++        char *name = replicationGetReplicaName(replica);
 +
-+        if (slave->replstate != SLAVE_STATE_ONLINE) {
-+            slave->quorum = 0;
++        if (replica->repl_state != REPLICA_STATE_ONLINE) {
++            replica->quorum = 0;
 +            continue;
 +        }
 +
 +        if (dictFind(server.quorum_replicas, name)) {
 +            config = 1;
 +        }
-+        slave->quorum = config;
++        replica->quorum = config;
 +    }
 +}
 +
- /* WAIT for N replicas and / or local master to acknowledge our latest
+ /* WAIT for N replicas and / or local primary to acknowledge our latest
   * write command got synced to the disk. */
  void waitaofCommand(client *c) {
-@@ -3615,8 +3690,10 @@ void unblockClientWaitingReplicas(client *c) {
+@@ -4397,8 +4474,10 @@ void unblockClientWaitingReplicas(client *c) {
  void processClientsWaitingReplicas(void) {
      long long last_offset = 0;
      long long last_aof_offset = 0;
@@ -298,30 +301,26 @@ index 47172dba3..04f0c29bb 100644
  
      listIter li;
      listNode *ln;
-@@ -3639,19 +3716,27 @@ void processClientsWaitingReplicas(void) {
+@@ -4421,16 +4500,25 @@ void processClientsWaitingReplicas(void) {
           * offset and number of replicas, we remember it so the next client
           * may be unblocked without calling replicationCountAcksByOffset()
           * or calling replicationCountAOFAcksByOffset()
 +         * or calling replicationCountQuorumAcksByOffset()
           * if the requested offset / replicas were equal or less. */
 -        if (!is_wait_aof && last_offset && last_offset >= c->bstate.reploffset &&
--                           last_numreplicas >= c->bstate.numreplicas)
+-            last_numreplicas >= c->bstate.numreplicas) {
 +        if (c->bstate.quorum == 1 && last_quorum_offset && last_quorum_offset >= c->bstate.reploffset &&
-+                                                           last_quorum_numreplicas >= c->bstate.numreplicas) {
++            last_quorum_numreplicas >= c->bstate.numreplicas) {
 +            numreplicas = last_quorum_numreplicas;
 +        } else if (!is_wait_aof && c->bstate.quorum == 0 && last_offset && last_offset >= c->bstate.reploffset &&
-+                                                                           last_numreplicas >= c->bstate.numreplicas)
-         {
++                   last_numreplicas >= c->bstate.numreplicas) {
              numreplicas = last_numreplicas;
          } else if (is_wait_aof && last_aof_offset && last_aof_offset >= c->bstate.reploffset &&
--                    last_aof_numreplicas >= c->bstate.numreplicas)
-+                                                     last_aof_numreplicas >= c->bstate.numreplicas)
-         {
+                    last_aof_numreplicas >= c->bstate.numreplicas) {
              numreplicas = last_aof_numreplicas;
          } else {
--            numreplicas = is_wait_aof ?
--                replicationCountAOFAcksByOffset(c->bstate.reploffset) :
--                replicationCountAcksByOffset(c->bstate.reploffset);
+-            numreplicas = is_wait_aof ? replicationCountAOFAcksByOffset(c->bstate.reploffset)
+-                                      : replicationCountAcksByOffset(c->bstate.reploffset);
 +            if (is_wait_aof) {
 +                numreplicas = replicationCountAOFAcksByOffset(c->bstate.reploffset);
 +            } else if (c->bstate.quorum == 1) {
@@ -332,7 +331,7 @@ index 47172dba3..04f0c29bb 100644
  
              /* Check if the number of replicas is satisfied. */
              if (numreplicas < c->bstate.numreplicas) continue;
-@@ -3659,6 +3744,9 @@ void processClientsWaitingReplicas(void) {
+@@ -4438,6 +4526,9 @@ void processClientsWaitingReplicas(void) {
              if (is_wait_aof) {
                  last_aof_offset = c->bstate.reploffset;
                  last_aof_numreplicas = numreplicas;
@@ -343,67 +342,67 @@ index 47172dba3..04f0c29bb 100644
                  last_offset = c->bstate.reploffset;
                  last_numreplicas = numreplicas;
 diff --git a/src/server.c b/src/server.c
-index 2aa63df77..4b1f4242a 100644
+index f63c1f463..57d7e32db 100644
 --- a/src/server.c
 +++ b/src/server.c
-@@ -2091,6 +2091,8 @@ void initServerConfig(void) {
-     server.master_repl_offset = 0;
-     server.fsynced_reploff_pending = 0;
+@@ -2089,6 +2089,8 @@ void initServerConfig(void) {
+     server.loading_process_events_interval_ms = LOADING_PROCESS_EVENTS_INTERVAL_DEFAULT;
+     server.loading_rio = NULL;
  
 +    server.quorum_replicas = dictCreate(&stringSetDictType);
 +
      /* Replication partial resync backlog */
      server.repl_backlog = NULL;
-     server.repl_no_slaves_since = time(NULL);
-@@ -6109,9 +6111,9 @@ sds genRedisInfoString(dict *section_dict, int all_sections, int everything) {
+     server.repl_no_replicas_since = time(NULL);
+@@ -5882,12 +5884,13 @@ sds genValkeyInfoString(dict *section_dict, int all_sections, int everything) {
  
                  info = sdscatprintf(info,
-                     "slave%d:ip=%s,port=%d,state=%s,"
--                    "offset=%lld,lag=%ld\r\n",
-+                    "offset=%lld,lag=%ld,quorum=%d\r\n",
-                     slaveid,slaveip,slave->slave_listening_port,state,
--                    slave->repl_ack_off, lag);
-+                    slave->repl_ack_off, lag, slave->quorum);
-                 slaveid++;
+                                     "slave%d:ip=%s,port=%d,state=%s,"
+-                                    "offset=%lld,lag=%ld,type=%s\r\n",
++                                    "offset=%lld,lag=%ld,type=%s,quorum=%d\r\n",
+                                     replica_id, replica_ip, replica->replica_listening_port, state,
+                                     replica->repl_ack_off, lag,
+                                     replica->flag.repl_rdb_channel                     ? "rdb-channel"
+                                     : replica->repl_state == REPLICA_STATE_BG_RDB_LOAD ? "main-channel"
+-                                                                                       : "replica");
++                                                                                       : "replica",
++                                    replica->quorum);
+                 replica_id++;
              }
          }
 diff --git a/src/server.h b/src/server.h
-index 320d5eeba..18c85d3dc 100644
+index 14e9e1fa9..0023ceaa0 100644
 --- a/src/server.h
 +++ b/src/server.h
-@@ -1030,6 +1030,7 @@ typedef struct blockingState {
-     int numreplicas;        /* Number of replicas we are waiting for ACK. */
-     int numlocal;           /* Indication if WAITAOF is waiting for local fsync. */
-     long long reploffset;   /* Replication offset to reach. */
-+    int quorum;             /* Indication if WAITQUORUM is waiting for quorum. */
+@@ -1025,6 +1025,7 @@ typedef struct blockingState {
  
-     /* BLOCKED_MODULE */
-     void *module_blocked_handle; /* RedisModuleBlockedClient structure.
-@@ -1191,6 +1192,7 @@ typedef struct client {
+     /* BLOCKED_WAIT and BLOCKED_WAITAOF */
+     int numreplicas;      /* Number of replicas we are waiting for ACK. */
++    int quorum;           /* Indication if WAITQUORUM is waiting for quorum. */
+     int numlocal;         /* Indication if WAITAOF is waiting for local fsync. */
+     long long reploffset; /* Replication offset to reach. */
+ 
+@@ -1300,6 +1301,7 @@ typedef struct client {
      time_t obuf_soft_limit_reached_time;
-     int authenticated;      /* Needed when the default user requires auth. */
-     int replstate;          /* Replication state if this is a slave. */
-+    int quorum;             /* Replica quorum flag. */
-     int repl_start_cmd_stream_on_ack; /* Install slave write handler on first ACK. */
-     int repldbfd;           /* Replication DB file descriptor. */
-     off_t repldboff;        /* Replication DB file offset. */
-@@ -1597,8 +1599,10 @@ struct redisServer {
-     list *clients_pending_write; /* There is to write or install handler. */
-     list *clients_pending_read;  /* Client has pending read socket buffers. */
-     list *slaves, *monitors;    /* List of slaves and MONITORs */
--    client *current_client;     /* The client that triggered the command execution (External or AOF). */
--    client *executing_client;   /* The client executing the current command (possibly script or module). */
-+    client *current_client;       /* The client that triggered the command execution (External or AOF). */
-+    client *executing_client;     /* The client executing the current command (possibly script or module). */
-+    dict *quorum_replicas;        /* Replicas that should participate in quorum commit */
-+    int quorum_replicas_to_write; /* Num replicas to accept write before returning from WAITQUORUM command */
+     int repl_state;                      /* Replication state if this is a replica. */
+     int repl_start_cmd_stream_on_ack;    /* Install replica write handler on first ACK. */
++    int quorum;                          /* Replica quorum flag. */
+     int repldbfd;                        /* Replication DB file descriptor. */
+     off_t repldboff;                     /* Replication DB file offset. */
+     off_t repldbsize;                    /* Replication DB file size. */
+@@ -1742,6 +1744,8 @@ struct valkeyServer {
+                                             * RDB transfer until their main channel establishes partial synchronization. */
+     client *current_client;                /* The client that triggered the command execution (External or AOF). */
+     client *executing_client;              /* The client executing the current command (possibly script or module). */
++    dict *quorum_replicas;                 /* Replicas that should participate in quorum commit */
++    int quorum_replicas_to_write;          /* Num replicas to accept quorum before returning from WAITQUORUM command */
  
  #ifdef LOG_REQ_RES
-     char *req_res_logfile; /* Path of log file for logging all requests and their replies. If NULL, no logging will be performed */
-@@ -2804,11 +2808,13 @@ void resizeReplicationBacklog(void);
- void replicationSetMaster(char *ip, int port);
- void replicationUnsetMaster(void);
- void refreshGoodSlavesCount(void);
+     char *req_res_logfile; /* Path of log file for logging all requests and their replies. If NULL, no logging will be
+@@ -3059,11 +3063,13 @@ void resizeReplicationBacklog(void);
+ void replicationSetPrimary(char *ip, int port, int full_sync_required);
+ void replicationUnsetPrimary(void);
+ void refreshGoodReplicasCount(void);
 +void updateQuorumReplicas(void);
  int checkGoodReplicasStatus(void);
  void processClientsWaitingReplicas(void);
@@ -411,10 +410,10 @@ index 320d5eeba..18c85d3dc 100644
  int replicationCountAcksByOffset(long long offset);
  int replicationCountAOFAcksByOffset(long long offset);
 +int replicationCountQuorumAcksByOffset(long long offset);
- void replicationSendNewlineToMaster(void);
- long long replicationGetSlaveOffset(void);
- char *replicationGetSlaveName(client *c);
-@@ -3654,6 +3660,7 @@ void bitposCommand(client *c);
+ void replicationSendNewlineToPrimary(void);
+ long long replicationGetReplicaOffset(void);
+ char *replicationGetReplicaName(client *c);
+@@ -3954,6 +3960,7 @@ void bitposCommand(client *c);
  void replconfCommand(client *c);
  void waitCommand(client *c);
  void waitaofCommand(client *c);
@@ -423,7 +422,7 @@ index 320d5eeba..18c85d3dc 100644
  void georadiusbymemberroCommand(client *c);
  void georadiusCommand(client *c);
 diff --git a/tests/unit/yandex-cloud-patches.tcl b/tests/unit/yandex-cloud-patches.tcl
-index b8c3ba453..43ea52352 100644
+index b8c3ba453..2cb4cdca5 100644
 --- a/tests/unit/yandex-cloud-patches.tcl
 +++ b/tests/unit/yandex-cloud-patches.tcl
 @@ -21,3 +21,67 @@ start_server {config "minimal.conf" tags {"external:skip"}} {
@@ -464,8 +463,8 @@ index b8c3ba453..43ea52352 100644
 +    }
 +
 +    test {WAITQUORUM replica multiple clients unblock - reuse last result} {
-+        set rd [redis_deferring_client -1]
-+        set rd2 [redis_deferring_client -1]
++        set rd [valkey_deferring_client -1]
++        set rd2 [valkey_deferring_client -1]
 +
 +        pause_process $slave_pid
 +
@@ -495,5 +494,5 @@ index b8c3ba453..43ea52352 100644
 +}
 +}
 -- 
-2.47.1
+2.48.0
 
diff --git a/redis_patches/0005_Add_senticache.patch b/valkey_patches/0005_Add_senticache.patch
similarity index 51%
rename from redis_patches/0005_Add_senticache.patch
rename to valkey_patches/0005_Add_senticache.patch
index 16f96cb..b0f8129 100644
--- a/redis_patches/0005_Add_senticache.patch
+++ b/valkey_patches/0005_Add_senticache.patch
@@ -1,15 +1,15 @@
 diff --git a/.gitignore b/.gitignore
-index 5ed94f1da..9e5ad02b0 100644
+index 3175ad4b4..3a9a8970c 100644
 --- a/.gitignore
 +++ b/.gitignore
-@@ -11,6 +11,7 @@ redis-check-rdb
- redis-check-dump
- redis-cli
- redis-sentinel
-+redis-senticache
- redis-server
+@@ -12,6 +12,7 @@ dump*.rdb
+ *-check-dump
+ *-cli
+ *-sentinel
++*-senticache
+ *-server
+ *-unit-tests
  doc-tools
- release
 diff --git a/runtest-senticache b/runtest-senticache
 new file mode 100755
 index 000000000..ce860dc43
@@ -32,85 +32,85 @@ index 000000000..ce860dc43
 +$TCLSH tests/senticache/run.tcl $*
 diff --git a/senticache.conf b/senticache.conf
 new file mode 100644
-index 000000000..782a1b0ba
+index 000000000..5e922cd9f
 --- /dev/null
 +++ b/senticache.conf
 @@ -0,0 +1,9 @@
 +protected-mode no
 +port 26379
 +daemonize no
-+pidfile "./redis-senticache.pid"
++pidfile "./valkey-senticache.pid"
 +logfile ""
 +dir "/tmp"
 +acllog-max-len 128
 +sentinel resolve-hostnames no
 +sentinel announce-hostnames no
 diff --git a/src/Makefile b/src/Makefile
-index ecbd2753d..245e38703 100644
+index 020b70d6d..e0eb01a9c 100644
 --- a/src/Makefile
 +++ b/src/Makefile
-@@ -345,7 +345,9 @@ endif
+@@ -423,7 +423,9 @@ endif
+ ENGINE_NAME=valkey
+ SERVER_NAME=$(ENGINE_NAME)-server$(PROG_SUFFIX)
+ ENGINE_SENTINEL_NAME=$(ENGINE_NAME)-sentinel$(PROG_SUFFIX)
++ENGINE_SENTICACHE_NAME=$(ENGINE_NAME)-senticache$(PROG_SUFFIX)
+ ENGINE_SERVER_OBJ=threads_mngr.o adlist.o quicklist.o ae.o anet.o dict.o kvstore.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o memory_prefetch.o io_threads.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o cluster_legacy.o cluster_slot_stats.o crc16.o endianconv.o slowlog.o eval.o bio.o rio.o rand.o memtest.o syscheck.o crcspeed.o crccombine.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o valkey-check-rdb.o valkey-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o lolwut6.o acl.o tracking.o socket.o tls.o sha256.o timeout.o setcpuaffinity.o monotonic.o mt19937-64.o resp_parser.o call_reply.o script_lua.o script.o functions.o function_lua.o commands.o strl.o connection.o unix.o logreqres.o
++ENGINE_SENTICACHE_OBJ=threads_mngr.o adlist.o quicklist.o ae.o anet.o dict.o kvstore.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o memory_prefetch.o io_threads.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o cluster_legacy.o cluster_slot_stats.o crc16.o endianconv.o slowlog.o eval.o bio.o rio.o rand.o memtest.o syscheck.o crcspeed.o crccombine.o crc64.o bitops.o senticache.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o valkey-check-rdb.o valkey-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o lolwut6.o acl.o tracking.o socket.o tls.o sha256.o timeout.o setcpuaffinity.o monotonic.o mt19937-64.o resp_parser.o call_reply.o script_lua.o script.o functions.o function_lua.o commands.o strl.o connection.o unix.o logreqres.o
+ ENGINE_CLI_NAME=$(ENGINE_NAME)-cli$(PROG_SUFFIX)
+ ENGINE_CLI_OBJ=anet.o adlist.o dict.o valkey-cli.o zmalloc.o release.o ae.o serverassert.o crcspeed.o crccombine.o crc64.o siphash.o crc16.o monotonic.o cli_common.o mt19937-64.o strl.o cli_commands.o
+ ENGINE_BENCHMARK_NAME=$(ENGINE_NAME)-benchmark$(PROG_SUFFIX)
+@@ -436,7 +438,7 @@ ENGINE_TEST_OBJ:=$(sort $(patsubst unit/%.c,unit/%.o,$(ENGINE_TEST_FILES)))
+ ENGINE_UNIT_TESTS:=$(ENGINE_NAME)-unit-tests$(PROG_SUFFIX)
+ ALL_SOURCES=$(sort $(patsubst %.o,%.c,$(ENGINE_SERVER_OBJ) $(ENGINE_CLI_OBJ) $(ENGINE_BENCHMARK_OBJ)))
  
- REDIS_SERVER_NAME=redis-server$(PROG_SUFFIX)
- REDIS_SENTINEL_NAME=redis-sentinel$(PROG_SUFFIX)
-+REDIS_SENTICACHE_NAME=redis-senticache$(PROG_SUFFIX)
- REDIS_SERVER_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o eval.o bio.o rio.o rand.o memtest.o syscheck.o crcspeed.o crc64.o bitops.o sentinel.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o lolwut6.o acl.o tracking.o socket.o tls.o sha256.o timeout.o setcpuaffinity.o monotonic.o mt19937-64.o resp_parser.o call_reply.o script_lua.o script.o functions.o function_lua.o commands.o strl.o connection.o unix.o logreqres.o
-+REDIS_SENTICACHE_OBJ=adlist.o quicklist.o ae.o anet.o dict.o server.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o eval.o bio.o rio.o rand.o memtest.o syscheck.o crcspeed.o crc64.o bitops.o senticache.o notify.o setproctitle.o blocked.o hyperloglog.o latency.o sparkline.o redis-check-rdb.o redis-check-aof.o geo.o lazyfree.o module.o evict.o expire.o geohash.o geohash_helper.o childinfo.o defrag.o siphash.o rax.o t_stream.o listpack.o localtime.o lolwut.o lolwut5.o lolwut6.o acl.o tracking.o socket.o tls.o sha256.o timeout.o setcpuaffinity.o monotonic.o mt19937-64.o resp_parser.o call_reply.o script_lua.o script.o functions.o function_lua.o commands.o strl.o connection.o unix.o logreqres.o
- REDIS_CLI_NAME=redis-cli$(PROG_SUFFIX)
- REDIS_CLI_OBJ=anet.o adlist.o dict.o redis-cli.o zmalloc.o release.o ae.o redisassert.o crcspeed.o crc64.o siphash.o crc16.o monotonic.o cli_common.o mt19937-64.o strl.o cli_commands.o
- REDIS_BENCHMARK_NAME=redis-benchmark$(PROG_SUFFIX)
-@@ -354,7 +356,7 @@ REDIS_CHECK_RDB_NAME=redis-check-rdb$(PROG_SUFFIX)
- REDIS_CHECK_AOF_NAME=redis-check-aof$(PROG_SUFFIX)
- ALL_SOURCES=$(sort $(patsubst %.o,%.c,$(REDIS_SERVER_OBJ) $(REDIS_CLI_OBJ) $(REDIS_BENCHMARK_OBJ)))
- 
--all: $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) $(TLS_MODULE)
-+all: $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) $(TLS_MODULE) $(REDIS_SENTICACHE_NAME)
+-all: $(SERVER_NAME) $(ENGINE_SENTINEL_NAME) $(ENGINE_CLI_NAME) $(ENGINE_BENCHMARK_NAME) $(ENGINE_CHECK_RDB_NAME) $(ENGINE_CHECK_AOF_NAME) $(TLS_MODULE) $(RDMA_MODULE)
++all: $(SERVER_NAME) $(ENGINE_SENTINEL_NAME) $(ENGINE_SENTICACHE_NAME) $(ENGINE_CLI_NAME) $(ENGINE_BENCHMARK_NAME) $(ENGINE_CHECK_RDB_NAME) $(ENGINE_CHECK_AOF_NAME) $(TLS_MODULE) $(RDMA_MODULE)
  	@echo ""
  	@echo "Hint: It's a good idea to run 'make test' ;)"
  	@echo ""
-@@ -406,6 +408,10 @@ $(REDIS_SERVER_NAME): $(REDIS_SERVER_OBJ)
- $(REDIS_SENTINEL_NAME): $(REDIS_SERVER_NAME)
- 	$(REDIS_INSTALL) $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME)
+@@ -500,6 +502,10 @@ $(ENGINE_UNIT_TESTS): $(ENGINE_TEST_OBJ) $(ENGINE_LIB_NAME)
+ $(ENGINE_SENTINEL_NAME): $(SERVER_NAME)
+ 	$(ENGINE_INSTALL) $(SERVER_NAME) $(ENGINE_SENTINEL_NAME)
  
-+# redis-senticache
-+$(REDIS_SENTICACHE_NAME): $(REDIS_SENTICACHE_OBJ)
-+	$(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/lua/src/liblua.a ../deps/hdr_histogram/libhdrhistogram.a ../deps/fpconv/libfpconv.a $(FINAL_LIBS)
-+
- # redis-check-rdb
- $(REDIS_CHECK_RDB_NAME): $(REDIS_SERVER_NAME)
- 	$(REDIS_INSTALL) $(REDIS_SERVER_NAME) $(REDIS_CHECK_RDB_NAME)
-@@ -445,7 +451,7 @@ endif
++# valkey-senticache
++$(ENGINE_SENTICACHE_NAME): $(ENGINE_SENTICACHE_OBJ)
++	$(SERVER_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/lua/src/liblua.a ../deps/hdr_histogram/libhdrhistogram.a ../deps/fpconv/libfpconv.a $(FINAL_LIBS)
++
+ # valkey-check-rdb
+ $(ENGINE_CHECK_RDB_NAME): $(SERVER_NAME)
+ 	$(ENGINE_INSTALL) $(SERVER_NAME) $(ENGINE_CHECK_RDB_NAME)
+@@ -556,7 +562,7 @@ endif
  commands.c: $(COMMANDS_DEF_FILENAME).def
  
  clean:
--	rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep *.so
-+	rm -rf $(REDIS_SERVER_NAME) $(REDIS_SENTINEL_NAME) $(REDIS_SENTICACHE_NAME) $(REDIS_CLI_NAME) $(REDIS_BENCHMARK_NAME) $(REDIS_CHECK_RDB_NAME) $(REDIS_CHECK_AOF_NAME) *.o *.gcda *.gcno *.gcov redis.info lcov-html Makefile.dep *.so
+-	rm -rf $(SERVER_NAME) $(ENGINE_SENTINEL_NAME) $(ENGINE_CLI_NAME) $(ENGINE_BENCHMARK_NAME) $(ENGINE_CHECK_RDB_NAME) $(ENGINE_CHECK_AOF_NAME) $(ENGINE_UNIT_TESTS) $(ENGINE_LIB_NAME) unit/*.o unit/*.d *.o *.gcda *.gcno *.gcov valkey.info lcov-html Makefile.dep *.so
++	rm -rf $(SERVER_NAME) $(ENGINE_SENTINEL_NAME) $(ENGINE_SENTICACHE_NAME) $(ENGINE_CLI_NAME) $(ENGINE_BENCHMARK_NAME) $(ENGINE_CHECK_RDB_NAME) $(ENGINE_CHECK_AOF_NAME) $(ENGINE_UNIT_TESTS) $(ENGINE_LIB_NAME) unit/*.o unit/*.d *.o *.gcda *.gcno *.gcov valkey.info lcov-html Makefile.dep *.so
  	rm -f $(DEP)
  
  .PHONY: clean
-@@ -467,6 +473,9 @@ test-modules: $(REDIS_SERVER_NAME)
- test-sentinel: $(REDIS_SENTINEL_NAME) $(REDIS_CLI_NAME)
+@@ -581,6 +587,9 @@ test-modules: $(SERVER_NAME)
+ test-sentinel: $(ENGINE_SENTINEL_NAME) $(ENGINE_CLI_NAME)
  	@(cd ..; ./runtest-sentinel)
  
-+test-senticache: $(REDIS_SENTICACHE_NAME) $(REDIS_CLI_NAME)
++test-senticache: $(ENGINE_SENTICACHE_NAME) $(ENGINE_CLI_NAME)
 +	@(cd ..; ./runtest-senticache)
 +
- test-cluster: $(REDIS_SERVER_NAME) $(REDIS_CLI_NAME)
+ test-cluster: $(SERVER_NAME) $(ENGINE_CLI_NAME)
  	@(cd ..; ./runtest-cluster)
  
-@@ -504,6 +513,7 @@ helgrind:
+@@ -619,6 +628,7 @@ helgrind:
  install: all
  	@mkdir -p $(INSTALL_BIN)
- 	$(call MAKE_INSTALL,$(REDIS_SERVER_NAME),$(INSTALL_BIN))
-+	$(call MAKE_INSTALL,$(REDIS_SENTICACHE_NAME),$(INSTALL_BIN))
- 	$(call MAKE_INSTALL,$(REDIS_BENCHMARK_NAME),$(INSTALL_BIN))
- 	$(call MAKE_INSTALL,$(REDIS_CLI_NAME),$(INSTALL_BIN))
- 	@ln -sf $(REDIS_SERVER_NAME) $(INSTALL_BIN)/$(REDIS_CHECK_RDB_NAME)
+ 	$(call MAKE_INSTALL,$(SERVER_NAME),$(INSTALL_BIN))
++	$(call MAKE_INSTALL,$(ENGINE_SENTICACHE_NAME),$(INSTALL_BIN))
+ 	$(call MAKE_INSTALL,$(ENGINE_BENCHMARK_NAME),$(INSTALL_BIN))
+ 	$(call MAKE_INSTALL,$(ENGINE_CLI_NAME),$(INSTALL_BIN))
+ 	@ln -sf $(SERVER_NAME) $(INSTALL_BIN)/$(ENGINE_CHECK_RDB_NAME)
 diff --git a/src/commands.def b/src/commands.def
-index ce071e85a..91869e5f4 100644
+index 6deb5a6cf..8f53ef500 100644
 --- a/src/commands.def
 +++ b/src/commands.def
-@@ -5361,6 +5361,23 @@ struct COMMAND_STRUCT SCRIPT_Subcommands[] = {
+@@ -5478,6 +5478,23 @@ struct COMMAND_STRUCT SCRIPT_Subcommands[] = {
  #define SCRIPT_Keyspecs NULL
  #endif
  
@@ -134,14 +134,14 @@ index ce071e85a..91869e5f4 100644
  /********** SENTINEL CKQUORUM ********************/
  
  #ifndef SKIP_CMD_HISTORY_TABLE
-@@ -5840,6 +5857,7 @@ struct COMMAND_ARG SENTINEL_SLAVES_Args[] = {
+@@ -6043,6 +6060,7 @@ struct COMMAND_ARG SENTINEL_SLAVES_Args[] = {
  
  /* SENTINEL command table */
  struct COMMAND_STRUCT SENTINEL_Subcommands[] = {
 +{MAKE_CMD("cache-update","Update Senticache state","O(1)","6.2.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_CACHE_UPDATE_History,0,SENTINEL_CACHE_UPDATE_Tips,0,sentinelCommand,-4,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_CACHE_UPDATE_Keyspecs,0,NULL,0)},
- {MAKE_CMD("ckquorum","Checks for a Redis Sentinel quorum.",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_CKQUORUM_History,0,SENTINEL_CKQUORUM_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_CKQUORUM_Keyspecs,0,NULL,1),.args=SENTINEL_CKQUORUM_Args},
- {MAKE_CMD("config","Configures Redis Sentinel.","O(N) when N is the number of configuration parameters provided","6.2.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_CONFIG_History,1,SENTINEL_CONFIG_Tips,0,sentinelCommand,-4,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_CONFIG_Keyspecs,0,NULL,1),.args=SENTINEL_CONFIG_Args},
- {MAKE_CMD("debug","Lists or updates the current configurable parameters of Redis Sentinel.","O(N) where N is the number of configurable parameters","7.0.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_DEBUG_History,0,SENTINEL_DEBUG_Tips,0,sentinelCommand,-2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_DEBUG_Keyspecs,0,NULL,1),.args=SENTINEL_DEBUG_Args},
+ {MAKE_CMD("ckquorum","Checks for a Sentinel quorum.",NULL,"2.8.4",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_CKQUORUM_History,0,SENTINEL_CKQUORUM_Tips,0,sentinelCommand,3,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_CKQUORUM_Keyspecs,0,NULL,1),.args=SENTINEL_CKQUORUM_Args},
+ {MAKE_CMD("config","Configures Sentinel.","O(N) when N is the number of configuration parameters provided","6.2.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_CONFIG_History,1,SENTINEL_CONFIG_Tips,0,sentinelCommand,-4,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_CONFIG_Keyspecs,0,NULL,1),.args=SENTINEL_CONFIG_Args},
+ {MAKE_CMD("debug","Lists or updates the current configurable parameters of Sentinel.","O(N) where N is the number of configurable parameters","7.0.0",CMD_DOC_NONE,NULL,NULL,"sentinel",COMMAND_GROUP_SENTINEL,SENTINEL_DEBUG_History,0,SENTINEL_DEBUG_Tips,0,sentinelCommand,-2,CMD_ADMIN|CMD_SENTINEL|CMD_ONLY_SENTINEL,0,SENTINEL_DEBUG_Keyspecs,0,NULL,1),.args=SENTINEL_DEBUG_Args},
 diff --git a/src/commands/sentinel-cache-update.json b/src/commands/sentinel-cache-update.json
 new file mode 100644
 index 000000000..acea39255
@@ -167,11 +167,11 @@ index 000000000..acea39255
 +}
 diff --git a/src/senticache.c b/src/senticache.c
 new file mode 100644
-index 000000000..d3c31052a
+index 000000000..787696897
 --- /dev/null
 +++ b/src/senticache.c
-@@ -0,0 +1,1777 @@
-+/* Redis SentiCache implementation */
+@@ -0,0 +1,1751 @@
++/* Valkey SentiCache implementation */
 +
 +#include "server.h"
 +#include "hiredis.h"
@@ -190,84 +190,86 @@ index 000000000..d3c31052a
 +extern char **environ;
 +
 +#ifdef USE_OPENSSL
-+extern SSL_CTX *redis_tls_ctx;
-+extern SSL_CTX *redis_tls_client_ctx;
++extern SSL_CTX *valkey_tls_ctx;
++extern SSL_CTX *valkey_tls_client_ctx;
 +#endif
 +
-+#define REDIS_SENTINEL_PORT 26379
++#define VALKEY_SENTINEL_PORT 26379
 +
 +/* ======================== Sentinel global state =========================== */
 +
 +/* Address object, used to describe an ip:port pair. */
 +typedef struct sentinelAddr {
-+    char *hostname;         /* Hostname OR address, as specified */
-+    char *ip;               /* Always a resolved address */
++    char *hostname; /* Hostname OR address, as specified */
++    char *ip;       /* Always a resolved address */
 +    int port;
 +} sentinelAddr;
 +
-+/* A Sentinel Redis Instance object is monitoring. */
-+#define SRI_MASTER  (1<<0)
-+#define SRI_SLAVE   (1<<1)
-+#define SRI_SENTINEL (1<<2)
-+#define SRI_S_DOWN (1<<3)   /* Subjectively down (no quorum). */
-+#define SRI_O_DOWN (1<<4)   /* Objectively down (confirmed by others). */
-+#define SRI_MASTER_DOWN (1<<5) /* A Sentinel with this flag set thinks that
-+                                   its master is down. */
-+#define SRI_FAILOVER_IN_PROGRESS (1<<6) /* Failover is in progress for
-+                                           this master. */
-+#define SRI_PROMOTED (1<<7)            /* Slave selected for promotion. */
-+#define SRI_RECONF_SENT (1<<8)     /* SLAVEOF <newmaster> sent. */
-+#define SRI_RECONF_INPROG (1<<9)   /* Slave synchronization in progress. */
-+#define SRI_RECONF_DONE (1<<10)     /* Slave synchronized with new master. */
-+#define SRI_FORCE_FAILOVER (1<<11)  /* Force failover with master up. */
-+#define SRI_SCRIPT_KILL_SENT (1<<12) /* SCRIPT KILL already sent on -BUSY */
-+#define SRI_MASTER_REBOOT  (1<<13)   /* Master was detected as rebooting */
++/* A Sentinel Valkey Instance object is monitoring. */
++#define SRI_PRIMARY (1 << 0)
++#define SRI_REPLICA (1 << 1)
++#define SRI_SENTINEL (1 << 2)
++#define SRI_S_DOWN (1 << 3) /* Subjectively down (no quorum). */
++#define SRI_O_DOWN (1 << 4) /* Objectively down (confirmed by others). */
++#define SRI_PRIMARY_DOWN                                                                                               \
++    (1 << 5) /* A Sentinel with this flag set thinks that                                                              \
++                its primary is down. */
++#define SRI_FAILOVER_IN_PROGRESS                                                                                       \
++    (1 << 6)                           /* Failover is in progress for                                                  \
++                                          this primary. */
++#define SRI_PROMOTED (1 << 7)          /* Replica selected for promotion. */
++#define SRI_RECONF_SENT (1 << 8)       /* REPLICAOF <newprimary> sent. */
++#define SRI_RECONF_INPROG (1 << 9)     /* Replica synchronization in progress. */
++#define SRI_RECONF_DONE (1 << 10)      /* Replica synchronized with new primary. */
++#define SRI_FORCE_FAILOVER (1 << 11)   /* Force failover with primary up. */
++#define SRI_SCRIPT_KILL_SENT (1 << 12) /* SCRIPT KILL already sent on -BUSY */
++#define SRI_PRIMARY_REBOOT (1 << 13)   /* Primary was detected as rebooting */
 +
 +/* Note: times are in milliseconds. */
 +#define SENTINEL_PING_PERIOD 1000
 +
 +static mstime_t sentinel_default_down_after = 30000;
-+static mstime_t sentinel_default_failover_timeout = 60*3*1000;
++static mstime_t sentinel_default_failover_timeout = 60 * 3 * 1000;
 +
-+#define SENTINEL_DEFAULT_SLAVE_PRIORITY 100
++#define SENTINEL_DEFAULT_REPLICA_PRIORITY 100
 +#define SENTINEL_DEFAULT_PARALLEL_SYNCS 1
 +
 +#define SENTINEL_DEFAULT_RESOLVE_HOSTNAMES 0
 +#define SENTINEL_DEFAULT_ANNOUNCE_HOSTNAMES 0
 +
 +/* Failover machine different states. */
-+#define SENTINEL_FAILOVER_STATE_NONE 0  /* No failover in progress. */
-+#define SENTINEL_FAILOVER_STATE_WAIT_START 1  /* Wait for failover_start_time*/
-+#define SENTINEL_FAILOVER_STATE_SELECT_SLAVE 2 /* Select slave to promote */
-+#define SENTINEL_FAILOVER_STATE_SEND_SLAVEOF_NOONE 3 /* Slave -> Master */
-+#define SENTINEL_FAILOVER_STATE_WAIT_PROMOTION 4 /* Wait slave to change role */
-+#define SENTINEL_FAILOVER_STATE_RECONF_SLAVES 5 /* SLAVEOF newmaster */
-+#define SENTINEL_FAILOVER_STATE_UPDATE_CONFIG 6 /* Monitor promoted slave. */
++#define SENTINEL_FAILOVER_STATE_NONE 0                 /* No failover in progress. */
++#define SENTINEL_FAILOVER_STATE_WAIT_START 1           /* Wait for failover_start_time*/
++#define SENTINEL_FAILOVER_STATE_SELECT_REPLICA 2       /* Select replica to promote */
++#define SENTINEL_FAILOVER_STATE_SEND_REPLICAOF_NOONE 3 /* Replica -> Primary */
++#define SENTINEL_FAILOVER_STATE_WAIT_PROMOTION 4       /* Wait replica to change role */
++#define SENTINEL_FAILOVER_STATE_RECONF_REPLICAS 5      /* REPLICAOF newprimary */
++#define SENTINEL_FAILOVER_STATE_UPDATE_CONFIG 6        /* Monitor promoted replica. */
 +
-+#define SENTINEL_MASTER_LINK_STATUS_UP 0
-+#define SENTINEL_MASTER_LINK_STATUS_DOWN 1
++#define SENTINEL_PRIMARY_LINK_STATUS_UP 0
++#define SENTINEL_PRIMARY_LINK_STATUS_DOWN 1
 +
 +/* SENTINEL SIMULATE-FAILURE command flags. */
 +#define SENTINEL_SIMFAILURE_NONE 0
 +
-+typedef struct sentinelRedisInstance {
-+    int flags;      /* See SRI_... defines */
-+    char *name;     /* Master name from the point of view of this sentinel. */
-+    char *runid;    /* Run ID of this instance, or unique ID if is a Sentinel.*/
-+    uint64_t config_epoch;  /* Configuration epoch. */
-+    sentinelAddr *addr; /* Master host. */
-+    mstime_t last_pub_time;   /* Last time we sent hello via Pub/Sub. */
-+    mstime_t last_hello_time; /* Only used if SRI_SENTINEL is set. Last time
-+                                 we received a hello from this Sentinel
-+                                 via Pub/Sub. */
-+    mstime_t last_master_down_reply_time; /* Time of last reply to
-+                                             SENTINEL is-master-down command. */
-+    mstime_t s_down_since_time; /* Subjectively down since time. */
-+    mstime_t o_down_since_time; /* Objectively down since time. */
-+    mstime_t down_after_period; /* Consider it down after that period. */
-+    mstime_t master_reboot_down_after_period; /* Consider master down after that period. */
-+    mstime_t master_reboot_since_time; /* master reboot time since time. */
-+    mstime_t info_refresh;  /* Time at which we received INFO output from it. */
++typedef struct sentinelValkeyInstance {
++    int flags;                                 /* See SRI_... defines */
++    char *name;                                /* Primary name from the point of view of this sentinel. */
++    char *runid;                               /* Run ID of this instance, or unique ID if is a Sentinel.*/
++    uint64_t config_epoch;                     /* Configuration epoch. */
++    sentinelAddr *addr;                        /* Primary host. */
++    mstime_t last_pub_time;                    /* Last time we sent hello via Pub/Sub. */
++    mstime_t last_hello_time;                  /* Only used if SRI_SENTINEL is set. Last time
++                                                  we received a hello from this Sentinel
++                                                  via Pub/Sub. */
++    mstime_t last_primary_down_reply_time;     /* Time of last reply to
++                                                 SENTINEL is-primary-down command. */
++    mstime_t s_down_since_time;                /* Subjectively down since time. */
++    mstime_t o_down_since_time;                /* Objectively down since time. */
++    mstime_t down_after_period;                /* Consider it down after that period. */
++    mstime_t primary_reboot_down_after_period; /* Consider primary down after that period. */
++    mstime_t primary_reboot_since_time;        /* primary reboot time since time. */
++    mstime_t info_refresh;                     /* Time at which we received INFO output from it. */
 +
 +    /* Role and the first time we observed it.
 +     * This is useful in order to delay replacing what the instance reports
@@ -276,96 +278,95 @@ index 000000000..d3c31052a
 +     * we do silly things. */
 +    int role_reported;
 +    mstime_t role_reported_time;
-+    mstime_t slave_conf_change_time; /* Last time slave master addr changed. */
-+
-+    /* Master specific. */
-+    dict *sentinels;    /* Other sentinels monitoring the same master. */
-+    dict *slaves;       /* Slaves for this master instance. */
-+    unsigned int quorum;/* Number of sentinels that need to agree on failure. */
-+    int parallel_syncs; /* How many slaves to reconfigure at same time. */
-+
-+    /* Slave specific. */
-+    mstime_t master_link_down_time; /* Slave replication link down time. */
-+    int slave_priority; /* Slave priority according to its INFO output. */
-+    int replica_announced; /* Replica announcing according to its INFO output. */
-+    struct sentinelRedisInstance *master; /* Master instance if it's slave. */
-+    char *slave_master_host;    /* Master host as reported by INFO */
-+    int slave_master_port;      /* Master port as reported by INFO */
-+    int slave_master_link_status; /* Master link status as reported by INFO */
-+    unsigned long long slave_repl_offset; /* Slave replication offset. */
++    mstime_t replica_conf_change_time; /* Last time replica primary addr changed. */
++
++    /* Primary specific. */
++    dict *sentinels;     /* Other sentinels monitoring the same primary. */
++    dict *replicas;      /* Replicas for this primary instance. */
++    unsigned int quorum; /* Number of sentinels that need to agree on failure. */
++    int parallel_syncs;  /* How many replicas to reconfigure at same time. */
++
++    /* Replica specific. */
++    mstime_t primary_link_down_time;        /* Replica replication link down time. */
++    int replica_priority;                   /* Replica priority according to its INFO output. */
++    int replica_announced;                  /* Replica announcing according to its INFO output. */
++    struct sentinelValkeyInstance *primary; /* Primary instance if it's replica. */
++    char *replica_primary_host;             /* Primary host as reported by INFO */
++    int replica_primary_port;               /* Primary port as reported by INFO */
++    int replica_primary_link_status;        /* Primary link status as reported by INFO */
++    unsigned long long replica_repl_offset; /* Replica replication offset. */
 +    /* Failover */
-+    char *leader;       /* If this is a master instance, this is the runid of
-+                           the Sentinel that should perform the failover. If
-+                           this is a Sentinel, this is the runid of the Sentinel
-+                           that this Sentinel voted as leader. */
-+    uint64_t leader_epoch; /* Epoch of the 'leader' field. */
++    char *leader;            /* If this is a primary instance, this is the runid of
++                                the Sentinel that should perform the failover. If
++                                this is a Sentinel, this is the runid of the Sentinel
++                                that this Sentinel voted as leader. */
++    uint64_t leader_epoch;   /* Epoch of the 'leader' field. */
 +    uint64_t failover_epoch; /* Epoch of the currently started failover. */
-+    int failover_state; /* See SENTINEL_FAILOVER_STATE_* defines. */
++    int failover_state;      /* See SENTINEL_FAILOVER_STATE_* defines. */
 +    mstime_t failover_state_change_time;
 +    mstime_t failover_start_time;   /* Last failover attempt start time. */
 +    mstime_t failover_timeout;      /* Max time to refresh failover state. */
 +    mstime_t failover_delay_logged; /* For what failover_start_time value we
 +                                       logged the failover delay. */
-+    sds info; /* cached INFO output */
-+} sentinelRedisInstance;
++    sds info;                       /* cached INFO output */
++} sentinelValkeyInstance;
 +
 +/* Main state. */
 +struct sentinelState {
-+    char myid[CONFIG_RUN_ID_SIZE+1]; /* This sentinel ID. */
-+    uint64_t current_epoch;         /* Current epoch. */
-+    dict *masters;      /* Dictionary of master sentinelRedisInstances.
-+                           Key is the instance name, value is the
-+                           sentinelRedisInstance structure pointer. */
-+    int tilt;           /* Are we in TILT mode? */
-+    int running_scripts;    /* Number of scripts in execution right now. */
-+    mstime_t tilt_start_time;       /* When TITL started. */
-+    mstime_t previous_time;         /* Last time we ran the time handler. */
-+    unsigned long simfailure_flags; /* Failures simulation. */
-+    sds cache_update_secret;     /* Secret for cache update command. */
-+    int resolve_hostnames;       /* Support use of hostnames, assuming DNS is well configured. */
-+    int announce_hostnames;      /* Announce hostnames instead of IPs when we have them. */
++    char myid[CONFIG_RUN_ID_SIZE + 1]; /* This sentinel ID. */
++    uint64_t current_epoch;            /* Current epoch. */
++    dict *primaries;                   /* Dictionary of primary sentinelValkeyInstances.
++                                        Key is the instance name, value is the
++                                        sentinelValkeyInstance structure pointer. */
++    int tilt;                          /* Are we in TILT mode? */
++    int running_scripts;               /* Number of scripts in execution right now. */
++    mstime_t tilt_start_time;          /* When TILT started. */
++    mstime_t previous_time;            /* Last time we ran the time handler. */
++    unsigned long simfailure_flags;    /* Failures simulation. */
++    sds cache_update_secret;           /* Secret for cache update command. */
++    int resolve_hostnames;             /* Support use of hostnames, assuming DNS is well configured. */
++    int announce_hostnames;            /* Announce hostnames instead of IPs when we have them. */
 +} sentinel;
 +
 +/* ============================= Prototypes ================================= */
 +
-+sentinelRedisInstance *sentinelGetMasterByName(char *name);
-+sentinelRedisInstance *sentinelGetMaster(void);
-+const char *sentinelRedisInstanceTypeStr(sentinelRedisInstance *ri);
-+void sentinelEvent(int level, char *type, sentinelRedisInstance *ri, const char *fmt, ...);
++sentinelValkeyInstance *sentinelGetPrimaryByName(char *name);
++sentinelValkeyInstance *sentinelGetPrimary(void);
++const char *sentinelValkeyInstanceTypeStr(sentinelValkeyInstance *ri);
++void sentinelEvent(int level, char *type, sentinelValkeyInstance *ri, const char *fmt, ...);
 +int sentinelFlushConfig(void);
-+sentinelRedisInstance *sentinelGetMaster(void);
++sentinelValkeyInstance *sentinelGetPrimary(void);
 +void initializeSentinelConfig(void);
 +void freeSentinelConfig(void);
 +
 +/* ========================= Dictionary types =============================== */
 +
-+void releaseSentinelRedisInstance(sentinelRedisInstance *ri);
++void releasesentinelValkeyInstance(sentinelValkeyInstance *ri);
 +
-+void dictInstancesValDestructor (dict *d, void *obj) {
++void dictInstancesValDestructor(dict *d, void *obj) {
 +    UNUSED(d);
-+    releaseSentinelRedisInstance(obj);
++    releasesentinelValkeyInstance(obj);
 +}
 +
-+/* Instance name (sds) -> instance (sentinelRedisInstance pointer)
++/* Instance name (sds) -> instance (sentinelValkeyInstance pointer)
 + *
-+ * also used for: sentinelRedisInstance->sentinels dictionary that maps
++ * also used for: sentinelValkeyInstance->sentinels dictionary that maps
 + * sentinels ip:port to last seen time in Pub/Sub hello message. */
 +dictType instancesDictType = {
-+    dictSdsHash,               /* hash function */
-+    NULL,                      /* key dup */
-+    NULL,                      /* val dup */
-+    dictSdsKeyCompare,         /* key compare */
-+    NULL,                      /* key destructor */
-+    dictInstancesValDestructor,/* val destructor */
-+    NULL                       /* allow to expand */
++    dictSdsHash,                /* hash function */
++    NULL,                       /* key dup */
++    dictSdsKeyCompare,          /* key compare */
++    NULL,                       /* key destructor */
++    dictInstancesValDestructor, /* val destructor */
++    NULL                        /* allow to expand */
 +};
 +
 +/* =========================== Initialization =============================== */
 +
-+/* This function overwrites a few normal Redis config default with Sentinel
++/* This function overwrites a few normal Valkey config defaults with Sentinel
 + * specific defaults. */
 +void initSentinelConfig(void) {
-+    server.port = REDIS_SENTINEL_PORT;
++    server.port = VALKEY_SENTINEL_PORT;
 +    server.protected_mode = 0; /* Sentinel must be exposed. */
 +}
 +
@@ -375,7 +376,7 @@ index 000000000..d3c31052a
 +void initSentinel(void) {
 +    /* Initialize various data structures. */
 +    sentinel.current_epoch = 0;
-+    sentinel.masters = dictCreate(&instancesDictType);
++    sentinel.primaries = dictCreate(&instancesDictType);
 +    sentinel.tilt = 0;
 +    sentinel.tilt_start_time = 0;
 +    sentinel.previous_time = mstime();
@@ -384,7 +385,7 @@ index 000000000..d3c31052a
 +    sentinel.resolve_hostnames = SENTINEL_DEFAULT_RESOLVE_HOSTNAMES;
 +    sentinel.announce_hostnames = SENTINEL_DEFAULT_ANNOUNCE_HOSTNAMES;
 +    sentinel.cache_update_secret = sdsnew("mysecret");
-+    memset(sentinel.myid,0,sizeof(sentinel.myid));
++    memset(sentinel.myid, 0, sizeof(sentinel.myid));
 +    server.sentinel_config = NULL;
 +}
 +
@@ -392,13 +393,11 @@ index 000000000..d3c31052a
 + * also checking whether we have write permissions. */
 +void sentinelCheckConfigFile(void) {
 +    if (server.configfile == NULL) {
-+        serverLog(LL_WARNING,
-+            "Sentinel needs config file on disk to save state. Exiting...");
++        serverLog(LL_WARNING, "Sentinel needs config file on disk to save state. Exiting...");
 +        exit(1);
-+    } else if (access(server.configfile,W_OK) == -1) {
-+        serverLog(LL_WARNING,
-+            "Sentinel config file %s is not writable: %s. Exiting...",
-+            server.configfile,strerror(errno));
++    } else if (access(server.configfile, W_OK) == -1) {
++        serverLog(LL_WARNING, "Sentinel config file %s is not writable: %s. Exiting...", server.configfile,
++                  strerror(errno));
 +        exit(1);
 +    }
 +}
@@ -416,12 +415,12 @@ index 000000000..d3c31052a
 +
 +    if (j == CONFIG_RUN_ID_SIZE) {
 +        /* Pick ID and persist the config. */
-+        getRandomHexChars(sentinel.myid,CONFIG_RUN_ID_SIZE);
++        getRandomHexChars(sentinel.myid, CONFIG_RUN_ID_SIZE);
 +        sentinelFlushConfig();
 +    }
 +
 +    /* Log its ID to make debugging of issues simpler. */
-+    serverLog(LL_WARNING,"Sentinel ID is %s", sentinel.myid);
++    serverLog(LL_WARNING, "Sentinel ID is %s", sentinel.myid);
 +}
 +
 +/* ============================== sentinelAddr ============================== */
@@ -439,13 +438,12 @@ index 000000000..d3c31052a
 +        errno = EINVAL;
 +        return NULL;
 +    }
-+    if (anetResolve(NULL,hostname,ip,sizeof(ip),
-+                    sentinel.resolve_hostnames ? ANET_NONE : ANET_IP_ONLY) == ANET_ERR) {
++    if (anetResolve(NULL, hostname, ip, sizeof(ip), sentinel.resolve_hostnames ? ANET_NONE : ANET_IP_ONLY) ==
++        ANET_ERR) {
 +        serverLog(LL_WARNING, "Failed to resolve hostname '%s'", hostname);
 +        if (sentinel.resolve_hostnames && is_accept_unresolved) {
 +            ip[0] = '\0';
-+        }
-+        else {
++        } else {
 +            errno = ENOENT;
 +            return NULL;
 +        }
@@ -477,16 +475,14 @@ index 000000000..d3c31052a
 +
 +/* Return non-zero if two addresses are equal. */
 +int sentinelAddrIsEqual(sentinelAddr *a, sentinelAddr *b) {
-+    return a->port == b->port && !strcasecmp(a->ip,b->ip);
++    return a->port == b->port && !strcasecmp(a->ip, b->ip);
 +}
 +
 +/* Return non-zero if the two addresses are equal, either by address
 + * or by hostname if they could not have been resolved.
 + */
 +int sentinelAddrOrHostnameEqual(sentinelAddr *a, sentinelAddr *b) {
-+    return a->port == b->port &&
-+            (!strcmp(a->ip, b->ip)  ||
-+            !strcasecmp(a->hostname, b->hostname));
++    return a->port == b->port && (!strcmp(a->ip, b->ip) || !strcasecmp(a->hostname, b->hostname));
 +}
 +
 +/* Return non-zero if a hostname matches an address. */
@@ -494,8 +490,7 @@ index 000000000..d3c31052a
 +    char ip[NET_IP_STR_LEN];
 +
 +    /* We always resolve the hostname and compare it to the address */
-+    if (anetResolve(NULL, hostname, ip, sizeof(ip),
-+                    sentinel.resolve_hostnames ? ANET_NONE : ANET_IP_ONLY) == ANET_ERR)
++    if (anetResolve(NULL, hostname, ip, sizeof(ip), sentinel.resolve_hostnames ? ANET_NONE : ANET_IP_ONLY) == ANET_ERR)
 +        return 0;
 +    return !strcasecmp(a->ip, ip);
 +}
@@ -524,7 +519,7 @@ index 000000000..d3c31052a
 + *
 + * 'type' is the message type, also used as a pub/sub channel name.
 + *
-+ * 'ri', is the redis instance target of this event if applicable, and is
++ * 'ri', is the valkey instance target of this event if applicable, and is
 + * used to obtain the path of the notification script to execute.
 + *
 + * The remaining arguments are printf-alike.
@@ -534,33 +529,29 @@ index 000000000..d3c31052a
 + *
 + *  <instance type> <instance name> <ip> <port>
 + *
-+ *  If the instance type is not master, than the additional string is
-+ *  added to specify the originating master:
++ *  If the instance type is not primary, then the additional string is
++ *  added to specify the originating primary:
 + *
-+ *  @ <master name> <master ip> <master port>
++ *  @ <primary name> <primary ip> <primary port>
 + *
 + *  Any other specifier after "%@" is processed by printf itself.
 + */
-+void sentinelEvent(int level, char *type, sentinelRedisInstance *ri,
-+                   const char *fmt, ...) {
++void sentinelEvent(int level, char *type, sentinelValkeyInstance *ri, const char *fmt, ...) {
 +    va_list ap;
 +    char msg[LOG_MAX_LEN];
 +    robj *channel, *payload;
 +
 +    /* Handle %@ */
 +    if (fmt[0] == '%' && fmt[1] == '@') {
-+        sentinelRedisInstance *master = (ri->flags & SRI_MASTER) ?
-+                                         NULL : ri->master;
-+
-+        if (master) {
-+            snprintf(msg, sizeof(msg), "%s %s %s %d @ %s %s %d",
-+                sentinelRedisInstanceTypeStr(ri),
-+                ri->name, announceSentinelAddr(ri->addr), ri->addr->port,
-+                master->name, announceSentinelAddr(master->addr), master->addr->port);
++        sentinelValkeyInstance *primary = (ri->flags & SRI_PRIMARY) ? NULL : ri->primary;
++
++        if (primary) {
++            snprintf(msg, sizeof(msg), "%s %s %s %d @ %s %s %d", sentinelValkeyInstanceTypeStr(ri), ri->name,
++                     announceSentinelAddr(ri->addr), ri->addr->port, primary->name, announceSentinelAddr(primary->addr),
++                     primary->addr->port);
 +        } else {
-+            snprintf(msg, sizeof(msg), "%s %s %s %d",
-+                sentinelRedisInstanceTypeStr(ri),
-+                ri->name, announceSentinelAddr(ri->addr), ri->addr->port);
++            snprintf(msg, sizeof(msg), "%s %s %s %d", sentinelValkeyInstanceTypeStr(ri), ri->name,
++                     announceSentinelAddr(ri->addr), ri->addr->port);
 +        }
 +        fmt += 2;
 +    } else {
@@ -570,38 +561,37 @@ index 000000000..d3c31052a
 +    /* Use vsprintf for the rest of the formatting if any. */
 +    if (fmt[0] != '\0') {
 +        va_start(ap, fmt);
-+        vsnprintf(msg+strlen(msg), sizeof(msg)-strlen(msg), fmt, ap);
++        vsnprintf(msg + strlen(msg), sizeof(msg) - strlen(msg), fmt, ap);
 +        va_end(ap);
 +    }
 +
 +    /* Log the message if the log level allows it to be logged. */
-+    if (level >= server.verbosity)
-+        serverLog(level,"%s %s",type,msg);
++    if (level >= server.verbosity) serverLog(level, "%s %s", type, msg);
 +
 +    /* Publish the message via Pub/Sub if it's not a debugging one. */
 +    if (level != LL_DEBUG) {
-+        channel = createStringObject(type,strlen(type));
-+        payload = createStringObject(msg,strlen(msg));
-+        pubsubPublishMessage(channel,payload,0);
++        channel = createStringObject(type, strlen(type));
++        payload = createStringObject(msg, strlen(msg));
++        pubsubPublishMessage(channel, payload, 0);
 +        decrRefCount(channel);
 +        decrRefCount(payload);
 +    }
 +}
 +
-+/* ========================== sentinelRedisInstance ========================= */
++/* ========================== sentinelValkeyInstance ========================= */
 +
-+/* Create a redis instance, the following fields must be populated by the
++/* Create a valkey instance, the following fields must be populated by the
 + * caller if needed:
 + * runid: set to NULL but will be populated once INFO output is received.
 + * info_refresh: is set to 0 to mean that we never received INFO so far.
 + *
-+ * If SRI_MASTER is set into initial flags the instance is added to
-+ * sentinel.masters table.
++ * If SRI_PRIMARY is set into initial flags the instance is added to
++ * sentinel.primaries table.
 + *
-+ * if SRI_SLAVE or SRI_SENTINEL is set then 'master' must be not NULL and the
-+ * instance is added into master->slaves or master->sentinels table.
++ * if SRI_REPLICA or SRI_SENTINEL is set then 'primary' must be not NULL and the
++ * instance is added into primary->replicas or primary->sentinels table.
 + *
-+ * If the instance is a slave, the name parameter is ignored and is created
++ * If the instance is a replica, the name parameter is ignored and is created
 + * automatically as ip/hostname:port.
 + *
 + * The function fails if hostname can't be resolved or port is out of range.
@@ -609,36 +599,44 @@ index 000000000..d3c31052a
 + * createSentinelAddr() function.
 + *
 + * The function may also fail and return NULL with errno set to EBUSY if
-+ * a master with the same name, a slave with the same address, or a sentinel
++ * a primary with the same name, a replica with the same address, or a sentinel
 + * with the same ID already exists. */
 +
-+sentinelRedisInstance *createSentinelRedisInstance(char *name, int flags, char *hostname, int port, int quorum, sentinelRedisInstance *master) {
-+    sentinelRedisInstance *ri;
++sentinelValkeyInstance *createsentinelValkeyInstance(char *name,
++                                                     int flags,
++                                                     char *hostname,
++                                                     int port,
++                                                     int quorum,
++                                                     sentinelValkeyInstance *primary) {
++    sentinelValkeyInstance *ri;
 +    sentinelAddr *addr;
 +    dict *table = NULL;
 +    sds sdsname;
 +
-+    serverAssert(flags & (SRI_MASTER|SRI_SLAVE|SRI_SENTINEL));
-+    serverAssert((flags & SRI_MASTER) || master != NULL);
++    serverAssert(flags & (SRI_PRIMARY | SRI_REPLICA | SRI_SENTINEL));
++    serverAssert((flags & SRI_PRIMARY) || primary != NULL);
 +
 +    /* Check address validity. */
-+    addr = createSentinelAddr(hostname,port,1);
++    addr = createSentinelAddr(hostname, port, 1);
 +    if (addr == NULL) return NULL;
 +
-+    /* For slaves use ip/host:port as name. */
-+    if (flags & SRI_SLAVE)
++    /* For replicas use ip/host:port as name. */
++    if (flags & SRI_REPLICA)
 +        sdsname = announceSentinelAddrAndPort(addr);
 +    else
 +        sdsname = sdsnew(name);
 +
 +    /* Make sure the entry is not duplicated. This may happen when the same
-+     * name for a master is used multiple times inside the configuration or
-+     * if we try to add multiple times a slave or sentinel with same ip/port
-+     * to a master. */
-+    if (flags & SRI_MASTER) table = sentinel.masters;
-+    else if (flags & SRI_SLAVE) table = master->slaves;
-+    else if (flags & SRI_SENTINEL) table = master->sentinels;
-+    if (dictFind(table,sdsname)) {
++     * name for a primary is used multiple times inside the configuration or
++     * if we try to add multiple times a replica or sentinel with same ip/port
++     * to a primary. */
++    if (flags & SRI_PRIMARY)
++        table = sentinel.primaries;
++    else if (flags & SRI_REPLICA)
++        table = primary->replicas;
++    else if (flags & SRI_SENTINEL)
++        table = primary->sentinels;
++    if (dictFind(table, sdsname)) {
 +        releaseSentinelAddr(addr);
 +        sdsfree(sdsname);
 +        errno = EBUSY;
@@ -656,23 +654,23 @@ index 000000000..d3c31052a
 +    ri->addr = addr;
 +    ri->last_pub_time = mstime();
 +    ri->last_hello_time = mstime();
-+    ri->last_master_down_reply_time = mstime();
++    ri->last_primary_down_reply_time = mstime();
 +    ri->s_down_since_time = 0;
 +    ri->o_down_since_time = 0;
-+    ri->down_after_period = master ? master->down_after_period : sentinel_default_down_after;
-+    ri->master_reboot_down_after_period = 0;
-+    ri->master_link_down_time = 0;
-+    ri->slave_priority = SENTINEL_DEFAULT_SLAVE_PRIORITY;
++    ri->down_after_period = primary ? primary->down_after_period : sentinel_default_down_after;
++    ri->primary_reboot_down_after_period = 0;
++    ri->primary_link_down_time = 0;
++    ri->replica_priority = SENTINEL_DEFAULT_REPLICA_PRIORITY;
 +    ri->replica_announced = 1;
-+    ri->slave_master_host = NULL;
-+    ri->slave_master_port = 0;
-+    ri->slave_master_link_status = SENTINEL_MASTER_LINK_STATUS_DOWN;
-+    ri->slave_repl_offset = 0;
++    ri->replica_primary_host = NULL;
++    ri->replica_primary_port = 0;
++    ri->replica_primary_link_status = SENTINEL_PRIMARY_LINK_STATUS_DOWN;
++    ri->replica_repl_offset = 0;
 +    ri->sentinels = dictCreate(&instancesDictType);
 +    ri->quorum = quorum;
 +    ri->parallel_syncs = SENTINEL_DEFAULT_PARALLEL_SYNCS;
-+    ri->master = master;
-+    ri->slaves = dictCreate(&instancesDictType);
++    ri->primary = primary;
++    ri->replicas = dictCreate(&instancesDictType);
 +    ri->info_refresh = 0;
 +
 +    /* Failover state. */
@@ -687,28 +685,28 @@ index 000000000..d3c31052a
 +    ri->info = NULL;
 +
 +    /* Role */
-+    ri->role_reported = ri->flags & (SRI_MASTER|SRI_SLAVE);
++    ri->role_reported = ri->flags & (SRI_PRIMARY | SRI_REPLICA);
 +    ri->role_reported_time = mstime();
-+    ri->slave_conf_change_time = mstime();
++    ri->replica_conf_change_time = mstime();
 +
 +    /* Add into the right table. */
 +    dictAdd(table, ri->name, ri);
 +    return ri;
 +}
 +
-+/* Release this instance and all its slaves, sentinels, hiredis connections.
++/* Release this instance and all its replicas, sentinels, hiredis connections.
 + * This function does not take care of unlinking the instance from the main
-+ * masters table (if it is a master) or from its master sentinels/slaves table
-+ * if it is a slave or sentinel. */
-+void releaseSentinelRedisInstance(sentinelRedisInstance *ri) {
-+    /* Release all its slaves or sentinels if any. */
++ * primaries table (if it is a primary) or from its primary sentinels/replicas table
++ * if it is a replica or sentinel. */
++void releasesentinelValkeyInstance(sentinelValkeyInstance *ri) {
++    /* Release all its replicas or sentinels if any. */
 +    dictRelease(ri->sentinels);
-+    dictRelease(ri->slaves);
++    dictRelease(ri->replicas);
 +
 +    /* Free other resources. */
 +    sdsfree(ri->name);
 +    sdsfree(ri->runid);
-+    sdsfree(ri->slave_master_host);
++    sdsfree(ri->replica_primary_host);
 +    sdsfree(ri->leader);
 +    sdsfree(ri->info);
 +    releaseSentinelAddr(ri->addr);
@@ -716,55 +714,57 @@ index 000000000..d3c31052a
 +    zfree(ri);
 +}
 +
-+/* Lookup a slave in a master Redis instance, by ip and port. */
-+sentinelRedisInstance *sentinelRedisInstanceLookupSlave(
-+                sentinelRedisInstance *ri, char *slave_addr, int port)
-+{
++/* Lookup a replica in a primary valkey instance, by ip and port. */
++sentinelValkeyInstance *sentinelValkeyInstanceLookupReplica(sentinelValkeyInstance *ri, char *replica_addr, int port) {
 +    sds key;
-+    sentinelRedisInstance *slave;
++    sentinelValkeyInstance *replica;
 +    sentinelAddr *addr;
 +
-+    serverAssert(ri->flags & SRI_MASTER);
++    serverAssert(ri->flags & SRI_PRIMARY);
 +
-+    /* We need to handle a slave_addr that is potentially a hostname.
++    /* We need to handle a replica_addr that is potentially a hostname.
 +     * If that is the case, depending on configuration we either resolve
 +     * it and use the IP address or fail.
 +     */
-+    addr = createSentinelAddr(slave_addr, port, 0);
++    addr = createSentinelAddr(replica_addr, port, 0);
 +    if (!addr) return NULL;
 +    key = announceSentinelAddrAndPort(addr);
 +    releaseSentinelAddr(addr);
 +
-+    slave = dictFetchValue(ri->slaves,key);
++    replica = dictFetchValue(ri->replicas, key);
 +    sdsfree(key);
-+    return slave;
++    return replica;
 +}
 +
 +/* Return the name of the type of the instance as a string. */
-+const char *sentinelRedisInstanceTypeStr(sentinelRedisInstance *ri) {
-+    if (ri->flags & SRI_MASTER) return "master";
-+    else if (ri->flags & SRI_SLAVE) return "slave";
-+    else if (ri->flags & SRI_SENTINEL) return "sentinel";
-+    else return "unknown";
++const char *sentinelValkeyInstanceTypeStr(sentinelValkeyInstance *ri) {
++    if (ri->flags & SRI_PRIMARY)
++        return "master";
++    else if (ri->flags & SRI_REPLICA)
++        return "slave";
++    else if (ri->flags & SRI_SENTINEL)
++        return "sentinel";
++    else
++        return "unknown";
 +}
 +
-+/* Master lookup by name */
-+sentinelRedisInstance *sentinelGetMasterByName(char *name) {
-+    sentinelRedisInstance *ri;
++/* Primary lookup by name */
++sentinelValkeyInstance *sentinelGetPrimaryByName(char *name) {
++    sentinelValkeyInstance *ri;
 +    sds sdsname = sdsnew(name);
 +
-+    ri = dictFetchValue(sentinel.masters,sdsname);
++    ri = dictFetchValue(sentinel.primaries, sdsname);
 +    sdsfree(sdsname);
 +    return ri;
 +}
 +
-+/* Master lookup */
-+sentinelRedisInstance *sentinelGetMaster(void) {
++/* Primary lookup */
++sentinelValkeyInstance *sentinelGetPrimary(void) {
 +    dictIterator *di;
 +    dictEntry *de;
-+    sentinelRedisInstance *ri = NULL;
++    sentinelValkeyInstance *ri = NULL;
 +
-+    di = dictGetIterator(sentinel.masters);
++    di = dictGetIterator(sentinel.primaries);
 +    while ((de = dictNext(di)) != NULL) {
 +        ri = dictGetVal(de);
 +        break;
@@ -773,10 +773,10 @@ index 000000000..d3c31052a
 +    return ri;
 +}
 +
-+/* Return the current master address, that is, its address or the address
-+ * of the promoted slave if already operational. */
-+sentinelAddr *sentinelGetCurrentMasterAddress(sentinelRedisInstance *master) {
-+    return master->addr;
++/* Return the current primary address, that is, its address or the address
++ * of the promoted replica if already operational. */
++sentinelAddr *sentinelGetCurrentPrimaryAddress(sentinelValkeyInstance *primary) {
++    return primary->addr;
 +}
 +
 +/* ============================ Config handling ============================= */
@@ -787,9 +787,9 @@ index 000000000..d3c31052a
 +    server.sentinel_config->monitor_cfg = listCreate();
 +    server.sentinel_config->pre_monitor_cfg = listCreate();
 +    server.sentinel_config->post_monitor_cfg = listCreate();
-+    listSetFreeMethod(server.sentinel_config->monitor_cfg,freeSentinelLoadQueueEntry);
-+    listSetFreeMethod(server.sentinel_config->pre_monitor_cfg,freeSentinelLoadQueueEntry);
-+    listSetFreeMethod(server.sentinel_config->post_monitor_cfg,freeSentinelLoadQueueEntry);
++    listSetFreeMethod(server.sentinel_config->monitor_cfg, freeSentinelLoadQueueEntry);
++    listSetFreeMethod(server.sentinel_config->pre_monitor_cfg, freeSentinelLoadQueueEntry);
++    listSetFreeMethod(server.sentinel_config->post_monitor_cfg, freeSentinelLoadQueueEntry);
 +}
 +
 +/* destroy function for server.sentinel_config */
@@ -805,7 +805,7 @@ index 000000000..d3c31052a
 +/* free method for sentinelLoadQueueEntry when release the list */
 +void freeSentinelLoadQueueEntry(void *item) {
 +    struct sentinelLoadQueueEntry *entry = item;
-+    sdsfreesplitres(entry->argv,entry->argc);
++    sdsfreesplitres(entry->argv, entry->argc);
 +    sdsfree(entry->line);
 +    zfree(entry);
 +}
@@ -821,14 +821,14 @@ index 000000000..d3c31052a
 +    if (server.sentinel_config == NULL) initializeSentinelConfig();
 +
 +    entry = zmalloc(sizeof(struct sentinelLoadQueueEntry));
-+    entry->argv = zmalloc(sizeof(char*)*argc);
++    entry->argv = zmalloc(sizeof(char *) * argc);
 +    entry->argc = argc;
 +    entry->linenum = linenum;
 +    entry->line = sdsdup(line);
 +    for (i = 0; i < argc; i++) {
 +        entry->argv[i] = sdsdup(argv[i]);
 +    }
-+    listAddNodeTail(server.sentinel_config->pre_monitor_cfg,entry);
++    listAddNodeTail(server.sentinel_config->pre_monitor_cfg, entry);
 +}
 +
 +/* This function is used for loading the sentinel configuration from
@@ -844,19 +844,16 @@ index 000000000..d3c31052a
 +    /* if there is no sentinel_config entry, we can return immediately */
 +    if (server.sentinel_config == NULL) return;
 +
-+    list *sentinel_configs[3] = {
-+        server.sentinel_config->pre_monitor_cfg,
-+        server.sentinel_config->monitor_cfg,
-+        server.sentinel_config->post_monitor_cfg
-+    };
++    list *sentinel_configs[3] = {server.sentinel_config->pre_monitor_cfg, server.sentinel_config->monitor_cfg,
++                                 server.sentinel_config->post_monitor_cfg};
 +    /* loading from pre monitor config queue first to avoid dependency issues
 +     * loading from monitor config queue
 +     * loading from the post monitor config queue */
 +    for (j = 0; j < sizeof(sentinel_configs) / sizeof(sentinel_configs[0]); j++) {
-+        listRewind(sentinel_configs[j],&li);
-+        while((ln = listNext(&li))) {
++        listRewind(sentinel_configs[j], &li);
++        while ((ln = listNext(&li))) {
 +            struct sentinelLoadQueueEntry *entry = ln->value;
-+            err = sentinelHandleConfiguration(entry->argv,entry->argc);
++            err = sentinelHandleConfiguration(entry->argv, entry->argc);
 +            if (err) {
 +                linenum = entry->linenum;
 +                line = entry->line;
@@ -870,8 +867,7 @@ index 000000000..d3c31052a
 +    return;
 +
 +loaderr:
-+    fprintf(stderr, "\n*** FATAL CONFIG FILE ERROR (Redis %s) ***\n",
-+        REDIS_VERSION);
++    fprintf(stderr, "\n*** FATAL CONFIG FILE ERROR (Valkey %s) ***\n", VALKEY_VERSION);
 +    fprintf(stderr, "Reading the configuration file, at line %d\n", linenum);
 +    fprintf(stderr, ">>> '%s'\n", line);
 +    fprintf(stderr, "%s\n", err);
@@ -879,21 +875,20 @@ index 000000000..d3c31052a
 +}
 +
 +const char *sentinelHandleConfiguration(char **argv, int argc) {
-+    if (!strcasecmp(argv[0],"myid") && argc == 2) {
-+        if (strlen(argv[1]) != CONFIG_RUN_ID_SIZE)
-+            return "Malformed Sentinel id in myid option.";
-+        memcpy(sentinel.myid,argv[1],CONFIG_RUN_ID_SIZE);
-+    } else if (!strcasecmp(argv[0],"resolve-hostnames") && argc == 2) {
++    if (!strcasecmp(argv[0], "myid") && argc == 2) {
++        if (strlen(argv[1]) != CONFIG_RUN_ID_SIZE) return "Malformed Sentinel id in myid option.";
++        memcpy(sentinel.myid, argv[1], CONFIG_RUN_ID_SIZE);
++    } else if (!strcasecmp(argv[0], "resolve-hostnames") && argc == 2) {
 +        /* resolve-hostnames <yes|no> */
 +        if ((sentinel.resolve_hostnames = yesnotoi(argv[1])) == -1) {
 +            return "Please specify yes or no for the resolve-hostnames option.";
 +        }
-+    } else if (!strcasecmp(argv[0],"announce-hostnames") && argc == 2) {
++    } else if (!strcasecmp(argv[0], "announce-hostnames") && argc == 2) {
 +        /* announce-hostnames <yes|no> */
 +        if ((sentinel.announce_hostnames = yesnotoi(argv[1])) == -1) {
 +            return "Please specify yes or no for the announce-hostnames option.";
 +        }
-+    } else if (!strcasecmp(argv[0],"set-cache-update-secret") && argc == 2) {
++    } else if (!strcasecmp(argv[0], "set-cache-update-secret") && argc == 2) {
 +        /* set-cache-update-secret <secret> */
 +        sdsfree(sentinel.cache_update_secret);
 +        sentinel.cache_update_secret = sdsnew(argv[1]);
@@ -905,42 +900,40 @@ index 000000000..d3c31052a
 +
 +/* Implements CONFIG REWRITE for "sentinel" option.
 + * This is used not just to rewrite the configuration given by the user
-+ * (the configured masters) but also in order to retain the state of
-+ * Sentinel across restarts: config epoch of masters, associated slaves
++ * (the configured primaries) but also in order to retain the state of
++ * Sentinel across restarts: config epoch of primaries, associated replicas
 + * and sentinel instances, and so forth. */
 +void rewriteConfigSentinelOption(struct rewriteConfigState *state) {
 +    sds line;
 +
 +    /* sentinel unique ID. */
 +    line = sdscatprintf(sdsempty(), "sentinel myid %s", sentinel.myid);
-+    rewriteConfigRewriteLine(state,"sentinel myid",line,1);
++    rewriteConfigRewriteLine(state, "sentinel myid", line, 1);
 +
 +    /* sentinel resolve-hostnames. */
-+    line = sdscatprintf(sdsempty(), "sentinel resolve-hostnames %s",
-+                        sentinel.resolve_hostnames ? "yes" : "no");
-+    rewriteConfigRewriteLine(state,"sentinel resolve-hostnames",line,
++    line = sdscatprintf(sdsempty(), "sentinel resolve-hostnames %s", sentinel.resolve_hostnames ? "yes" : "no");
++    rewriteConfigRewriteLine(state, "sentinel resolve-hostnames", line,
 +                             sentinel.resolve_hostnames != SENTINEL_DEFAULT_RESOLVE_HOSTNAMES);
 +
 +    /* sentinel set-cache-update-secret. */
 +    if (strcasecmp(sentinel.cache_update_secret, "mysecret")) {
-+        line = sdscatprintf(sdsempty(),"sentinel set-cache-update-secret %s",sentinel.cache_update_secret);
-+        rewriteConfigRewriteLine(state,"sentinel set-cache-update-secret",line,1);
++        line = sdscatprintf(sdsempty(), "sentinel set-cache-update-secret %s", sentinel.cache_update_secret);
++        rewriteConfigRewriteLine(state, "sentinel set-cache-update-secret", line, 1);
 +    }
 +
 +    /* sentinel announce-hostnames. */
-+    line = sdscatprintf(sdsempty(), "sentinel announce-hostnames %s",
-+                        sentinel.announce_hostnames ? "yes" : "no");
-+    rewriteConfigRewriteLine(state,"sentinel announce-hostnames",line,
++    line = sdscatprintf(sdsempty(), "sentinel announce-hostnames %s", sentinel.announce_hostnames ? "yes" : "no");
++    rewriteConfigRewriteLine(state, "sentinel announce-hostnames", line,
 +                             sentinel.announce_hostnames != SENTINEL_DEFAULT_ANNOUNCE_HOSTNAMES);
 +}
 +
-+/* This function uses the config rewriting Redis engine in order to persist
++/* This function uses the config rewriting Valkey engine in order to persist
 + * the state of the Sentinel in the current configuration file.
 + *
 + * Before returning the function calls fsync() against the generated
 + * configuration file to make sure changes are committed to disk.
 + *
-+ * On failure the function logs a warning on the Redis log. */
++ * On failure the function logs a warning on the Valkey log. */
 +int sentinelFlushConfig(void) {
 +    int fd = -1;
 +    int saved_hz = server.hz;
@@ -951,14 +944,15 @@ index 000000000..d3c31052a
 +    server.hz = saved_hz;
 +
 +    if (rewrite_status == -1) goto werr;
-+    if ((fd = open(server.configfile,O_RDONLY)) == -1) goto werr;
++    if ((fd = open(server.configfile, O_RDONLY)) == -1) goto werr;
 +    if (fsync(fd) == -1) goto werr;
 +    if (close(fd) == EOF) goto werr;
-+    serverLog(LL_NOTICE,"Sentinel new configuration saved on disk");
++    serverLog(LL_NOTICE, "Sentinel new configuration saved on disk");
 +    return C_OK;
 +
 +werr:
-+    serverLog(LL_WARNING,"WARNING: Sentinel was not able to save the new configuration on disk!!!: %s", strerror(errno));
++    serverLog(LL_WARNING, "WARNING: Sentinel was not able to save the new configuration on disk!!!: %s",
++              strerror(errno));
 +    if (fd != -1) close(fd);
 +    return C_ERR;
 +}
@@ -966,236 +960,230 @@ index 000000000..d3c31052a
 +/* =========================== SENTINEL command ============================= */
 +
 +const char *sentinelFailoverStateStr(int state) {
-+    switch(state) {
++    switch (state) {
 +    case SENTINEL_FAILOVER_STATE_NONE: return "none";
 +    case SENTINEL_FAILOVER_STATE_WAIT_START: return "wait_start";
-+    case SENTINEL_FAILOVER_STATE_SELECT_SLAVE: return "select_slave";
-+    case SENTINEL_FAILOVER_STATE_SEND_SLAVEOF_NOONE: return "send_slaveof_noone";
++    case SENTINEL_FAILOVER_STATE_SELECT_REPLICA: return "select_replica";
++    case SENTINEL_FAILOVER_STATE_SEND_REPLICAOF_NOONE: return "send_replicaof_noone";
 +    case SENTINEL_FAILOVER_STATE_WAIT_PROMOTION: return "wait_promotion";
-+    case SENTINEL_FAILOVER_STATE_RECONF_SLAVES: return "reconf_slaves";
++    case SENTINEL_FAILOVER_STATE_RECONF_REPLICAS: return "reconf_replicas";
 +    case SENTINEL_FAILOVER_STATE_UPDATE_CONFIG: return "update_config";
 +    default: return "unknown";
 +    }
 +}
 +
-+/* Redis instance to Redis protocol representation. */
-+void addReplySentinelRedisInstance(client *c, sentinelRedisInstance *ri) {
++/* Valkey instance to Valkey protocol representation. */
++void addReplysentinelValkeyInstance(client *c, sentinelValkeyInstance *ri) {
 +    char *flags = sdsempty();
 +    void *mbl;
 +    int fields = 0;
 +
 +    mbl = addReplyDeferredLen(c);
 +
-+    addReplyBulkCString(c,"name");
-+    addReplyBulkCString(c,ri->name);
++    addReplyBulkCString(c, "name");
++    addReplyBulkCString(c, ri->name);
 +    fields++;
 +
-+    addReplyBulkCString(c,"ip");
-+    addReplyBulkCString(c,announceSentinelAddr(ri->addr));
++    addReplyBulkCString(c, "ip");
++    addReplyBulkCString(c, announceSentinelAddr(ri->addr));
 +    fields++;
 +
-+    addReplyBulkCString(c,"port");
-+    addReplyBulkLongLong(c,ri->addr->port);
++    addReplyBulkCString(c, "port");
++    addReplyBulkLongLong(c, ri->addr->port);
 +    fields++;
 +
-+    addReplyBulkCString(c,"runid");
-+    addReplyBulkCString(c,ri->runid ? ri->runid : "");
++    addReplyBulkCString(c, "runid");
++    addReplyBulkCString(c, ri->runid ? ri->runid : "");
 +    fields++;
 +
-+    addReplyBulkCString(c,"flags");
-+    if (ri->flags & SRI_S_DOWN) flags = sdscat(flags,"s_down,");
-+    if (ri->flags & SRI_O_DOWN) flags = sdscat(flags,"o_down,");
-+    if (ri->flags & SRI_MASTER) flags = sdscat(flags,"master,");
-+    if (ri->flags & SRI_SLAVE) flags = sdscat(flags,"slave,");
-+    if (ri->flags & SRI_SENTINEL) flags = sdscat(flags,"sentinel,");
-+    if (ri->flags & SRI_MASTER_DOWN) flags = sdscat(flags,"master_down,");
-+    if (ri->flags & SRI_FAILOVER_IN_PROGRESS)
-+        flags = sdscat(flags,"failover_in_progress,");
-+    if (ri->flags & SRI_PROMOTED) flags = sdscat(flags,"promoted,");
-+    if (ri->flags & SRI_RECONF_SENT) flags = sdscat(flags,"reconf_sent,");
-+    if (ri->flags & SRI_RECONF_INPROG) flags = sdscat(flags,"reconf_inprog,");
-+    if (ri->flags & SRI_RECONF_DONE) flags = sdscat(flags,"reconf_done,");
-+    if (ri->flags & SRI_FORCE_FAILOVER) flags = sdscat(flags,"force_failover,");
-+    if (ri->flags & SRI_SCRIPT_KILL_SENT) flags = sdscat(flags,"script_kill_sent,");
-+
-+    if (sdslen(flags) != 0) sdsrange(flags,0,-2); /* remove last "," */
-+    addReplyBulkCString(c,flags);
++    addReplyBulkCString(c, "flags");
++    if (ri->flags & SRI_S_DOWN) flags = sdscat(flags, "s_down,");
++    if (ri->flags & SRI_O_DOWN) flags = sdscat(flags, "o_down,");
++    if (ri->flags & SRI_PRIMARY) flags = sdscat(flags, "master,");
++    if (ri->flags & SRI_REPLICA) flags = sdscat(flags, "slave,");
++    if (ri->flags & SRI_SENTINEL) flags = sdscat(flags, "sentinel,");
++    if (ri->flags & SRI_PRIMARY_DOWN) flags = sdscat(flags, "master_down,");
++    if (ri->flags & SRI_FAILOVER_IN_PROGRESS) flags = sdscat(flags, "failover_in_progress,");
++    if (ri->flags & SRI_PROMOTED) flags = sdscat(flags, "promoted,");
++    if (ri->flags & SRI_RECONF_SENT) flags = sdscat(flags, "reconf_sent,");
++    if (ri->flags & SRI_RECONF_INPROG) flags = sdscat(flags, "reconf_inprog,");
++    if (ri->flags & SRI_RECONF_DONE) flags = sdscat(flags, "reconf_done,");
++    if (ri->flags & SRI_FORCE_FAILOVER) flags = sdscat(flags, "force_failover,");
++    if (ri->flags & SRI_SCRIPT_KILL_SENT) flags = sdscat(flags, "script_kill_sent,");
++
++    if (sdslen(flags) != 0) sdsrange(flags, 0, -2); /* remove last "," */
++    addReplyBulkCString(c, flags);
 +    sdsfree(flags);
 +    fields++;
 +
-+    addReplyBulkCString(c,"link-pending-commands");
-+    addReplyBulkLongLong(c,0);
++    addReplyBulkCString(c, "link-pending-commands");
++    addReplyBulkLongLong(c, 0);
 +    fields++;
 +
-+    addReplyBulkCString(c,"link-refcount");
-+    addReplyBulkLongLong(c,0);
++    addReplyBulkCString(c, "link-refcount");
++    addReplyBulkLongLong(c, 0);
 +    fields++;
 +
 +    if (ri->flags & SRI_FAILOVER_IN_PROGRESS) {
-+        addReplyBulkCString(c,"failover-state");
-+        addReplyBulkCString(c,(char*)sentinelFailoverStateStr(ri->failover_state));
++        addReplyBulkCString(c, "failover-state");
++        addReplyBulkCString(c, (char *)sentinelFailoverStateStr(ri->failover_state));
 +        fields++;
 +    }
 +
-+    addReplyBulkCString(c,"last-ping-sent");
-+    addReplyBulkLongLong(c,0);
++    addReplyBulkCString(c, "last-ping-sent");
++    addReplyBulkLongLong(c, 0);
 +    fields++;
 +
-+    addReplyBulkCString(c,"last-ok-ping-reply");
-+    addReplyBulkLongLong(c,0);
++    addReplyBulkCString(c, "last-ok-ping-reply");
++    addReplyBulkLongLong(c, 0);
 +    fields++;
 +
-+    addReplyBulkCString(c,"last-ping-reply");
-+    addReplyBulkLongLong(c,0);
++    addReplyBulkCString(c, "last-ping-reply");
++    addReplyBulkLongLong(c, 0);
 +    fields++;
 +
 +    if (ri->flags & SRI_S_DOWN) {
-+        addReplyBulkCString(c,"s-down-time");
-+        addReplyBulkLongLong(c,mstime()-ri->s_down_since_time);
++        addReplyBulkCString(c, "s-down-time");
++        addReplyBulkLongLong(c, mstime() - ri->s_down_since_time);
 +        fields++;
 +    }
 +
 +    if (ri->flags & SRI_O_DOWN) {
-+        addReplyBulkCString(c,"o-down-time");
-+        addReplyBulkLongLong(c,mstime()-ri->o_down_since_time);
++        addReplyBulkCString(c, "o-down-time");
++        addReplyBulkLongLong(c, mstime() - ri->o_down_since_time);
 +        fields++;
 +    }
 +
-+    addReplyBulkCString(c,"down-after-milliseconds");
-+    addReplyBulkLongLong(c,ri->down_after_period);
++    addReplyBulkCString(c, "down-after-milliseconds");
++    addReplyBulkLongLong(c, ri->down_after_period);
 +    fields++;
 +
-+    /* Masters and Slaves */
-+    if (ri->flags & (SRI_MASTER|SRI_SLAVE)) {
-+        addReplyBulkCString(c,"info-refresh");
++    /* Primaries and Replicas */
++    if (ri->flags & (SRI_PRIMARY | SRI_REPLICA)) {
++        addReplyBulkCString(c, "info-refresh");
 +        addReplyBulkLongLong(c, 0);
 +        fields++;
 +
-+        addReplyBulkCString(c,"role-reported");
-+        addReplyBulkCString(c, (ri->role_reported == SRI_MASTER) ? "master" :
-+                                                                   "slave");
++        addReplyBulkCString(c, "role-reported");
++        addReplyBulkCString(c, (ri->role_reported == SRI_PRIMARY) ? "master" : "slave");
 +        fields++;
 +
-+        addReplyBulkCString(c,"role-reported-time");
-+        addReplyBulkLongLong(c,mstime() - ri->role_reported_time);
++        addReplyBulkCString(c, "role-reported-time");
++        addReplyBulkLongLong(c, mstime() - ri->role_reported_time);
 +        fields++;
 +    }
 +
-+    /* Only masters */
-+    if (ri->flags & SRI_MASTER) {
-+        addReplyBulkCString(c,"config-epoch");
-+        addReplyBulkLongLong(c,ri->config_epoch);
++    /* Only primaries */
++    if (ri->flags & SRI_PRIMARY) {
++        addReplyBulkCString(c, "config-epoch");
++        addReplyBulkLongLong(c, ri->config_epoch);
 +        fields++;
 +
-+        addReplyBulkCString(c,"num-slaves");
-+        addReplyBulkLongLong(c,dictSize(ri->slaves));
++        addReplyBulkCString(c, "num-slaves");
++        addReplyBulkLongLong(c, dictSize(ri->replicas));
 +        fields++;
 +
-+        addReplyBulkCString(c,"num-other-sentinels");
-+        addReplyBulkLongLong(c,dictSize(ri->sentinels));
++        addReplyBulkCString(c, "num-other-sentinels");
++        addReplyBulkLongLong(c, dictSize(ri->sentinels));
 +        fields++;
 +
-+        addReplyBulkCString(c,"quorum");
-+        addReplyBulkLongLong(c,ri->quorum);
++        addReplyBulkCString(c, "quorum");
++        addReplyBulkLongLong(c, ri->quorum);
 +        fields++;
 +
-+        addReplyBulkCString(c,"failover-timeout");
-+        addReplyBulkLongLong(c,ri->failover_timeout);
++        addReplyBulkCString(c, "failover-timeout");
++        addReplyBulkLongLong(c, ri->failover_timeout);
 +        fields++;
 +
-+        addReplyBulkCString(c,"parallel-syncs");
-+        addReplyBulkLongLong(c,ri->parallel_syncs);
++        addReplyBulkCString(c, "parallel-syncs");
++        addReplyBulkLongLong(c, ri->parallel_syncs);
 +        fields++;
 +    }
 +
-+    /* Only slaves */
-+    if (ri->flags & SRI_SLAVE) {
-+        addReplyBulkCString(c,"master-link-down-time");
-+        addReplyBulkLongLong(c,ri->master_link_down_time);
++    /* Only replicas */
++    if (ri->flags & SRI_REPLICA) {
++        addReplyBulkCString(c, "master-link-down-time");
++        addReplyBulkLongLong(c, ri->primary_link_down_time);
 +        fields++;
 +
-+        addReplyBulkCString(c,"master-link-status");
-+        addReplyBulkCString(c,
-+            (ri->slave_master_link_status == SENTINEL_MASTER_LINK_STATUS_UP) ?
-+            "ok" : "err");
++        addReplyBulkCString(c, "master-link-status");
++        addReplyBulkCString(c, (ri->replica_primary_link_status == SENTINEL_PRIMARY_LINK_STATUS_UP) ? "ok" : "err");
 +        fields++;
 +
-+        addReplyBulkCString(c,"master-host");
-+        addReplyBulkCString(c,
-+            ri->slave_master_host ? ri->slave_master_host : "?");
++        addReplyBulkCString(c, "master-host");
++        addReplyBulkCString(c, ri->replica_primary_host ? ri->replica_primary_host : "?");
 +        fields++;
 +
-+        addReplyBulkCString(c,"master-port");
-+        addReplyBulkLongLong(c,ri->slave_master_port);
++        addReplyBulkCString(c, "master-port");
++        addReplyBulkLongLong(c, ri->replica_primary_port);
 +        fields++;
 +
-+        addReplyBulkCString(c,"slave-priority");
-+        addReplyBulkLongLong(c,ri->slave_priority);
++        addReplyBulkCString(c, "slave-priority");
++        addReplyBulkLongLong(c, ri->replica_priority);
 +        fields++;
 +
-+        addReplyBulkCString(c,"slave-repl-offset");
-+        addReplyBulkLongLong(c,ri->slave_repl_offset);
++        addReplyBulkCString(c, "slave-repl-offset");
++        addReplyBulkLongLong(c, ri->replica_repl_offset);
 +        fields++;
 +
-+        addReplyBulkCString(c,"replica-announced");
-+        addReplyBulkLongLong(c,ri->replica_announced);
++        addReplyBulkCString(c, "replica-announced");
++        addReplyBulkLongLong(c, ri->replica_announced);
 +        fields++;
 +    }
 +
 +    /* Only sentinels */
 +    if (ri->flags & SRI_SENTINEL) {
-+        addReplyBulkCString(c,"last-hello-message");
-+        addReplyBulkLongLong(c,0);
++        addReplyBulkCString(c, "last-hello-message");
++        addReplyBulkLongLong(c, 0);
 +        fields++;
 +
-+        addReplyBulkCString(c,"voted-leader");
-+        addReplyBulkCString(c,ri->leader ? ri->leader : "?");
++        addReplyBulkCString(c, "voted-leader");
++        addReplyBulkCString(c, ri->leader ? ri->leader : "?");
 +        fields++;
 +
-+        addReplyBulkCString(c,"voted-leader-epoch");
-+        addReplyBulkLongLong(c,ri->leader_epoch);
++        addReplyBulkCString(c, "voted-leader-epoch");
++        addReplyBulkLongLong(c, ri->leader_epoch);
 +        fields++;
 +    }
 +
-+    setDeferredMapLen(c,mbl,fields);
++    setDeferredMapLen(c, mbl, fields);
 +}
 +
 +/* Output a number of instances contained inside a dictionary as
-+ * Redis protocol. */
-+void addReplyDictOfRedisInstances(client *c, dict *instances) {
++ * Valkey protocol. */
++void addReplyDictOfValkeyInstances(client *c, dict *instances) {
 +    dictIterator *di;
 +    dictEntry *de;
-+    long slaves = 0;
++    long replicas = 0;
 +    void *replylen = addReplyDeferredLen(c);
 +
 +    di = dictGetIterator(instances);
-+    while((de = dictNext(di)) != NULL) {
-+        sentinelRedisInstance *ri = dictGetVal(de);
++    while ((de = dictNext(di)) != NULL) {
++        sentinelValkeyInstance *ri = dictGetVal(de);
 +
 +        /* don't announce unannounced replicas */
-+        if (ri->flags & SRI_SLAVE && !ri->replica_announced) continue;
-+        addReplySentinelRedisInstance(c,ri);
-+        slaves++;
++        if (ri->flags & SRI_REPLICA && !ri->replica_announced) continue;
++        addReplysentinelValkeyInstance(c, ri);
++        replicas++;
 +    }
 +    dictReleaseIterator(di);
-+    setDeferredArrayLen(c, replylen, slaves);
++    setDeferredArrayLen(c, replylen, replicas);
 +}
 +
-+/* Get first master and use it. If no masters are in the cache reply with error. */
-+sentinelRedisInstance *sentinelGetMasterOrReplyError(client *c)
-+{
++/* Get the first primary and use it. If no primaries are in the cache, reply with an error. */
++sentinelValkeyInstance *sentinelGetPrimaryOrReplyError(client *c) {
 +    dictIterator *di;
 +    dictEntry *de;
-+    sentinelRedisInstance *ri = NULL;
++    sentinelValkeyInstance *ri = NULL;
 +
-+    di = dictGetIterator(sentinel.masters);
++    di = dictGetIterator(sentinel.primaries);
 +    while ((de = dictNext(di)) != NULL) {
 +        ri = dictGetVal(de);
 +        break;
 +    }
 +    dictReleaseIterator(di);
 +    if (!ri) {
-+        addReplyError(c,"No such master with that name");
++        addReplyError(c, "No such master with that name");
 +        return NULL;
 +    }
 +    return ri;
@@ -1233,12 +1221,12 @@ index 000000000..d3c31052a
 +    return ret;
 +}
 +
-+static int sentinelUpdateMasterName(client *c, sds name) {
++static int sentinelUpdatePrimaryName(client *c, sds name) {
 +    dictIterator *di;
 +    dictEntry *de;
-+    sentinelRedisInstance *ri = NULL;
++    sentinelValkeyInstance *ri = NULL;
 +
-+    di = dictGetIterator(sentinel.masters);
++    di = dictGetIterator(sentinel.primaries);
 +    while ((de = dictNext(di)) != NULL) {
 +        ri = dictGetVal(de);
 +        break;
@@ -1247,19 +1235,19 @@ index 000000000..d3c31052a
 +    if (ri && !strcasecmp(name, ri->name)) {
 +        return 0;
 +    }
-+    sentinelRedisInstance *newRi = createSentinelRedisInstance(name, SRI_MASTER, "127.0.0.1", 0, 0, NULL);
++    sentinelValkeyInstance *newRi = createsentinelValkeyInstance(name, SRI_PRIMARY, "127.0.0.1", 0, 0, NULL);
 +    if (newRi == NULL) {
 +        addReplyError(c, "Creation of new master failed");
 +        return 1;
 +    }
 +    if (ri) {
-+        sentinelRedisInstance *origRi = NULL;
-+        sentinelRedisInstance *copyRi = NULL;
++        sentinelValkeyInstance *origRi = NULL;
++        sentinelValkeyInstance *copyRi = NULL;
 +        di = dictGetIterator(ri->sentinels);
 +        while ((de = dictNext(di)) != NULL) {
 +            origRi = dictGetVal(de);
-+            copyRi = createSentinelRedisInstance(origRi->name, origRi->flags, origRi->addr->hostname,
-+                                                 origRi->addr->port, 0, newRi);
++            copyRi = createsentinelValkeyInstance(origRi->name, origRi->flags, origRi->addr->hostname,
++                                                  origRi->addr->port, 0, newRi);
 +            if (copyRi == NULL) {
 +                addReplyError(c, "Sentinel copy failed");
 +                dictReleaseIterator(di);
@@ -1269,13 +1257,13 @@ index 000000000..d3c31052a
 +        dictReleaseIterator(di);
 +        origRi = NULL;
 +        copyRi = NULL;
-+        di = dictGetIterator(ri->slaves);
++        di = dictGetIterator(ri->replicas);
 +        while ((de = dictNext(di)) != NULL) {
 +            origRi = dictGetVal(de);
-+            copyRi = createSentinelRedisInstance(origRi->name, origRi->flags, origRi->addr->hostname,
-+                                                 origRi->addr->port, 0, newRi);
++            copyRi = createsentinelValkeyInstance(origRi->name, origRi->flags, origRi->addr->hostname,
++                                                  origRi->addr->port, 0, newRi);
 +            if (copyRi == NULL) {
-+                addReplyError(c, "Slave copy failed");
++                addReplyError(c, "Replica copy failed");
 +                dictReleaseIterator(di);
 +                return 1;
 +            }
@@ -1288,7 +1276,7 @@ index 000000000..d3c31052a
 +        }
 +        newRi->config_epoch = ri->config_epoch;
 +        newRi->role_reported_time = ri->role_reported_time;
-+        if (dictDelete(sentinel.masters, ri->name) != DICT_OK) {
++        if (dictDelete(sentinel.primaries, ri->name) != DICT_OK) {
 +            addReplyError(c, "Old master delete failed");
 +            return 1;
 +        }
@@ -1299,8 +1287,8 @@ index 000000000..d3c31052a
 +    return 0;
 +}
 +
-+static int sentinelUpdateMasterAddr(client *c, sds addr) {
-+    sentinelRedisInstance *ri = sentinelGetMasterOrReplyError(c);
++static int sentinelUpdatePrimaryAddr(client *c, sds addr) {
++    sentinelValkeyInstance *ri = sentinelGetPrimaryOrReplyError(c);
 +
 +    if (!ri) {
 +        return 1;
@@ -1313,18 +1301,21 @@ index 000000000..d3c31052a
 +    }
 +
 +    if (!sentinelAddrOrHostnameEqual(newValue, ri->addr)) {
++        int needEvent = 0;
 +        if (ri->addr->port != 0) {
-+            dictRelease(ri->slaves);
-+            ri->slaves = dictCreate(&instancesDictType);
-+            sentinelEvent(LL_WARNING,"+switch-master",ri,"%s %s %d %s %d",ri->name,
-+                          announceSentinelAddr(ri->addr), ri->addr->port,
-+                          announceSentinelAddr(newValue), newValue->port);
-+            // A workaround for lettuce
-+            sentinelEvent(LL_WARNING,"failover-end",ri,"%@");
++            needEvent = 1;
++            dictRelease(ri->replicas);
++            ri->replicas = dictCreate(&instancesDictType);
++            sentinelEvent(LL_WARNING, "+switch-master", ri, "%s %s %d %s %d", ri->name, announceSentinelAddr(ri->addr),
++                          ri->addr->port, announceSentinelAddr(newValue), newValue->port);
 +            ri->role_reported_time = mstime();
 +        }
 +        releaseSentinelAddr(ri->addr);
 +        ri->addr = newValue;
++        if (needEvent == 1) {
++            // A workaround for lettuce
++            sentinelEvent(LL_WARNING, "failover-end", ri, "%@");
++        }
 +    } else {
 +        releaseSentinelAddr(newValue);
 +    }
@@ -1332,14 +1323,14 @@ index 000000000..d3c31052a
 +    return 0;
 +}
 +
-+static int sentinelUpdateMasterSpec(client *c, sds spec) {
++static int sentinelUpdatePrimarySpec(client *c, sds spec) {
 +    int count = 0;
 +    int ret = 0;
 +    unsigned long quorum = 0;
 +    unsigned long parallel_syncs = 0;
 +    uint64_t config_epoch = 0;
 +
-+    sentinelRedisInstance *ri = sentinelGetMasterOrReplyError(c);
++    sentinelValkeyInstance *ri = sentinelGetPrimaryOrReplyError(c);
 +
 +    if (!ri) {
 +        return 1;
@@ -1382,33 +1373,33 @@ index 000000000..d3c31052a
 +    config_epoch = strtoull(args[3], NULL, 10);
 +    if (ri->config_epoch != config_epoch) {
 +        ri->config_epoch = config_epoch;
-+        sentinelEvent(LL_WARNING,"+new-epoch",ri,"%llu",ri->config_epoch);
++        sentinelEvent(LL_WARNING, "+new-epoch", ri, "%llu", ri->config_epoch);
 +    }
 +
 +    sdsfreesplitres(args, count);
 +    return ret;
 +}
 +
-+static int sentinelUpdateSlaveSpec(client *c, sds spec) {
++static int sentinelUpdateReplicaSpec(client *c, sds spec) {
 +    int count = 0;
 +    int ret = 0;
 +    unsigned long port = 0;
 +    sentinelAddr *addr = NULL;
-+    sentinelRedisInstance *ri = NULL;
++    sentinelValkeyInstance *ri = NULL;
 +
-+    sentinelRedisInstance *masterRi = sentinelGetMasterOrReplyError(c);
++    sentinelValkeyInstance *primaryRi = sentinelGetPrimaryOrReplyError(c);
 +
-+    if (!masterRi) {
++    if (!primaryRi) {
 +        return 1;
 +    }
 +
 +    sds *args = sdssplitlen(spec, sdslen(spec), " ", 1, &count);
 +    if (args == NULL) {
-+        addReplyErrorFormat(c, "Unable to split slave spec: %s", spec);
++        addReplyErrorFormat(c, "Unable to split replica spec: %s", spec);
 +        return 1;
 +    }
 +    if (count != 10) {
-+        addReplyErrorFormat(c, "Unexpected number of tokens in slave spec: %d (expected 10)", count);
++        addReplyErrorFormat(c, "Unexpected number of tokens in replica spec: %d (expected 10)", count);
 +        sdsfreesplitres(args, count);
 +        return 1;
 +    }
@@ -1427,10 +1418,10 @@ index 000000000..d3c31052a
 +        goto cleanup;
 +    }
 +
-+    ri = sentinelRedisInstanceLookupSlave(masterRi, addr->hostname, addr->port);
++    ri = sentinelValkeyInstanceLookupReplica(primaryRi, addr->hostname, addr->port);
 +
 +    if (ri == NULL) {
-+        addReplyErrorFormat(c, "Unable to find slave by %s:%d", addr->hostname, addr->port);
++        addReplyErrorFormat(c, "Unable to find replica by %s:%d", addr->hostname, addr->port);
 +        ret = 1;
 +        goto cleanup;
 +    }
@@ -1440,18 +1431,18 @@ index 000000000..d3c31052a
 +        ri->runid = sdsdup(args[2]);
 +    }
 +
-+    ri->master_link_down_time = strtoll(args[3], NULL, 10);
-+    ri->slave_priority = strtol(args[4], NULL, 10);
++    ri->primary_link_down_time = strtoll(args[3], NULL, 10);
++    ri->replica_priority = strtol(args[4], NULL, 10);
 +    ri->replica_announced = strtol(args[5], NULL, 10);
 +
-+    if (ri->slave_master_host == NULL || strcmp(args[6], ri->slave_master_host)) {
-+        sdsfree(ri->slave_master_host);
-+        ri->slave_master_host = sdsdup(args[6]);
++    if (ri->replica_primary_host == NULL || strcmp(args[6], ri->replica_primary_host)) {
++        sdsfree(ri->replica_primary_host);
++        ri->replica_primary_host = sdsdup(args[6]);
 +    }
 +
-+    ri->slave_master_port = strtoul(args[7], NULL, 10);
-+    ri->slave_master_link_status = strtol(args[8], NULL, 10);
-+    ri->slave_repl_offset = strtoull(args[9], NULL, 10);
++    ri->replica_primary_port = strtoul(args[7], NULL, 10);
++    ri->replica_primary_link_status = strtol(args[8], NULL, 10);
++    ri->replica_repl_offset = strtoull(args[9], NULL, 10);
 +cleanup:
 +    if (addr != NULL) {
 +        releaseSentinelAddr(addr);
@@ -1461,29 +1452,29 @@ index 000000000..d3c31052a
 +}
 +
 +static int sentinelAddReplica(client *c, sds addr) {
-+    sentinelRedisInstance *masterRi = sentinelGetMasterOrReplyError(c);
++    sentinelValkeyInstance *primaryRi = sentinelGetPrimaryOrReplyError(c);
 +
-+    if (!masterRi) {
++    if (!primaryRi) {
 +        return 1;
 +    }
 +
-+    sentinelRedisInstance *ri = NULL;
++    sentinelValkeyInstance *ri = NULL;
 +    sentinelAddr *addrValue = parseAddr(c, addr);
 +
 +    if (addrValue == NULL) {
 +        return 1;
 +    }
 +
-+    if (sentinelAddrIsEqual(masterRi->addr, addrValue)) {
++    if (sentinelAddrIsEqual(primaryRi->addr, addrValue)) {
 +        addReplyErrorFormat(c, "Addr %s:%d is already assigned to master", addrValue->hostname, addrValue->port);
 +        releaseSentinelAddr(addrValue);
 +        return 1;
 +    }
 +
-+    ri = createSentinelRedisInstance("", SRI_SLAVE, addrValue->hostname, addrValue->port, 0, masterRi);
++    ri = createsentinelValkeyInstance("", SRI_REPLICA, addrValue->hostname, addrValue->port, 0, primaryRi);
 +    if (ri) {
 +        ri->role_reported_time = mstime();
-+        sentinelEvent(LL_NOTICE,"+slave",ri,"%@");
++        sentinelEvent(LL_NOTICE, "+slave", ri, "%@");
 +    }
 +
 +    releaseSentinelAddr(addrValue);
@@ -1491,9 +1482,9 @@ index 000000000..d3c31052a
 +}
 +
 +static int sentinelAddSentinel(client *c, sds spec) {
-+    sentinelRedisInstance *masterRi = sentinelGetMasterOrReplyError(c);
++    sentinelValkeyInstance *primaryRi = sentinelGetPrimaryOrReplyError(c);
 +
-+    if (!masterRi) {
++    if (!primaryRi) {
 +        return 1;
 +    }
 +
@@ -1527,12 +1518,12 @@ index 000000000..d3c31052a
 +        goto cleanup;
 +    }
 +
-+    sentinelRedisInstance *ri = NULL;
++    sentinelValkeyInstance *ri = NULL;
 +
-+    ri = createSentinelRedisInstance(args[0], SRI_SENTINEL, addr->hostname, addr->port, 0, masterRi);
++    ri = createsentinelValkeyInstance(args[0], SRI_SENTINEL, addr->hostname, addr->port, 0, primaryRi);
 +    if (ri) {
 +        ri->runid = sdsdup(args[1]);
-+        sentinelEvent(LL_NOTICE,"+sentinel",ri,"%@");
++        sentinelEvent(LL_NOTICE, "+sentinel", ri, "%@");
 +    }
 +
 +cleanup:
@@ -1544,7 +1535,7 @@ index 000000000..d3c31052a
 +}
 +
 +static int sentinelDeleteReplica(client *c, sds addr) {
-+    sentinelRedisInstance *ri = sentinelGetMasterOrReplyError(c);
++    sentinelValkeyInstance *ri = sentinelGetPrimaryOrReplyError(c);
 +
 +    if (!ri) {
 +        return 1;
@@ -1556,7 +1547,7 @@ index 000000000..d3c31052a
 +    }
 +
 +    sds key = announceSentinelAddrAndPort(addrValue);
-+    dictDelete(ri->slaves, key);
++    dictDelete(ri->replicas, key);
 +    sdsfree(key);
 +
 +    releaseSentinelAddr(addrValue);
@@ -1564,7 +1555,7 @@ index 000000000..d3c31052a
 +}
 +
 +static int sentinelDeleteSentinel(client *c, sds name) {
-+    sentinelRedisInstance *ri = sentinelGetMasterOrReplyError(c);
++    sentinelValkeyInstance *ri = sentinelGetPrimaryOrReplyError(c);
 +
 +    if (!ri) {
 +        return 1;
@@ -1577,13 +1568,13 @@ index 000000000..d3c31052a
 +
 +static int sentinelUpdateCacheKey(client *c, sds key, sds value) {
 +    if (!strcasecmp(key, "master-name")) {
-+        return sentinelUpdateMasterName(c, value);
++        return sentinelUpdatePrimaryName(c, value);
 +    } else if (!strcasecmp(key, "master-addr")) {
-+        return sentinelUpdateMasterAddr(c, value);
++        return sentinelUpdatePrimaryAddr(c, value);
 +    } else if (!strcasecmp(key, "master-spec")) {
-+        return sentinelUpdateMasterSpec(c, value);
++        return sentinelUpdatePrimarySpec(c, value);
 +    } else if (!strcasecmp(key, "slave-spec")) {
-+        return sentinelUpdateSlaveSpec(c, value);
++        return sentinelUpdateReplicaSpec(c, value);
 +    } else if (!strcasecmp(key, "add-replica")) {
 +        return sentinelAddReplica(c, value);
 +    } else if (!strcasecmp(key, "add-sentinel")) {
@@ -1607,8 +1598,7 @@ index 000000000..d3c31052a
 +} CacheParserState;
 +
 +/* Update internal state with provided one */
-+static void sentinelCacheUpdate(client *c)
-+{
++static void sentinelCacheUpdate(client *c) {
 +    sds updateCommand = sdsempty();
 +    sds key = sdsempty();
 +    sds value = sdsempty();
@@ -1627,52 +1617,52 @@ index 000000000..d3c31052a
 +
 +    for (size_t i = 0; i < sdslen(updateCommand); i++) {
 +        switch (parserState) {
-+            case CPS_KEY_START:
-+                if (!isspace(updateCommand[i])) {
-+                    parserState = CPS_KEY_READ;
-+                    keyStart = i;
-+                }
-+                break;
-+            case CPS_KEY_READ:
-+                if (updateCommand[i] == ':') {
-+                    keyEnd = i;
-+                    parserState = CPS_VALUE_START;
-+                }
-+                break;
-+            case CPS_VALUE_START:
-+                if (!isspace(updateCommand[i])) {
-+                    parserState = CPS_VALUE_READ;
-+                    valueStart = i;
-+                }
-+                break;
-+            case CPS_VALUE_READ:
-+                if (updateCommand[i] == ',') {
-+                    parserState = CPS_KEY_START;
-+                    valueEnd = i;
-+                    key = sdscatlen(key, updateCommand+keyStart, keyEnd-keyStart);
-+                    value = sdscatlen(value, updateCommand+valueStart, valueEnd-valueStart);
-+                    if (sentinelUpdateCacheKey(c, key, value) != 0) {
-+                        goto cleanup;
-+                    }
-+                    sdsfree(key);
-+                    sdsfree(value);
-+                    key = sdsempty();
-+                    value = sdsempty();
++        case CPS_KEY_START:
++            if (!isspace(updateCommand[i])) {
++                parserState = CPS_KEY_READ;
++                keyStart = i;
++            }
++            break;
++        case CPS_KEY_READ:
++            if (updateCommand[i] == ':') {
++                keyEnd = i;
++                parserState = CPS_VALUE_START;
++            }
++            break;
++        case CPS_VALUE_START:
++            if (!isspace(updateCommand[i])) {
++                parserState = CPS_VALUE_READ;
++                valueStart = i;
++            }
++            break;
++        case CPS_VALUE_READ:
++            if (updateCommand[i] == ',') {
++                parserState = CPS_KEY_START;
++                valueEnd = i;
++                key = sdscatlen(key, updateCommand + keyStart, keyEnd - keyStart);
++                value = sdscatlen(value, updateCommand + valueStart, valueEnd - valueStart);
++                if (sentinelUpdateCacheKey(c, key, value) != 0) {
++                    goto cleanup;
 +                }
-+                break;
++                sdsfree(key);
++                sdsfree(value);
++                key = sdsempty();
++                value = sdsempty();
++            }
++            break;
 +        }
 +    }
 +
 +    if (parserState == CPS_VALUE_READ) {
-+        key = sdscatlen(key, updateCommand+keyStart, keyEnd-keyStart);
-+        value = sdscatlen(value, updateCommand+valueStart, sdslen(updateCommand)-valueStart);
++        key = sdscatlen(key, updateCommand + keyStart, keyEnd - keyStart);
++        value = sdscatlen(value, updateCommand + valueStart, sdslen(updateCommand) - valueStart);
 +        if (sentinelUpdateCacheKey(c, key, value) == 0) {
 +            parserState = CPS_KEY_START;
 +        }
 +    }
 +
 +    if (parserState == CPS_KEY_START) {
-+        addReply(c,shared.ok);
++        addReply(c, shared.ok);
 +    }
 +
 +cleanup:
@@ -1682,154 +1672,145 @@ index 000000000..d3c31052a
 +}
 +
 +void sentinelCommand(client *c) {
-+    if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) {
-+        const char *help[] = {
-+"CKQUORUM <master-name>",
-+"    Check if the current Sentinel configuration is able to reach the quorum",
-+"    needed to failover a master and the majority needed to authorize the",
-+"    failover.",
-+"GET-MASTER-ADDR-BY-NAME <master-name>",
-+"    Return the ip and port number of the master with that name.",
-+"INFO-CACHE <master-name>",
-+"    Return last cached INFO output from masters and all its replicas.",
-+"MASTER <master-name>",
-+"    Show the state and info of the specified master.",
-+"MASTERS",
-+"    Show a list of monitored masters and their state.",
-+"MYID",
-+"    Return the ID of the Sentinel instance.",
-+"REPLICAS <master-name>",
-+"    Show a list of replicas for this master and their state.",
-+"SENTINELS <master-name>",
-+"    Show a list of Sentinel instances for this master and their state.",
-+NULL
-+        };
++    if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr, "help")) {
++        const char *help[] = {"CKQUORUM <master-name>",
++                              "    Check if the current Sentinel configuration is able to reach the quorum",
++                              "    needed to failover a master and the majority needed to authorize the",
++                              "    failover.",
++                              "GET-MASTER-ADDR-BY-NAME <master-name>",
++                              "    Return the ip and port number of the master with that name.",
++                              "INFO-CACHE <master-name>",
++                              "    Return last cached INFO output from primaries and all their replicas.",
++                              "MASTER <master-name>",
++                              "    Show the state and info of the specified master.",
++                              "MASTERS",
++                              "    Show a list of monitored masters and their state.",
++                              "MYID",
++                              "    Return the ID of the Sentinel instance.",
++                              "SLAVES <master-name>",
++                              "    Show a list of slaves for this master and their state.",
++                              "SENTINELS <master-name>",
++                              "    Show a list of Sentinel instances for this master and their state.",
++                              NULL};
 +        addReplyHelp(c, help);
-+    } else if (!strcasecmp(c->argv[1]->ptr,"cache-update")) {
++    } else if (!strcasecmp(c->argv[1]->ptr, "cache-update")) {
 +        if (c->argc < 4) goto numargserr;
-+        if (!strcasecmp(c->argv[2]->ptr,sentinel.cache_update_secret)) {
++        if (!strcasecmp(c->argv[2]->ptr, sentinel.cache_update_secret)) {
 +            sentinelCacheUpdate(c);
 +        } else {
 +            addReplySubcommandSyntaxError(c);
 +        }
-+    } else if (!strcasecmp(c->argv[1]->ptr,"masters")) {
++    } else if (!strcasecmp(c->argv[1]->ptr, "masters")) {
 +        /* SENTINEL MASTERS */
 +        if (c->argc != 2) goto numargserr;
-+        addReplyDictOfRedisInstances(c,sentinel.masters);
-+    } else if (!strcasecmp(c->argv[1]->ptr,"master")) {
++        addReplyDictOfValkeyInstances(c, sentinel.primaries);
++    } else if (!strcasecmp(c->argv[1]->ptr, "master")) {
 +        /* SENTINEL MASTER <name> */
-+        sentinelRedisInstance *ri;
++        sentinelValkeyInstance *ri;
 +
 +        if (c->argc != 3) goto numargserr;
-+        if ((ri = sentinelGetMasterOrReplyError(c))
-+            == NULL) return;
-+        addReplySentinelRedisInstance(c,ri);
-+    } else if (!strcasecmp(c->argv[1]->ptr,"slaves") ||
-+               !strcasecmp(c->argv[1]->ptr,"replicas"))
-+    {
++        if ((ri = sentinelGetPrimaryOrReplyError(c)) == NULL) return;
++        addReplysentinelValkeyInstance(c, ri);
++    } else if (!strcasecmp(c->argv[1]->ptr, "slaves") || !strcasecmp(c->argv[1]->ptr, "replicas")) {
 +        /* SENTINEL REPLICAS <master-name> */
-+        sentinelRedisInstance *ri;
++        sentinelValkeyInstance *ri;
 +
 +        if (c->argc != 3) goto numargserr;
-+        if ((ri = sentinelGetMasterOrReplyError(c)) == NULL)
-+            return;
-+        addReplyDictOfRedisInstances(c,ri->slaves);
-+    } else if (!strcasecmp(c->argv[1]->ptr,"sentinels")) {
++        if ((ri = sentinelGetPrimaryOrReplyError(c)) == NULL) return;
++        addReplyDictOfValkeyInstances(c, ri->replicas);
++    } else if (!strcasecmp(c->argv[1]->ptr, "sentinels")) {
 +        /* SENTINEL SENTINELS <master-name> */
-+        sentinelRedisInstance *ri;
++        sentinelValkeyInstance *ri;
 +
 +        if (c->argc != 3) goto numargserr;
-+        if ((ri = sentinelGetMasterOrReplyError(c)) == NULL)
-+            return;
-+        addReplyDictOfRedisInstances(c,ri->sentinels);
-+    } else if (!strcasecmp(c->argv[1]->ptr,"myid") && c->argc == 2) {
++        if ((ri = sentinelGetPrimaryOrReplyError(c)) == NULL) return;
++        addReplyDictOfValkeyInstances(c, ri->sentinels);
++    } else if (!strcasecmp(c->argv[1]->ptr, "myid") && c->argc == 2) {
 +        /* SENTINEL MYID */
-+        addReplyBulkCBuffer(c,sentinel.myid,CONFIG_RUN_ID_SIZE);
-+    } else if (!strcasecmp(c->argv[1]->ptr,"get-master-addr-by-name")) {
++        addReplyBulkCBuffer(c, sentinel.myid, CONFIG_RUN_ID_SIZE);
++    } else if (!strcasecmp(c->argv[1]->ptr, "get-master-addr-by-name") ||
++               !strcasecmp(c->argv[1]->ptr, "get-master-addr-by-name")) {
 +        /* SENTINEL GET-MASTER-ADDR-BY-NAME <master-name> */
-+        sentinelRedisInstance *ri;
++        sentinelValkeyInstance *ri;
 +
 +        if (c->argc != 3) goto numargserr;
-+        ri = sentinelGetMaster();
++        ri = sentinelGetPrimary();
 +        if (ri == NULL) {
 +            addReplyNullArray(c);
 +        } else {
-+            sentinelAddr *addr = sentinelGetCurrentMasterAddress(ri);
++            sentinelAddr *addr = sentinelGetCurrentPrimaryAddress(ri);
 +
-+            addReplyArrayLen(c,2);
-+            addReplyBulkCString(c,announceSentinelAddr(addr));
-+            addReplyBulkLongLong(c,addr->port);
++            addReplyArrayLen(c, 2);
++            addReplyBulkCString(c, announceSentinelAddr(addr));
++            addReplyBulkLongLong(c, addr->port);
 +        }
-+    } else if (!strcasecmp(c->argv[1]->ptr,"ckquorum")) {
++    } else if (!strcasecmp(c->argv[1]->ptr, "ckquorum")) {
 +        /* SENTINEL CKQUORUM <name> */
 +        if (c->argc != 3) goto numargserr;
-+        addReplySds(c, sdscatfmt(sdsempty(),
-+            "+OK 1 usable Sentinels. Quorum and failover authorization "
-+            "can be reached\r\n"));
-+    } else if (!strcasecmp(c->argv[1]->ptr,"info-cache")) {
++        addReplySds(c, sdscatfmt(sdsempty(), "+OK 1 usable Sentinels. Quorum and failover authorization "
++                                             "can be reached\r\n"));
++    } else if (!strcasecmp(c->argv[1]->ptr, "info-cache")) {
 +        /* SENTINEL INFO-CACHE <name> */
 +        if (c->argc < 2) goto numargserr;
 +        mstime_t now = mstime();
 +
 +        /* Create an ad-hoc dictionary type so that we can iterate
-+         * a dictionary composed of just the master groups the user
++         * a dictionary composed of just the primary groups the user
 +         * requested. */
 +        dictType copy_keeper = instancesDictType;
 +        copy_keeper.valDestructor = NULL;
-+        dict *masters_local = sentinel.masters;
++        dict *primaries_local = sentinel.primaries;
 +        if (c->argc > 2) {
-+            masters_local = dictCreate(&copy_keeper);
++            primaries_local = dictCreate(&copy_keeper);
 +
 +            for (int i = 2; i < c->argc; i++) {
-+                sentinelRedisInstance *ri;
-+                ri = sentinelGetMasterByName(c->argv[i]->ptr);
++                sentinelValkeyInstance *ri;
++                ri = sentinelGetPrimaryByName(c->argv[i]->ptr);
 +                if (!ri) continue; /* ignore non-existing names */
-+                dictAdd(masters_local, ri->name, ri);
++                dictAdd(primaries_local, ri->name, ri);
 +            }
 +        }
 +
 +        /* Reply format:
-+         *   1.) master name
-+         *   2.) 1.) info from master
++         *   1.) primary name
++         *   2.) 1.) info from primary
 +         *       2.) info from replica
 +         *       ...
-+         *   3.) other master name
++         *   3.) other primary name
 +         *   ...
 +         */
-+        addReplyArrayLen(c,dictSize(masters_local) * 2);
++        addReplyArrayLen(c, dictSize(primaries_local) * 2);
 +
-+        dictIterator  *di;
++        dictIterator *di;
 +        dictEntry *de;
-+        di = dictGetIterator(masters_local);
++        di = dictGetIterator(primaries_local);
 +        while ((de = dictNext(di)) != NULL) {
-+            sentinelRedisInstance *ri = dictGetVal(de);
-+            addReplyBulkCBuffer(c,ri->name,strlen(ri->name));
-+            addReplyArrayLen(c,dictSize(ri->slaves) + 1); /* +1 for self */
-+            addReplyArrayLen(c,2);
-+            addReplyLongLong(c,
-+                ri->info_refresh ? (now - ri->info_refresh) : 0);
++            sentinelValkeyInstance *ri = dictGetVal(de);
++            addReplyBulkCBuffer(c, ri->name, strlen(ri->name));
++            addReplyArrayLen(c, dictSize(ri->replicas) + 1); /* +1 for self */
++            addReplyArrayLen(c, 2);
++            addReplyLongLong(c, ri->info_refresh ? (now - ri->info_refresh) : 0);
 +            if (ri->info)
-+                addReplyBulkCBuffer(c,ri->info,sdslen(ri->info));
++                addReplyBulkCBuffer(c, ri->info, sdslen(ri->info));
 +            else
 +                addReplyNull(c);
 +
 +            dictIterator *sdi;
 +            dictEntry *sde;
-+            sdi = dictGetIterator(ri->slaves);
++            sdi = dictGetIterator(ri->replicas);
 +            while ((sde = dictNext(sdi)) != NULL) {
-+                sentinelRedisInstance *sri = dictGetVal(sde);
-+                addReplyArrayLen(c,2);
-+                addReplyLongLong(c,
-+                    ri->info_refresh ? (now - sri->info_refresh) : 0);
++                sentinelValkeyInstance *sri = dictGetVal(sde);
++                addReplyArrayLen(c, 2);
++                addReplyLongLong(c, ri->info_refresh ? (now - sri->info_refresh) : 0);
 +                if (sri->info)
-+                    addReplyBulkCBuffer(c,sri->info,sdslen(sri->info));
++                    addReplyBulkCBuffer(c, sri->info, sdslen(sri->info));
 +                else
 +                    addReplyNull(c);
 +            }
 +            dictReleaseIterator(sdi);
 +        }
 +        dictReleaseIterator(di);
-+        if (masters_local != sentinel.masters) dictRelease(masters_local);
++        if (primaries_local != sentinel.primaries) dictRelease(primaries_local);
 +    } else {
 +        addReplySubcommandSyntaxError(c);
 +    }
@@ -1848,24 +1829,22 @@ index 000000000..d3c31052a
 +    static dict *cached_all_info_sections = NULL;
 +
 +    /* Get requested section list. */
-+    dict *sections_dict = genInfoSectionDict(c->argv+1, c->argc-1, sentinel_sections, &sec_all, &sec_everything);
++    dict *sections_dict = genInfoSectionDict(c->argv + 1, c->argc - 1, sentinel_sections, &sec_all, &sec_everything);
 +
 +    /* Purge unsupported sections from the requested ones. */
 +    dictEntry *de;
 +    dictIterator *di = dictGetSafeIterator(sections_dict);
-+    while((de = dictNext(di)) != NULL) {
++    while ((de = dictNext(di)) != NULL) {
 +        int i;
 +        sds sec = dictGetKey(de);
-+        for (i=0; sentinel_sections[i]; i++)
-+            if (!strcasecmp(sentinel_sections[i], sec))
-+                break;
++        for (i = 0; sentinel_sections[i]; i++)
++            if (!strcasecmp(sentinel_sections[i], sec)) break;
 +        /* section not found? remove it */
-+        if (!sentinel_sections[i])
-+            dictDelete(sections_dict, sec);
++        if (!sentinel_sections[i]) dictDelete(sections_dict, sec);
 +    }
 +    dictReleaseIterator(di);
 +
-+    /* Insert explicit all sections (don't pass these vars to genRedisInfoString) */
++    /* Insert explicit all sections (don't pass these vars to genValkeyInfoString) */
 +    if (sec_all || sec_everything) {
 +        releaseInfoSectionDict(sections_dict);
 +        /* We cache this dict as an optimization. */
@@ -1876,66 +1855,61 @@ index 000000000..d3c31052a
 +        sections_dict = cached_all_info_sections;
 +    }
 +
-+    sds info = genRedisInfoString(sections_dict, 0, 0);
++    sds info = genValkeyInfoString(sections_dict, 0, 0);
 +    if (sec_all || (dictFind(sections_dict, "sentinel") != NULL)) {
 +        dictIterator *di;
 +        dictEntry *de;
-+        int master_id = 0;
++        int primary_id = 0;
 +
-+        if (sdslen(info) != 0)
-+            info = sdscat(info,"\r\n");
++        if (sdslen(info) != 0) info = sdscat(info, "\r\n");
 +        info = sdscatprintf(info,
-+            "# Sentinel\r\n"
-+            "sentinel_masters:%lu\r\n"
-+            "sentinel_tilt:%d\r\n"
-+            "sentinel_tilt_since_seconds:%jd\r\n"
-+            "sentinel_running_scripts:%d\r\n"
-+            "sentinel_scripts_queue_length:%ld\r\n"
-+            "sentinel_simulate_failure_flags:%lu\r\n",
-+            dictSize(sentinel.masters),
-+            sentinel.tilt,
-+            sentinel.tilt ? (intmax_t)((mstime()-sentinel.tilt_start_time)/1000) : -1,
-+            sentinel.running_scripts,
-+            0L,
-+            sentinel.simfailure_flags);
-+
-+        di = dictGetIterator(sentinel.masters);
-+        while((de = dictNext(di)) != NULL) {
-+            sentinelRedisInstance *ri = dictGetVal(de);
++                            "# Sentinel\r\n"
++                            "sentinel_primaries:%lu\r\n"
++                            "sentinel_tilt:%d\r\n"
++                            "sentinel_tilt_since_seconds:%jd\r\n"
++                            "sentinel_running_scripts:%d\r\n"
++                            "sentinel_scripts_queue_length:%ld\r\n"
++                            "sentinel_simulate_failure_flags:%lu\r\n",
++                            dictSize(sentinel.primaries), sentinel.tilt,
++                            sentinel.tilt ? (intmax_t)((mstime() - sentinel.tilt_start_time) / 1000) : -1,
++                            sentinel.running_scripts, 0L, sentinel.simfailure_flags);
++
++        di = dictGetIterator(sentinel.primaries);
++        while ((de = dictNext(di)) != NULL) {
++            sentinelValkeyInstance *ri = dictGetVal(de);
 +            char *status = "ok";
 +
-+            if (ri->flags & SRI_O_DOWN) status = "odown";
-+            else if (ri->flags & SRI_S_DOWN) status = "sdown";
++            if (ri->flags & SRI_O_DOWN)
++                status = "odown";
++            else if (ri->flags & SRI_S_DOWN)
++                status = "sdown";
 +            info = sdscatprintf(info,
-+                "master%d:name=%s,status=%s,address=%s:%d,"
-+                "slaves=%lu,sentinels=%lu\r\n",
-+                master_id++, ri->name, status,
-+                announceSentinelAddr(ri->addr), ri->addr->port,
-+                dictSize(ri->slaves),
-+                dictSize(ri->sentinels)+1);
++                                "master%d:name=%s,status=%s,address=%s:%d,"
++                                "slaves=%lu,sentinels=%lu\r\n",
++                                primary_id++, ri->name, status, announceSentinelAddr(ri->addr), ri->addr->port,
++                                dictSize(ri->replicas), dictSize(ri->sentinels) + 1);
 +        }
 +        dictReleaseIterator(di);
 +    }
-+    if (sections_dict != cached_all_info_sections)
-+        releaseInfoSectionDict(sections_dict);
++    if (sections_dict != cached_all_info_sections) releaseInfoSectionDict(sections_dict);
 +    addReplyBulkSds(c, info);
 +}
 +
 +/* Implements Sentinel version of the ROLE command. The output is
-+ * "sentinel" and the list of currently monitored master names. */
++ * "sentinel" and the list of currently monitored primary names. */
 +void sentinelRoleCommand(client *c) {
 +    dictIterator *di;
 +    dictEntry *de;
 +
-+    addReplyArrayLen(c,2);
-+    addReplyBulkCBuffer(c,"sentinel",8);
-+    addReplyArrayLen(c,dictSize(sentinel.masters));
++    addReplyArrayLen(c, 2);
++    addReplyBulkCBuffer(c, "sentinel", 8);
++    addReplyArrayLen(c, dictSize(sentinel.primaries));
 +
-+    di = dictGetIterator(sentinel.masters);
-+    while((de = dictNext(di)) != NULL) {
-+        sentinelRedisInstance *ri = dictGetVal(de);
++    di = dictGetIterator(sentinel.primaries);
++    while ((de = dictNext(di)) != NULL) {
++        sentinelValkeyInstance *ri = dictGetVal(de);
 +
-+        addReplyBulkCString(c,ri->name);
++        addReplyBulkCString(c, ri->name);
 +    }
 +    dictReleaseIterator(di);
 +}
@@ -1949,45 +1923,45 @@ index 000000000..d3c31052a
 +    /* just do nothing */
 +}
 diff --git a/src/server.c b/src/server.c
-index 4b1f4242a..38c7ee643 100644
+index 57d7e32db..b3248bc45 100644
 --- a/src/server.c
 +++ b/src/server.c
-@@ -6760,6 +6760,8 @@ void memtest(size_t megabytes, int passes);
- int checkForSentinelMode(int argc, char **argv, char *exec_name) {
-     if (strstr(exec_name,"redis-sentinel") != NULL) return 1;
+@@ -6478,6 +6478,8 @@ int checkForSentinelMode(int argc, char **argv, char *exec_name) {
+     /* valkey may install symlinks like redis-sentinel -> valkey-sentinel. */
+     if (strstr(exec_name, "redis-sentinel") != NULL) return 1;
  
-+    if (strstr(exec_name,"redis-senticache") != NULL) return 1;
++    if (strstr(exec_name, "valkey-senticache") != NULL) return 1;
 +
      for (int j = 1; j < argc; j++)
-         if (!strcmp(argv[j],"--sentinel")) return 1;
+         if (!strcmp(argv[j], "--sentinel")) return 1;
      return 0;
 diff --git a/tests/instances.tcl b/tests/instances.tcl
-index 56a51a872..6f023f8cb 100644
+index 5cc96b0ed..4ff12cfb5 100644
 --- a/tests/instances.tcl
 +++ b/tests/instances.tcl
-@@ -25,10 +25,12 @@ set ::dont_clean 0
+@@ -26,10 +26,12 @@ set ::dont_clean 0
  set ::simulate_error 0
  set ::failed 0
  set ::sentinel_instances {}
 +set ::senticache_instances {}
- set ::redis_instances {}
+ set ::valkey_instances {}
  set ::global_config {}
  set ::sentinel_base_port 20000
- set ::redis_base_port 30000
+ set ::valkey_base_port 30000
 +set ::senticache_base_port 40000
- set ::redis_port_count 1024
+ set ::valkey_port_count 1024
  set ::host "127.0.0.1"
  set ::leaked_fds_file [file normalize "tmp/leaked_fds.txt"]
-@@ -51,6 +53,8 @@ proc exec_instance {type dirname cfgfile} {
-         set prgname redis-server
+@@ -53,6 +55,8 @@ proc exec_instance {type dirname cfgfile} {
+         set prgname valkey-server
      } elseif {$type eq "sentinel"} {
-         set prgname redis-sentinel
+         set prgname valkey-sentinel
 +    } elseif {$type eq "senticache"} {
-+        set prgname redis-senticache
++        set prgname valkey-senticache
      } else {
          error "Unknown instance type."
      }
-@@ -526,6 +530,21 @@ proc S {n args} {
+@@ -546,6 +550,21 @@ proc S {n args} {
      [dict get $s link] {*}$args
  }
  
@@ -2002,11 +1976,11 @@ index 56a51a872..6f023f8cb 100644
 +    set s [lindex $::senticache_instances 0]
 +    set port [dict get $s port]
 +    set host [dict get $s host]
-+    set client [redis $host $port 1 $::tls]
++    set client [valkey $host $port 1 $::tls]
 +    return $client
 +}
 +
- # Returns a Redis instance by index.
+ # Returns a server instance by index.
  # Example:
  #     [Rn 0] info
 diff --git a/tests/senticache/run.tcl b/tests/senticache/run.tcl
@@ -2156,7 +2130,7 @@ index 000000000..a650d0604
 +}
 diff --git a/tests/senticache/tests/01-pubsub.tcl b/tests/senticache/tests/01-pubsub.tcl
 new file mode 100644
-index 000000000..a0ffc0e5f
+index 000000000..21d50d8b9
 --- /dev/null
 +++ b/tests/senticache/tests/01-pubsub.tcl
 @@ -0,0 +1,27 @@
@@ -2183,7 +2157,7 @@ index 000000000..a0ffc0e5f
 +    assert_equal {1} [psubscribe $sclient {*}]
 +    SC SENTINEL CACHE-UPDATE mysecret master-addr: 127.0.0.3 20002, add-replica: 127.0.0.3 20001
 +    assert_equal {pmessage * +switch-master {initial 127.0.0.3 20001 127.0.0.3 20002}} [$sclient read]
-+    assert_equal {pmessage * failover-end {master initial 127.0.0.3 20001}} [$sclient read]
++    assert_equal {pmessage * failover-end {master initial 127.0.0.3 20002}} [$sclient read]
 +    assert_equal {pmessage * +slave {slave 127.0.0.3:20001 127.0.0.3 20001 @ initial 127.0.0.3 20002}} [$sclient read]
 +    $sclient close
 +}
@@ -2220,15 +2194,18 @@ index 000000000..4944e0096
 @@ -0,0 +1 @@
 +senticache_*
 diff --git a/tests/sentinel/tests/00-base.tcl b/tests/sentinel/tests/00-base.tcl
-index 7b6439508..1884bc2ed 100644
+index 33e590ab5..4f766459e 100644
 --- a/tests/sentinel/tests/00-base.tcl
 +++ b/tests/sentinel/tests/00-base.tcl
-@@ -82,7 +82,7 @@ test "Basic failover works if the master is down" {
-     kill_instance redis $master_id
+@@ -82,7 +82,7 @@ test "Basic failover works if the primary is down" {
+     kill_instance valkey $master_id
      foreach_sentinel_id id {
          S $id sentinel debug ping-period 500
 -        S $id sentinel debug ask-period 500  
 +        S $id sentinel debug ask-period 500
          wait_for_condition 1000 100 {
-             [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
+             [lindex [S $id SENTINEL GET-PRIMARY-ADDR-BY-NAME mymaster] 1] != $old_port
          } else {
+-- 
+2.48.0
+
diff --git a/redis_patches/build.sh b/valkey_patches/build.sh
similarity index 57%
rename from redis_patches/build.sh
rename to valkey_patches/build.sh
index d940119..656cca4 100755
--- a/redis_patches/build.sh
+++ b/valkey_patches/build.sh
@@ -5,11 +5,11 @@ set -xe
 apt update
 DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt -y install build-essential git
 cd /app
-git clone https://github.com/redis/redis.git
-cd redis
-git checkout 7.2.4
+git clone https://github.com/valkey-io/valkey.git
+cd valkey
+git checkout 8.0.2
 
-for i in ../redis_patches/*.patch
+for i in ../valkey_patches/*.patch
 do
     git apply "${i}"
 done