From f1e441e3eab61723f8290f09cc3053959601a57a Mon Sep 17 00:00:00 2001
From: Jaroslaw Grabowski
Date: Tue, 1 Jun 2021 10:32:40 +0200
Subject: [PATCH 01/19] STAR-564 Check only MODIFY on base when updating table with MV (#17)

If a user has only MODIFY permission on a table and there is a
materialized view built on the same table, an insert will fail with the
following error:

Unauthorized: Error from server: code=2100 [Unauthorized]

Only base MODIFY permission is required to update base with MV.

Co-authored-by: Zhao Yang

(cherry picked from commit 55dad391f3f98577d9b0b7e923a5a9b4eacfd2ab)
---
 auth_test.py | 42 ++++++++++++++++++++++++++++++++++++------
 1 file changed, 36 insertions(+), 6 deletions(-)

diff --git a/auth_test.py b/auth_test.py
index 6d92ff56e6..2a2b6ab8d9 100644
--- a/auth_test.py
+++ b/auth_test.py
@@ -541,10 +541,16 @@ def test_materialized_views_auth(self):
         * Create a new user, 'cathy', with no permissions
         * Create a ks, table
         * Connect as cathy
+        *
         * Try CREATE MV without ALTER permission on base table, assert throws Unauthorized
         * Grant cathy ALTER permissions, then CREATE MV successfully
+        *
+        * Try to MODIFY base without WRITE permission on base, assert throws Unauthorized
+        * Grant cathy WRITE permissions on base, and modify base successfully
+        *
         * Try to SELECT from the mv, assert throws Unauthorized
-        * Grant cathy SELECT permissions, and read from the MV successfully
+        * Grant cathy SELECT permissions on base, and read from the MV successfully
+        *
         * Revoke cathy's ALTER permissions, assert DROP MV throws Unauthorized
         * Restore cathy's ALTER permissions, DROP MV successfully
         """
@@ -565,12 +571,36 @@ def test_materialized_views_auth(self):
         cassandra.execute("GRANT ALTER ON ks.cf TO cathy")
         cathy.execute(create_mv)

-        # TRY SELECT MV without SELECT permission on base table
-        assert_unauthorized(cathy, "SELECT * FROM ks.mv1", "User cathy has no SELECT permission on <table ks.cf> or any of its parents")
+        # Try MODIFY base without WRITE permission on base
+        assert_unauthorized(cathy, "INSERT INTO ks.cf(id, value) VALUES(1, '1')", "User cathy has no MODIFY permission on <table ks.cf> or any of its parents")
or any of its parents") - # Grant SELECT permission and CREATE MV - cassandra.execute("GRANT SELECT ON ks.cf TO cathy") - cathy.execute("SELECT * FROM ks.mv1") + if self.cluster.version() >= LooseVersion('4.0'): + # From 4.0 onward, only base MODIFY permission is required to update base with MV + # Grant WRITE permission on Base + cassandra.execute("GRANT MODIFY ON ks.cf TO cathy") + cathy.execute("INSERT INTO ks.cf(id, value) VALUES(1, '1')") + + # TRY SELECT MV without SELECT permission on base table + assert_unauthorized(cathy, "SELECT * FROM ks.cf", "User cathy has no SELECT permission on
or any of its parents") + assert_unauthorized(cathy, "SELECT * FROM ks.mv1", "User cathy has no SELECT permission on
or any of its parents") + + # Grant SELECT permission + cassandra.execute("GRANT SELECT ON ks.cf TO cathy") + assert_one(cathy, "SELECT * FROM ks.cf", [1, '1']) + assert_one(cathy, "SELECT * FROM ks.mv1", ['1', 1]) + else: + # Before 4.0, MODIFY on MV is required to insert to base + # Grant WRITE permission on Base + cassandra.execute("GRANT MODIFY ON ks.cf TO cathy") + assert_unauthorized(cathy, "INSERT INTO ks.cf(id, value) VALUES(1, '1')", "User cathy has no SELECT permission on
or any of its parents") + cassandra.execute("GRANT SELECT ON ks.cf TO cathy") + assert_unauthorized(cathy, "INSERT INTO ks.cf(id, value) VALUES(1, '1')", "User cathy has no MODIFY permission on
or any of its parents") + + # Grant WRITE permission on MV + cassandra.execute("GRANT MODIFY ON ks.mv1 TO cathy") + cathy.execute("INSERT INTO ks.cf(id, value) VALUES(1, '1')") + assert_one(cathy, "SELECT * FROM ks.cf", [1, '1']) + assert_one(cathy, "SELECT * FROM ks.mv1", ['1', 1]) # Revoke ALTER permission and try DROP MV cassandra.execute("REVOKE ALTER ON ks.cf FROM cathy") From f2c37011d7dc21016fa33103625529e85994e595 Mon Sep 17 00:00:00 2001 From: Ruslan Fomkin Date: Tue, 8 Jun 2021 16:40:03 +0200 Subject: [PATCH 02/19] Add pytest cache and vscode folders to gitignore (#21) (cherry picked from commit b3bd616201ef5c41ad088954077a8467148c8626) --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 51321392fb..ed7f15ce3d 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,5 @@ last_test_dir upgrade html/ doxygen/doxypy-0.4.2/ +.pytest_cache/ +.vscode/ From a02abc60c8f4a8fe70e6268a8103c62124d421c5 Mon Sep 17 00:00:00 2001 From: Ruslan Fomkin Date: Fri, 11 Jun 2021 11:10:34 +0200 Subject: [PATCH 03/19] STAR-582 fix repair error on one node cluster (#20) Port of DB-1511, riptano/apollo-dtest#197 Co-authored-by: Zhao Yang (cherry picked from commit c7beefcbc97cd288badc48c483a3821dc694da58) --- repair_tests/repair_test.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/repair_tests/repair_test.py b/repair_tests/repair_test.py index ddae777343..4ff6d024ea 100644 --- a/repair_tests/repair_test.py +++ b/repair_tests/repair_test.py @@ -1195,6 +1195,40 @@ def run_repair(): else: node1.nodetool('repair keyspace1 standard1 -inc -par') + @since('3.0') + def test_repair_one_node_cluster(self): + options = [] + fix_STAR582 = self.cluster.version() >= "4.0" + if not fix_STAR582: + options = ['--ignore-unreplicated-keyspaces'] + options + self._repair_abort_test(options=options, nodes=1, rf=2) + + @since('3.0') + def test_repair_one_node_in_local_dc(self): + self._repair_abort_test(options=['--ignore-unreplicated-keyspaces', '--in-local-dc'], nodes=[1, 1], rf={'dc1': 1, 'dc2': 1}, no_common_range=True) + + def _repair_abort_test(self, options=[], nodes=1, rf=1, no_common_range=False): + cluster = self.cluster + logger.debug("Starting cluster..") + cluster.populate(nodes).start(wait_for_binary_proto=True) + + node1 = self.cluster.nodelist()[0] + session = self.patient_cql_connection(node1) + create_ks(session, 'ks', rf=rf) + + support_preview = self.cluster.version() >= "4.0" + if support_preview: + logger.debug("Preview repair") + out = node1.repair(["--preview"] + options) + if no_common_range: + assert "Nothing to repair for " in str(out), "Expect 'Nothing to repair for '" + + logger.debug("Full repair") + node1.repair(["--full"] + options) + + logger.debug("Incremental repair") + node1.repair(options) + @since('2.2') def test_dead_sync_initiator(self): """ From c204342acfd40a07518f8f67ffbf7ff1943d9c6e Mon Sep 17 00:00:00 2001 From: jacek-lewandowski Date: Fri, 2 Apr 2021 13:51:45 +0200 Subject: [PATCH 04/19] STAR-247: Allow to easily run tests against big or bti sstable format (cherry picked from commit 8c9680112dc2ba92b04460e695b74fa0273b2791) (cherry picked from commit 8f680140b5bc1ea1d42161832acee61f639e4a26) --- conftest.py | 3 +++ dtest_config.py | 7 +++++++ run_dtests.py | 3 ++- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/conftest.py b/conftest.py index 33e188fd52..79b9714191 100644 --- a/conftest.py +++ b/conftest.py @@ -43,6 +43,9 @@ def check_required_loopback_interfaces_available(): def 
pytest_addoption(parser): + parser.addoption("--sstable-format", action="store", default="bti", + help="SSTable format to be used by default for all newly created SSTables: " + "big or bti (default: bti)") parser.addoption("--use-vnodes", action="store_true", default=False, help="Determines wither or not to setup clusters using vnodes for tests") parser.addoption("--use-off-heap-memtables", action="store_true", default=False, diff --git a/dtest_config.py b/dtest_config.py index 86e8c96b25..116566b25c 100644 --- a/dtest_config.py +++ b/dtest_config.py @@ -11,6 +11,7 @@ class DTestConfig: def __init__(self): + self.sstable_format = "bti" self.use_vnodes = True self.use_off_heap_memtables = False self.num_tokens = -1 @@ -41,6 +42,12 @@ def setup(self, config): self.cassandra_version_from_build = self.get_version_from_build() return + self.sstable_format = config.getoption("--sstable-format") + if self.sstable_format: + assert self.sstable_format in ['bti', 'big'], "SSTable format {} is invalid - must be either bti or big".format(self.sstable_format) + default_sstable_format_prop = " -Dcassandra.sstable.format.default=" + self.sstable_format + os.environ.update({"JVM_EXTRA_OPTS": (os.environ.get("JVM_EXTRA_OPTS") or "") + default_sstable_format_prop}) + self.use_vnodes = config.getoption("--use-vnodes") self.use_off_heap_memtables = config.getoption("--use-off-heap-memtables") self.num_tokens = config.getoption("--num-tokens") diff --git a/run_dtests.py b/run_dtests.py index 34dd5af766..3f3e1fda91 100755 --- a/run_dtests.py +++ b/run_dtests.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -usage: run_dtests.py [-h] [--use-vnodes] [--use-off-heap-memtables] [--num-tokens=NUM_TOKENS] [--data-dir-count-per-instance=DATA_DIR_COUNT_PER_INSTANCE] +usage: run_dtests.py [-h] [--sstable-format=FORMAT] [--use-vnodes] [--use-off-heap-memtables] [--num-tokens=NUM_TOKENS] [--data-dir-count-per-instance=DATA_DIR_COUNT_PER_INSTANCE] [--force-resource-intensive-tests] [--skip-resource-intensive-tests] [--cassandra-dir=CASSANDRA_DIR] [--cassandra-version=CASSANDRA_VERSION] [--delete-logs] [--execute-upgrade-tests] [--execute-upgrade-tests-only] [--disable-active-log-watching] [--keep-test-dir] [--enable-jacoco-code-coverage] [--dtest-enable-debug-logging] [--dtest-print-tests-only] [--dtest-print-tests-output=DTEST_PRINT_TESTS_OUTPUT] @@ -8,6 +8,7 @@ optional arguments: -h, --help show this help message and exit + --sstable-format SSTable format to be used by default for all newly created SSTables: big or bti (default: bti) --use-vnodes Determines wither or not to setup clusters using vnodes for tests (default: False) --use-off-heap-memtables Enable Off Heap Memtables when creating test clusters for tests (default: False) --num-tokens=NUM_TOKENS Number of tokens to set num_tokens yaml setting to when creating instances with vnodes enabled (default: 256) From c73391911f05b30b670f7f78a4681600e8bd43ac Mon Sep 17 00:00:00 2001 From: Jaroslaw Grabowski Date: Tue, 6 Apr 2021 13:33:30 +0200 Subject: [PATCH 05/19] STAR-247: remove format specific parts of assertions in offline_tools_test.py (cherry picked from commit c318850a479a8beebee2c4fab2bf761005acf08b) (cherry picked from commit 14050b837e9a07dd483bb2f3a07df8fa21a5541a) --- offline_tools_test.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/offline_tools_test.py b/offline_tools_test.py index 04ad4c0133..e11ddcefd5 100644 --- a/offline_tools_test.py +++ b/offline_tools_test.py @@ -271,9 +271,9 @@ def test_sstableverify(self): hashcomputed = 
False for line in outlines: if sstable in line: - if "Verifying BigTableReader" in line: + if "Verifying " in line: verified = True - elif "Checking computed hash of BigTableReader" in line: + elif "Checking computed hash of " in line: hashcomputed = True else: logger.debug(line) From 1253168f42c7cbe085ec7dd0f78f5ed509955406 Mon Sep 17 00:00:00 2001 From: Jacek Lewandowski Date: Wed, 23 Jun 2021 09:55:24 +0200 Subject: [PATCH 06/19] STAR-247: Standalone scrubber should use the same default sstable format as specified for the test Also made a small refactoring of dtest_config.py --- dtest_config.py | 13 +++++++++++-- scrub_test.py | 3 +++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/dtest_config.py b/dtest_config.py index 116566b25c..ad454d49f7 100644 --- a/dtest_config.py +++ b/dtest_config.py @@ -45,8 +45,6 @@ def setup(self, config): self.sstable_format = config.getoption("--sstable-format") if self.sstable_format: assert self.sstable_format in ['bti', 'big'], "SSTable format {} is invalid - must be either bti or big".format(self.sstable_format) - default_sstable_format_prop = " -Dcassandra.sstable.format.default=" + self.sstable_format - os.environ.update({"JVM_EXTRA_OPTS": (os.environ.get("JVM_EXTRA_OPTS") or "") + default_sstable_format_prop}) self.use_vnodes = config.getoption("--use-vnodes") self.use_off_heap_memtables = config.getoption("--use-off-heap-memtables") @@ -97,6 +95,17 @@ def setup(self, config): "--use-off-heap-memtables, see https://issues.apache.org/jira/browse/CASSANDRA-9472 " "for details" % version) + self.apply_to_env(os.environ, "JVM_EXTRA_OPTS") + + def apply_to_env(self, env, key="JVM_OPTS"): + current = env.get(key) or "" + if self.sstable_format: + default_sstable_format_prop = " -Dcassandra.sstable.format.default=" + self.sstable_format + if not current.__contains__("-Dcassandra.sstable.format.default"): + env.update({key: (env.get(key) or "") + default_sstable_format_prop}) + else: + logger.debug("Skipped adding {} because it is already in the env key {}: {}".format(default_sstable_format_prop, key, current)) + def get_version_from_build(self): # There are times when we want to know the C* version we're testing against # before we do any cluster. In the general case, we can't know that -- the diff --git a/scrub_test.py b/scrub_test.py index 3d50d70c31..ec5e3f43e6 100644 --- a/scrub_test.py +++ b/scrub_test.py @@ -116,6 +116,9 @@ def launch_standalone_scrub(self, ks, cf, reinsert_overflowed_ttl=False, no_vali """ node1 = self.cluster.nodelist()[0] env = common.make_cassandra_env(node1.get_install_cassandra_root(), node1.get_node_cassandra_root()) + + self.dtest_config.apply_to_env(env, "JVM_OPTS") + scrub_bin = node1.get_tool('sstablescrub') logger.debug(scrub_bin) From f7684e4c78c9d0c669e9320b3313895079eba108 Mon Sep 17 00:00:00 2001 From: Jacek Lewandowski Date: Wed, 23 Jun 2021 13:53:06 +0200 Subject: [PATCH 07/19] STAR-247: Add allowed warning for running scrub test The message is expected since bloom filter is not recreated when there is no index. 
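For illustration, a minimal usage sketch of the new hook (the warning text and the helper signature follow the diff below; the call itself is an assumption, not part of the patch):

    # Sketch: tolerate the expected scrub warning on top of the default
    # acceptable patterns. assert_stderr_clean() ORs the default and the
    # caller-supplied patterns into a single regex, so stderr passes when
    # it consists only of acceptable messages and whitespace.
    from tools.assertions import assert_stderr_clean

    err = ("WARN  Could not recreate or deserialize existing bloom filter, "
           "continuing with a pass-through bloom filter but this will "
           "significantly impact reads performance")
    assert_stderr_clean(err, acceptable_errors=[
        "WARN.*Could not recreate or deserialize existing bloom filter.*"])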
--- scrub_test.py | 10 +++++----- tools/assertions.py | 14 +++++++++----- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/scrub_test.py b/scrub_test.py index ec5e3f43e6..04c09650f4 100644 --- a/scrub_test.py +++ b/scrub_test.py @@ -110,7 +110,7 @@ def launch_nodetool_cmd(self, cmd): if not common.is_win(): # nodetool always prints out on windows assert_length_equal(response, 0) # nodetool does not print anything unless there is an error - def launch_standalone_scrub(self, ks, cf, reinsert_overflowed_ttl=False, no_validate=False): + def launch_standalone_scrub(self, ks, cf, reinsert_overflowed_ttl=False, no_validate=False, acceptable_errors=None): """ Launch the standalone scrub """ @@ -134,7 +134,7 @@ def launch_standalone_scrub(self, ks, cf, reinsert_overflowed_ttl=False, no_vali # if we have less than 64G free space, we get this warning - ignore it if err and "Consider adding more capacity" not in err.decode("utf-8"): logger.debug(err.decode("utf-8")) - assert_stderr_clean(err.decode("utf-8")) + assert_stderr_clean(err.decode("utf-8"), acceptable_errors) def perform_node_tool_cmd(self, cmd, table, indexes): """ @@ -161,12 +161,12 @@ def scrub(self, table, *indexes): time.sleep(.1) return self.get_sstables(table, indexes) - def standalonescrub(self, table, *indexes): + def standalonescrub(self, table, *indexes, acceptable_errors=None): """ Launch standalone scrub on table and indexes, and then return all sstables in a dict keyed by the table or index name. """ - self.launch_standalone_scrub(KEYSPACE, table) + self.launch_standalone_scrub(ks=KEYSPACE, cf=table, acceptable_errors=acceptable_errors) for index in indexes: self.launch_standalone_scrub(KEYSPACE, '{}.{}'.format(table, index)) return self.get_sstables(table, indexes) @@ -446,7 +446,7 @@ def test_standalone_scrub_essential_files_only(self): self.delete_non_essential_sstable_files('users') - scrubbed_sstables = self.standalonescrub('users') + scrubbed_sstables = self.standalonescrub(table='users', acceptable_errors=["WARN.*Could not recreate or deserialize existing bloom filter, continuing with a pass-through bloom filter but this will significantly impact reads performance"]) self.increase_sstable_generations(initial_sstables) assert initial_sstables == scrubbed_sstables diff --git a/tools/assertions.py b/tools/assertions.py index 7491a4b5a7..7148d1ec26 100644 --- a/tools/assertions.py +++ b/tools/assertions.py @@ -293,12 +293,16 @@ def assert_stderr_clean(err, acceptable_errors=None): @param acceptable_errors A list that if used, the user chooses what messages are to be acceptable in stderr. 
""" + default_acceptable_errors = ["WARN.*JNA link failure.*unavailable.", + "objc.*Class JavaLaunchHelper.*?Which one is undefined.", + # Stress tool JMX connection failure, see CASSANDRA-12437 + "Failed to connect over JMX; not collecting these stats", + "Picked up JAVA_TOOL_OPTIONS:.*"] + if acceptable_errors is None: - acceptable_errors = ["WARN.*JNA link failure.*unavailable.", - "objc.*Class JavaLaunchHelper.*?Which one is undefined.", - # Stress tool JMX connection failure, see CASSANDRA-12437 - "Failed to connect over JMX; not collecting these stats", - "Picked up JAVA_TOOL_OPTIONS:.*"] + acceptable_errors = default_acceptable_errors + else: + acceptable_errors = default_acceptable_errors + acceptable_errors regex_str = r"^({}|\s*|\n)*$".format("|".join(acceptable_errors)) err_str = err.strip() From ee9b61242476135852e38df8cb18b1adf25dc73d Mon Sep 17 00:00:00 2001 From: Branimir Lambov Date: Mon, 12 Apr 2021 11:56:25 +0300 Subject: [PATCH 08/19] STAR-14: Fix expectations to include memtable table parameter (cherry picked from commit 353b1f13362cf3081bf908f636b0c886100e98f1) (cherry picked from commit 1cbc8d5b4a61c564a4c334b357cba3b48a44eac6) --- cqlsh_tests/test_cqlsh.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cqlsh_tests/test_cqlsh.py b/cqlsh_tests/test_cqlsh.py index 78bde3aba6..34b2822839 100644 --- a/cqlsh_tests/test_cqlsh.py +++ b/cqlsh_tests/test_cqlsh.py @@ -1103,6 +1103,7 @@ def get_test_table_output(self, has_val=True, has_val_idx=True): AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND memtable = {} AND crc_check_chance = 1.0 AND default_time_to_live = 0 AND extensions = {} @@ -1192,6 +1193,7 @@ def get_users_table_output(self): AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND memtable = {} AND crc_check_chance = 1.0 AND default_time_to_live = 0 AND extensions = {} @@ -1298,6 +1300,7 @@ def get_users_by_state_mv_output(self): AND comment = '' AND compaction = {'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32', 'min_threshold': '4'} AND compression = {'chunk_length_in_kb': '16', 'class': 'org.apache.cassandra.io.compress.LZ4Compressor'} + AND memtable = {} AND crc_check_chance = 1.0 AND default_time_to_live = 0 AND extensions = {} From 3a6ba36bacbf147970e43e68c559cd913d358adf Mon Sep 17 00:00:00 2001 From: Jaroslaw Grabowski Date: Tue, 20 Apr 2021 11:12:04 +0200 Subject: [PATCH 09/19] STAR-254: add DateRange and Geo tests (#9) * STAR-254 add tests for geo and date range types (cherry picked from commit d15a708fb52aec3f5e2f5e69ecc6b9b1780d2942) (cherry picked from commit 5bd412faa8716d8d3442b99867d939bbe150460e) --- cqlsh_tests/test_cqlsh_copy.py | 76 +++++++++++++++++++++++++++++++++ cqlsh_tests/test_cqlsh_types.py | 67 +++++++++++++++++++++++++++++ 2 files changed, 143 insertions(+) create mode 100644 cqlsh_tests/test_cqlsh_types.py diff --git a/cqlsh_tests/test_cqlsh_copy.py b/cqlsh_tests/test_cqlsh_copy.py index 396de32e15..2f4725f22c 100644 --- a/cqlsh_tests/test_cqlsh_copy.py +++ b/cqlsh_tests/test_cqlsh_copy.py @@ -3336,3 +3336,79 @@ def _test_invalid_data_for_maps(): 
_test_invalid_data_for_sets() _test_invalid_data_for_lists() _test_invalid_data_for_maps() + + @since('4.0') + def test_geotypes_copy(self): + """ + Tests whether cqlsh COPY properly handles geo types with empty and null values. + + @since 4.0.0 + + Steps: + * insert several geoTypes with null and empty values among them into a table + * cqlsh copy contents to .csv file and save them in a list + * wipe the table comletely of all data + * cqlsh copy contents from .csv back into the table + * get the contents of the table into a list + * assert the pre wiped data is the same as the newly copied data + :return + """ + self.prepare() + + self.session.execute("create table geo (k int primary key, point 'PointType', line 'LineStringType', poly 'PolygonType');") + self.session.execute("insert into geo (k, point, line, poly) VALUES (0, 'point(1.2 3.4)', 'linestring(1.0 1.1, 2.0 2.1, 3.0 3.1)', 'POLYGON ((10.1 10.0, 110.0 10.0, 110.0 110.0, 10.0 110.0, 10.0 10.0), (20.0 20.0, 20.0 30.0, 30.0 30.0, 30.0 20.0, 20.0 20.0))');") + self.session.execute("insert into geo (k, point, line, poly) VALUES (2, 'point(1.2 3.4)', 'linestring EMPTY', 'POLYGON EMPTY');") + self.session.execute("insert into geo (k) VALUES (1);") + + # make sure data is inserted + data_actual = rows_to_list(self.session.execute("select * from geo;")) + assert len(data_actual) == 3 + + # dump data to CSV and truncate + tempfile = self.get_temp_file() + self.run_cqlsh(cmds="COPY ks.geo TO '{name}'".format(name=tempfile.name)) + self.run_cqlsh(cmds="truncate ks.geo;") + + # import data back + self.run_cqlsh(cmds="COPY ks.geo FROM '{name}'".format(name=tempfile.name)) + data_copy = rows_to_list(self.session.execute("select * from geo;")) + + assert data_actual == data_copy + + @since("4.0") + def test_date_range_copy(self): + """ + DateRangeTests.test_copy_command + + Tests whether cqlsh COPY properly handles date_range types, including null values. 
+ @note we cannot insert empty value ('') as it is not presented as null in cqlsh but it is in COPY + """ + self.prepare() + + self.session.execute("create table incomes (org text, period 'DateRangeType', incomes int, ver 'DateRangeType', primary key (org, period));") + # insert some data + self.session.execute("insert into incomes(org, period, incomes) values ('A','2014', 20140);") + self.session.execute("insert into incomes(org, period, incomes) values ('A','2015', 201500);") + self.session.execute("insert into incomes(org, period, incomes) values ('A','[2016-01-01 TO 2016-06-30]', 1007);") + self.session.execute("insert into incomes(org, period, incomes) values ('B','[2017-02-12 12:30:07 TO 2017-02-17 13:39:43.789]', 777);") + self.session.execute("insert into incomes(org, period, incomes, ver) values ('X','2011', 0, null);") + self.session.execute("insert into incomes(org, period, incomes) values ('C','*', 996);") + self.session.execute("insert into incomes(org, period, incomes) values ('C','[* TO *]', 997);") + self.session.execute("insert into incomes(org, period, incomes) values ('C','[* TO 2015-01]', 998);") + self.session.execute("insert into incomes(org, period, incomes) values ('C','[2015-01 TO *]', 999);") + + # make sure data is inserted + data_actual = rows_to_list(self.session.execute("select * from incomes;")) + assert len(data_actual) == 9 + + # dump data to CSV and truncate + tempfile = self.get_temp_file() + self.run_cqlsh(cmds="COPY ks.incomes TO '{name}'".format(name=tempfile.name)) + self.run_cqlsh(cmds="truncate ks.incomes;") + + # import data back + self.run_cqlsh(cmds="COPY ks.incomes FROM '{name}'".format(name=tempfile.name)) + data_copy = rows_to_list(self.session.execute("select * from incomes;")) + + assert data_actual == data_copy + diff --git a/cqlsh_tests/test_cqlsh_types.py b/cqlsh_tests/test_cqlsh_types.py new file mode 100644 index 0000000000..11e4604c7e --- /dev/null +++ b/cqlsh_tests/test_cqlsh_types.py @@ -0,0 +1,67 @@ +import logging +import pytest + +from dtest import Tester, create_ks + +logger = logging.getLogger(__name__) +since = pytest.mark.since + + +@since("4.0") +class TestCqlshTypes(Tester): + + def prepare(self, workload=None): + if not self.cluster.nodelist(): + self.allow_log_errors = True + self.cluster.populate(1) + if workload is not None: + for node in self.cluster.nodelist(): + node.set_workload(workload) + logger.debug('About to start cluster') + self.cluster.start() + logger.debug('Cluster started') + for node in self.cluster.nodelist(): + node.watch_log_for('Starting listening for CQL clients', timeout=60) + self.cluster.nodelist()[0].watch_log_for('Created default superuser') + self.node = self.cluster.nodelist()[0] + + conn = self.patient_cql_connection(self.node) + create_ks(conn, 'ks', 1) + + logger.debug('prepare completed') + + def test_point(self): + self.prepare() + + expected = 'POINT (1.2 2.3)' + self.node.run_cqlsh("CREATE TABLE ks.point_tbl (k INT PRIMARY KEY, point 'PointType');") + self.node.run_cqlsh("INSERT INTO ks.point_tbl (k, point) VALUES (1, '{}')".format(expected)) + result = self.node.run_cqlsh("SELECT * FROM ks.point_tbl;") + assert expected in result[0], result + + def test_linestring(self): + self.prepare() + + expected = 'LINESTRING (30.0 10.0, 10.0 30.0, 40.0 40.0)' + self.node.run_cqlsh("CREATE TABLE ks.line_tbl (k INT PRIMARY KEY, linestring 'LineStringType');") + self.node.run_cqlsh("INSERT INTO ks.line_tbl (k, linestring) VALUES (1, '{}')".format(expected)) + result = self.node.run_cqlsh("SELECT * 
FROM ks.line_tbl;") + assert expected in result[0], result + + def test_polygon(self): + self.prepare() + + expected = 'POLYGON ((30.0 10.0, 40.0 40.0, 20.0 40.0, 10.0 20.0, 30.0 10.0))' + self.node.run_cqlsh("CREATE TABLE ks.polygon_tbl (k INT PRIMARY KEY, polygon 'PolygonType');") + self.node.run_cqlsh("INSERT INTO ks.polygon_tbl (k, polygon) VALUES (1, '{}')".format(expected)) + result = self.node.run_cqlsh("SELECT * FROM ks.polygon_tbl;") + assert expected in result[0], result + + def test_date_range(self): + self.prepare() + + expected = '[2015-01 TO *]' + self.node.run_cqlsh("CREATE TABLE ks.date_range_tbl (k INT PRIMARY KEY, date_range_tbl 'DateRangeType');") + self.node.run_cqlsh("INSERT INTO ks.date_range_tbl (k, date_range_tbl) VALUES (1, '{}')".format(expected)) + result = self.node.run_cqlsh("SELECT * FROM ks.date_range_tbl;") + assert expected in result[0], result From af5e094eb6ad961c9aee0ab465f128908ca1bcfa Mon Sep 17 00:00:00 2001 From: Jaroslaw Grabowski Date: Wed, 21 Apr 2021 14:50:11 +0200 Subject: [PATCH 10/19] STAR-452: add EverywhereStrategy smoke test (#10) (cherry picked from commit eb3049c6e2e79dd13da68187cf3d67a2577228cf) (cherry picked from commit fe23e0b086ef342de3003ff27c163217dd8fd207) --- bootstrap_test.py | 51 +++++++++++++++++++++++++++++++++++++++++++++++ dtest.py | 2 ++ 2 files changed, 53 insertions(+) diff --git a/bootstrap_test.py b/bootstrap_test.py index dc0691778f..3f67df0621 100644 --- a/bootstrap_test.py +++ b/bootstrap_test.py @@ -1018,6 +1018,57 @@ def test_bootstrap_binary_disabled(self): assert_bootstrap_state(self, node3, 'COMPLETED', user='cassandra', password='cassandra') node3.wait_for_binary_interface() + @since('4.0') + @pytest.mark.no_vnodes + def test_simple_bootstrap_with_everywhere_strategy(self): + cluster = self.cluster + tokens = cluster.balanced_tokens(2) + cluster.set_configuration_options(values={'num_tokens': 1}) + + logger.debug("[node1, node2] tokens: %r" % (tokens,)) + + keys = 10000 + + # Create a single node cluster + cluster.populate(1) + node1 = cluster.nodelist()[0] + node1.set_configuration_options(values={'initial_token': tokens[0]}) + cluster.start() + + session = self.patient_cql_connection(node1) + create_ks(session, 'ks', 'EverywhereStrategy') + create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}) + + insert_statement = session.prepare("INSERT INTO ks.cf (key, c1, c2) VALUES (?, 'value1', 'value2')") + execute_concurrent_with_args(session, insert_statement, [['k%d' % k] for k in range(keys)]) + + node1.flush() + node1.compact() + + # Reads inserted data all during the bootstrap process. 
We shouldn't + # get any error + query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE) + session.shutdown() + + # Bootstrapping a new node in the current version + node2 = new_node(cluster) + node2.set_configuration_options(values={'initial_token': tokens[1]}) + node2.start(wait_for_binary_proto=True) + node2.compact() + + node1.cleanup() + logger.debug("node1 size for ks.cf after cleanup: %s" % float(data_size(node1,'ks','cf'))) + node1.compact() + logger.debug("node1 size for ks.cf after compacting: %s" % float(data_size(node1,'ks','cf'))) + + logger.debug("node2 size for ks.cf after compacting: %s" % float(data_size(node2,'ks','cf'))) + + size1 = float(data_size(node1,'ks','cf')) + size2 = float(data_size(node2,'ks','cf')) + assert_almost_equal(size1, size2, error=0.3) + + assert_bootstrap_state(self, node2, 'COMPLETED') + @since('4.1') def test_invalid_host_id(self): """ diff --git a/dtest.py b/dtest.py index f2c89b21dc..cb2470cbf3 100644 --- a/dtest.py +++ b/dtest.py @@ -359,6 +359,8 @@ def create_ks(session, name, rf): if isinstance(rf, int): # we assume simpleStrategy query = query % (name, "'class':'SimpleStrategy', 'replication_factor':%d" % rf) + elif 'EverywhereStrategy' in rf: + query = query % (name, "'class':'org.apache.cassandra.locator.EverywhereStrategy'") else: assert len(rf) >= 0, "At least one datacenter/rf pair is needed" # we assume networkTopologyStrategy From 9fbd1b3b816f7bd46dd4ad42a0e5d6e3748351bb Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Mon, 24 May 2021 11:43:58 +0100 Subject: [PATCH 11/19] STAR-431: Add option to prevent any file-I/O from cqlsh Co-authored-by: Robert Stupp (cherry picked from commit 33e486a19421f77f7091fc8b96a493429c34cacf) --- cqlsh_tests/test_cqlsh.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/cqlsh_tests/test_cqlsh.py b/cqlsh_tests/test_cqlsh.py index 34b2822839..3d999dc027 100644 --- a/cqlsh_tests/test_cqlsh.py +++ b/cqlsh_tests/test_cqlsh.py @@ -23,6 +23,7 @@ from cassandra.concurrent import execute_concurrent_with_args from cassandra.query import BatchStatement, BatchType from ccmlib import common +from ccmlib.node import ToolError from .cqlsh_tools import monkeypatch_driver, unmonkeypatch_driver from dtest import Tester, create_ks, create_cf @@ -2456,6 +2457,33 @@ def test_cjk_output(self): """ assert stdout_lines_sorted.find(expected) >= 0 + @since('4.0') + def test_no_file_io(self): + def run_cqlsh_catch_toolerror(cmd, env): + """ + run_cqlsh will throw ToolError if cqlsh exits with a non-zero exit code. 
+ """ + try: + out, err, _ = self.node1.run_cqlsh(cmd, env) + except ToolError as e: + return e.stdout, e.stderr + + cqlsh_stdout, cqlsh_stderr, = run_cqlsh_catch_toolerror('COPY foo.bar FROM \'blah\';', ['--no-file-io']) + assert cqlsh_stdout == '' + assert 'No file I/O permitted' in cqlsh_stderr + + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('DEBUG', ['--no-file-io']) + assert cqlsh_stdout == '' + assert 'No file I/O permitted' in cqlsh_stderr + + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('CAPTURE \'nah\'', ['--no-file-io']) + assert cqlsh_stdout == '' + assert 'No file I/O permitted' in cqlsh_stderr + + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('SOURCE \'nah\'', ['--no-file-io']) + assert cqlsh_stdout == '' + assert 'No file I/O permitted' in cqlsh_stderr + class TestCqlLogin(Tester, CqlshMixin): """ From 72eedea62574b486af25883f8d2b417c501b5024 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 25 May 2021 13:44:25 +0100 Subject: [PATCH 12/19] STAR-431: Add more tests to make sure commands work without --no-file-io too (cherry picked from commit 033a492608b2fcf54074a7b6e675379f76d0383a) --- cqlsh_tests/test_cqlsh.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/cqlsh_tests/test_cqlsh.py b/cqlsh_tests/test_cqlsh.py index 3d999dc027..8c3a55a253 100644 --- a/cqlsh_tests/test_cqlsh.py +++ b/cqlsh_tests/test_cqlsh.py @@ -2463,23 +2463,40 @@ def run_cqlsh_catch_toolerror(cmd, env): """ run_cqlsh will throw ToolError if cqlsh exits with a non-zero exit code. """ + out = "" + err = "" try: out, err, _ = self.node1.run_cqlsh(cmd, env) except ToolError as e: return e.stdout, e.stderr + return out, err - cqlsh_stdout, cqlsh_stderr, = run_cqlsh_catch_toolerror('COPY foo.bar FROM \'blah\';', ['--no-file-io']) + create_ks(self.session, 'foo', rf=1) + create_cf(self.session, 'bar', key_type='int', columns={'name': 'text'}) + + cqlsh_stdout, cqlsh_stderr, _ = self.node1.run_cqlsh('COPY foo.bar TO \'/dev/null\';', []) + assert '0 rows exported to 1 files' in cqlsh_stdout + assert cqlsh_stderr == '' + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('COPY foo.bar TO \'/dev/null\';', ['--no-file-io']) assert cqlsh_stdout == '' assert 'No file I/O permitted' in cqlsh_stderr + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('DEBUG', []) + assert '(Pdb)' in cqlsh_stdout cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('DEBUG', ['--no-file-io']) assert cqlsh_stdout == '' assert 'No file I/O permitted' in cqlsh_stderr + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('CAPTURE \'nah\'', []) + assert cqlsh_stdout == 'Now capturing query output to \'nah\'.\n' + assert cqlsh_stderr == '' cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('CAPTURE \'nah\'', ['--no-file-io']) assert cqlsh_stdout == '' assert 'No file I/O permitted' in cqlsh_stderr + cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('SOURCE \'nah\'', []) + assert cqlsh_stdout == '' + assert cqlsh_stderr == '' cqlsh_stdout, cqlsh_stderr = run_cqlsh_catch_toolerror('SOURCE \'nah\'', ['--no-file-io']) assert cqlsh_stdout == '' assert 'No file I/O permitted' in cqlsh_stderr From e6ae1bf4b11f03d9c2ead7be5088d5408d47ad98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomek=20=C5=81asica?= Date: Tue, 1 Jun 2021 13:24:40 +0200 Subject: [PATCH 13/19] STAR-432: Add tests for consistency level options (#18) * STAR-432: Add tests for consistency level options Co-authored-by: Robert Stupp snazy@snazy.de (cherry picked from commit 
21c18c599f89427b3f1c865bd577eb15f792d42d) --- cqlsh_tests/test_cqlsh.py | 46 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/cqlsh_tests/test_cqlsh.py b/cqlsh_tests/test_cqlsh.py index 8c3a55a253..d9f826133b 100644 --- a/cqlsh_tests/test_cqlsh.py +++ b/cqlsh_tests/test_cqlsh.py @@ -1884,6 +1884,52 @@ def test_connect_timeout(self): stdout, stderr = self.run_cqlsh(node1, cmds='USE system', cqlsh_options=['--debug', '--connect-timeout=10']) assert "Using connect timeout: 10 seconds" in stderr + @since('4.0') + def test_consistency_level_options(self): + """ + Tests for new cmdline consistency options: + - consistency-level + - serial-consistency-level + @jira_ticket STAR-432 + """ + self.cluster.populate(1) + self.cluster.start() + + node1, = self.cluster.nodelist() + + def expect_output_no_errors(cmd, options, output): + stdout, stderr = self.run_cqlsh(node1, cmds=cmd, cqlsh_options=options) + assert output in stdout, stderr + assert stderr == '' + + expect_output_no_errors('CONSISTENCY', [], + 'Current consistency level is ONE.') + + expect_output_no_errors('CONSISTENCY', ['--consistency-level', 'quorum'], + 'Current consistency level is QUORUM.') + + expect_output_no_errors('SERIAL CONSISTENCY', [], + 'Current serial consistency level is SERIAL.') + + expect_output_no_errors('SERIAL CONSISTENCY', ['--serial-consistency-level', 'local_serial'], + 'Current serial consistency level is LOCAL_SERIAL.') + + def expect_error(cmd, options, error_msg): + stdout, stderr = self.run_cqlsh(node1, cmds=cmd, cqlsh_options=options) + assert error_msg in stderr + + expect_error('CONSISTENCY', ['--consistency-level', 'foop'], + '"foop" is not a valid consistency level') + + expect_error('CONSISTENCY', ['--consistency-level', 'serial'], + '"serial" is not a valid consistency level') + + expect_error('SERIAL CONSISTENCY', ['--serial-consistency-level', 'foop'], + '"foop" is not a valid serial consistency level') + + expect_error('SERIAL CONSISTENCY', ['--serial-consistency-level', 'ONE'], + '"ONE" is not a valid serial consistency level') + @since('3.0.19') def test_protocol_negotiation(self): """ From fd2b1c3d96e78c1530befd8fce5a75d5f37a9894 Mon Sep 17 00:00:00 2001 From: dan jatnieks Date: Tue, 8 Jun 2021 07:17:08 -0700 Subject: [PATCH 14/19] STAR-543: Port guardrail tests and changes (#19) Co-authored-by: Aleksandr Sorokoumov --- byteman/guardrails/disk_usage_full.btm | 8 ++ byteman/guardrails/disk_usage_stuffed.btm | 8 ++ client_request_metrics_test.py | 3 +- compaction_test.py | 10 ++- cqlsh_tests/test_cqlsh.py | 7 +- cqlsh_tests/test_cqlsh_copy.py | 26 ++++-- dtest_setup.py | 4 + guardrails_test.py | 99 +++++++++++++++++++++++ paging_test.py | 26 +++--- pushed_notifications_test.py | 37 +++++---- read_failures_test.py | 17 +++- tools/misc.py | 11 +++ 12 files changed, 217 insertions(+), 39 deletions(-) create mode 100644 byteman/guardrails/disk_usage_full.btm create mode 100644 byteman/guardrails/disk_usage_stuffed.btm create mode 100644 guardrails_test.py diff --git a/byteman/guardrails/disk_usage_full.btm b/byteman/guardrails/disk_usage_full.btm new file mode 100644 index 0000000000..bbdf8ddca9 --- /dev/null +++ b/byteman/guardrails/disk_usage_full.btm @@ -0,0 +1,8 @@ +RULE return FULL disk usage +CLASS org.apache.cassandra.service.disk.usage.DiskUsageMonitor +METHOD getState +AT EXIT +IF TRUE +DO + return org.apache.cassandra.service.disk.usage.DiskUsageState.FULL; +ENDRULE \ No newline at end of file diff --git a/byteman/guardrails/disk_usage_stuffed.btm 
b/byteman/guardrails/disk_usage_stuffed.btm new file mode 100644 index 0000000000..3256211304 --- /dev/null +++ b/byteman/guardrails/disk_usage_stuffed.btm @@ -0,0 +1,8 @@ +RULE return STUFFED disk usage +CLASS org.apache.cassandra.service.disk.usage.DiskUsageMonitor +METHOD getState +AT EXIT +IF TRUE +DO + return org.apache.cassandra.service.disk.usage.DiskUsageState.STUFFED; +ENDRULE \ No newline at end of file diff --git a/client_request_metrics_test.py b/client_request_metrics_test.py index 6c5ef4dd9a..1900ddb099 100644 --- a/client_request_metrics_test.py +++ b/client_request_metrics_test.py @@ -42,7 +42,7 @@ def fixture_add_additional_log_patterns(self, fixture_dtest_setup): fixture_dtest_setup.ignore_log_patterns = ( 'Testing write failures', # The error to simulate a write failure 'ERROR WRITE_FAILURE', # Logged in DEBUG mode for write failures - f"Scanned over {TOMBSTONE_FAILURE_THRESHOLD + 1} tombstones during query" # Caused by the read failure tests + f"Scanned over {TOMBSTONE_FAILURE_THRESHOLD + 1} (tombstones|tombstone rows) during query" # Caused by the read failure tests ) def setup_once(self): @@ -50,6 +50,7 @@ def setup_once(self): cluster.set_configuration_options({'read_request_timeout_in_ms': 3000, 'write_request_timeout_in_ms': 3000, 'phi_convict_threshold': 12, + 'tombstone_warn_threshold': -1, 'tombstone_failure_threshold': TOMBSTONE_FAILURE_THRESHOLD, 'enable_materialized_views': 'true'}) cluster.populate(2, debug=True) diff --git a/compaction_test.py b/compaction_test.py index d84b5dcd0f..ba5486d0eb 100644 --- a/compaction_test.py +++ b/compaction_test.py @@ -339,7 +339,10 @@ def test_large_compaction_warning(self): Check that we log a warning when the partition size is bigger than compaction_large_partition_warning_threshold_mb """ cluster = self.cluster - cluster.set_configuration_options({'compaction_large_partition_warning_threshold_mb': 1}) + if self.supports_guardrails: + cluster.set_configuration_options({'guardrails': {'partition_size_warn_threshold_in_mb': 1}}) + else: + cluster.set_configuration_options({'compaction_large_partition_warning_threshold_mb': 1}) cluster.populate(1).start() [node] = cluster.nodelist() @@ -361,7 +364,10 @@ def test_large_compaction_warning(self): node.nodetool('compact ks large') verb = 'Writing' if self.cluster.version() > '2.2' else 'Compacting' sizematcher = '\d+ bytes' if self.cluster.version() < LooseVersion('3.6') else '\d+\.\d{3}(K|M|G)iB' - node.watch_log_for('{} large partition ks/large:user \({}'.format(verb, sizematcher), from_mark=mark, timeout=180) + log_message = '{} large partition ks/large:user \({}'.format(verb, sizematcher) + if self.supports_guardrails: + log_message = "Detected partition 'user' in ks.large of size 2MB is greater than the maximum recommended size \(1MB\)" + node.watch_log_for(log_message, from_mark=mark, timeout=180) ret = list(session.execute("SELECT properties from ks.large where userid = 'user'")) assert_length_equal(ret, 1) diff --git a/cqlsh_tests/test_cqlsh.py b/cqlsh_tests/test_cqlsh.py index d9f826133b..b045e01142 100644 --- a/cqlsh_tests/test_cqlsh.py +++ b/cqlsh_tests/test_cqlsh.py @@ -1833,8 +1833,11 @@ def test_client_warnings(self): """ max_partitions_per_batch = 5 self.cluster.populate(3) - self.cluster.set_configuration_options({ - 'unlogged_batch_across_partitions_warn_threshold': str(max_partitions_per_batch)}) + + config_opts = {'unlogged_batch_across_partitions_warn_threshold': str(max_partitions_per_batch)} + if self.supports_guardrails: + config_opts = {"guardrails": 
config_opts} + self.cluster.set_configuration_options(config_opts) self.cluster.start() diff --git a/cqlsh_tests/test_cqlsh_copy.py b/cqlsh_tests/test_cqlsh_copy.py index 2f4725f22c..458804c768 100644 --- a/cqlsh_tests/test_cqlsh_copy.py +++ b/cqlsh_tests/test_cqlsh_copy.py @@ -2481,8 +2481,12 @@ def test_bulk_round_trip_blogposts(self): @jira_ticket CASSANDRA-9302 """ + config_opts = {'batch_size_warn_threshold_in_kb': '10'} + if self.supports_guardrails: # batch size thresholds moved to guardrails in 4.0 + config_opts = {'guardrails': config_opts} + self._test_bulk_round_trip(nodes=3, partitioner="murmur3", num_operations=10000, - configuration_options={'batch_size_warn_threshold_in_kb': '10'}, + configuration_options=config_opts, profile=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'blogposts.yaml'), stress_table='stresscql.blogposts') @@ -2495,9 +2499,16 @@ def test_bulk_round_trip_blogposts_with_max_connections(self): @jira_ticket CASSANDRA-10938 """ + batch_size_warn_threshold_in_kb = '10' + native_transport_max_concurrent_connections = '12' + if self.supports_guardrails: # batch size thresholds moved to guardrails in 4.0 + config_opts = {'guardrails': {'batch_size_warn_threshold_in_kb': batch_size_warn_threshold_in_kb}, + 'native_transport_max_concurrent_connections': native_transport_max_concurrent_connections} + else: + config_opts = {'native_transport_max_concurrent_connections': native_transport_max_concurrent_connections, + 'batch_size_warn_threshold_in_kb': batch_size_warn_threshold_in_kb} self._test_bulk_round_trip(nodes=3, partitioner="murmur3", num_operations=10000, - configuration_options={'native_transport_max_concurrent_connections': '12', - 'batch_size_warn_threshold_in_kb': '10'}, + configuration_options=config_opts, profile=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'blogposts.yaml'), stress_table='stresscql.blogposts', copy_to_options={'NUMPROCESSES': 5, 'MAXATTEMPTS': 20}, @@ -2827,8 +2838,13 @@ def test_copy_from_with_large_cql_rows(self): @jira_ticket CASSANDRA-11474 """ num_records = 100 - self.prepare(nodes=1, configuration_options={'batch_size_warn_threshold_in_kb': '1', # warn with 1kb and fail - 'batch_size_fail_threshold_in_kb': '5'}) # with 5kb size batches + batch_size_warn_threshold_in_kb = '1' # warn with 1kb and fail + batch_size_fail_threshold_in_kb = '5' # with 5kb size batches + config_opts = {'batch_size_warn_threshold_in_kb': batch_size_warn_threshold_in_kb, + 'batch_size_fail_threshold_in_kb': batch_size_fail_threshold_in_kb} + if self.supports_guardrails: # batch size thresholds moved to guardrails in 4.0 + config_opts = {'guardrails': config_opts} + self.prepare(nodes=1, configuration_options=config_opts) logger.debug('Running stress') stress_table_name = 'standard1' diff --git a/dtest_setup.py b/dtest_setup.py index d04fb001bc..009fb220f6 100644 --- a/dtest_setup.py +++ b/dtest_setup.py @@ -332,6 +332,10 @@ def dump_jfr_recording(self, nodes): def supports_v5_protocol(self, cluster_version): return cluster_version >= LooseVersion('4.0') + def supports_guardrails(self): + return self.cluster.version() >= LooseVersion('4.0') + + def cleanup_last_test_dir(self): if os.path.exists(self.last_test_dir): os.remove(self.last_test_dir) diff --git a/guardrails_test.py b/guardrails_test.py new file mode 100644 index 0000000000..bf883bba98 --- /dev/null +++ b/guardrails_test.py @@ -0,0 +1,99 @@ +import logging +import time +import pytest +import re + +from cassandra import InvalidRequest + +from dtest import Tester, create_ks 
+from tools.assertions import assert_one + +since = pytest.mark.since +logger = logging.getLogger(__name__) + +class BaseGuardrailsTester(Tester): + + def prepare(self, rf=1, options=None, nodes=3, install_byteman=False, extra_jvm_args=None, **kwargs): + if options is None: + options = {} + + if extra_jvm_args is None: + extra_jvm_args = [] + + cluster = self.cluster + cluster.set_log_level('TRACE') + cluster.populate(nodes, install_byteman=install_byteman) + if options: + cluster.set_configuration_options(values=options) + + cluster.start(jvm_args=extra_jvm_args) + node1 = cluster.nodelist()[0] + + session = self.patient_cql_connection(node1, **kwargs) + create_ks(session, 'ks', rf) + + return session + + +@since('4.0') +class TestGuardrails(BaseGuardrailsTester): + + def test_disk_usage_guardrail(self): + """ + Test disk usage guardrail will warn if exceeds warn threshold and reject writes if exceeds failure threshold + """ + + self.fixture_dtest_setup.ignore_log_patterns = ["Write request failed because disk usage exceeds failure threshold"] + guardrails_config = {'guardrails': {'disk_usage_percentage_warn_threshold': 98, + 'disk_usage_percentage_failure_threshold': 99}} + + logger.debug("prepare 2-node cluster with rf=1 and guardrails enabled") + session = self.prepare(rf=1, nodes=2, options=guardrails_config, extra_jvm_args=['-Dcassandra.disk_usage.monitor_interval_ms=100'], install_byteman=True) + node1, node2 = self.cluster.nodelist() + session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)") + + logger.debug("Inject FULL to node1, expect log on node1 and node2 rejects writes") + mark = node1.mark_log() + self.disk_usage_injection(node1, "full", False) + node1.watch_log_for("Adding state DISK_USAGE: FULL", filename='debug.log', from_mark=mark, timeout=10) + + # verify node2 will reject writes if node1 is the replica + session2 = self.patient_exclusive_cql_connection(node2, keyspace="ks") + rows = 100 + failed = 0 + for x in range(rows): + try: + session2.execute("INSERT INTO t(id, v) VALUES({v}, {v})".format(v=x)) + except InvalidRequest as e: + assert re.search("Write request failed because disk usage exceeds failure threshold", str(e)) + failed = failed + 1 + + assert rows != failed, "Expect node2 rejects some writes, but rejected all" + assert 0 != failed, "Expect node2 rejects some writes, but rejected nothing" + assert_one(session2, "SELECT COUNT(*) FROM t", [rows - failed]) + + logger.debug("Inject STUFFED to node1, node2 should warn client") + session2.execute("TRUNCATE t") + mark = node1.mark_log() + self.disk_usage_injection(node1, "stuffed") + node1.watch_log_for("Adding state DISK_USAGE: STUFFED", filename='debug.log', from_mark=mark, timeout=10) + + warnings = 0 + for x in range(rows): + fut = session2.execute_async("INSERT INTO t(id, v) VALUES({v}, {v})".format(v=x)) + fut.result() + if fut.warnings: + assert ["Replica disk usage exceeds warn threshold"] == fut.warnings + warnings = warnings + 1 + + assert rows != warnings,"Expect node2 emits some warnings, but got all warnings" + assert 0 != warnings,"Expect node2 emits some warnings, but got no warnings" + assert_one(session2, "SELECT COUNT(*) FROM t", [rows]) + + session.cluster.shutdown() + session2.cluster.shutdown() + + def disk_usage_injection(self, node, state, clear_byteman=True): + if clear_byteman: + node.byteman_submit(['-u']) + node.byteman_submit(["./byteman/guardrails/disk_usage_{}.btm".format(state)]) diff --git a/paging_test.py b/paging_test.py index 6983d3f05c..6d666df871 100644 --- 
a/paging_test.py +++ b/paging_test.py @@ -18,6 +18,7 @@ assert_one, assert_lists_equal_ignoring_order) from tools.data import rows_to_list from tools.datahelp import create_rows, flatten_into_set, parse_data_into_dicts +from tools.misc import restart_cluster_and_update_config from tools.paging import PageAssertionMixin, PageFetcher since = pytest.mark.since @@ -3423,19 +3424,26 @@ def test_failure_threshold_deletions(self): supports_v5_protocol = self.supports_v5_protocol(self.cluster.version()) self.fixture_dtest_setup.allow_log_errors = True - self.cluster.set_configuration_options( - values={'tombstone_failure_threshold': 500} - ) + if self.supports_guardrails: + config_opts = {'guardrails': {'tombstone_failure_threshold': 500, + 'tombstone_warn_threshold': -1, + 'write_consistency_levels_disallowed': {}}} + else: + config_opts = {'tombstone_failure_threshold': 500} + restart_cluster_and_update_config(self.cluster, config_opts) self.session = self.prepare() self.setup_data() - # Add more data + if self.supports_guardrails: + # cell tombstones are not counted towards the threshold, so we delete rows + query = "delete from paging_test where id = 1 and mytext = '{}'" + else: + # Add more data + query = "insert into paging_test (id, mytext, col1) values (1, '{}', null)" + values = [uuid.uuid4() for i in range(3000)] for value in values: - self.session.execute(SimpleStatement( - "insert into paging_test (id, mytext, col1) values (1, '{}', null) ".format( - value - ), + self.session.execute(SimpleStatement(query.format(value), consistency_level=CL.ALL )) @@ -3456,7 +3464,7 @@ def test_failure_threshold_deletions(self): failure_msg = ("Scanned over.* tombstones in test_paging_size." "paging_test.* query aborted") else: - failure_msg = ("Scanned over.* tombstones during query.* query aborted") + failure_msg = ("Scanned over.* (tombstones|tombstone rows) during query.* query aborted") self.cluster.wait_for_any_log(failure_msg, 25) diff --git a/pushed_notifications_test.py b/pushed_notifications_test.py index a3b1bdcd5a..0e25e1b27f 100644 --- a/pushed_notifications_test.py +++ b/pushed_notifications_test.py @@ -388,13 +388,18 @@ def test_tombstone_failure_threshold_message(self): have_v5_protocol = self.supports_v5_protocol(self.cluster.version()) self.fixture_dtest_setup.allow_log_errors = True - self.cluster.set_configuration_options( - values={ - 'tombstone_failure_threshold': 500, - 'read_request_timeout_in_ms': 30000, # 30 seconds - 'range_request_timeout_in_ms': 40000 - } - ) + + if self.supports_guardrails: + config_options = {'guardrails': {'tombstone_warn_threshold': -1, + 'tombstone_failure_threshold': 500}, + 'read_request_timeout_in_ms': 30000, # 30 seconds + 'range_request_timeout_in_ms': 40000} + else: + config_options = {'tombstone_failure_threshold': 500, + 'read_request_timeout_in_ms': 30000, # 30 seconds + 'range_request_timeout_in_ms': 40000} + + self.cluster.set_configuration_options(values=config_options) self.cluster.populate(3).start() node1, node2, node3 = self.cluster.nodelist() proto_version = 5 if have_v5_protocol else None @@ -407,17 +412,17 @@ def test_tombstone_failure_threshold_message(self): "PRIMARY KEY (id, mytext) )" ) - # Add data with tombstones + if self.supports_guardrails: + # cell tombstones are not counted towards the threshold, so we delete rows + query = "delete from test where id = 1 and mytext = '{}'" + else: + # Add data with tombstones + query = "insert into test (id, mytext, col1) values (1, '{}', null)" values = [str(i) for i in range(1000)] for 
value in values: - session.execute(SimpleStatement( - "insert into test (id, mytext, col1) values (1, '{}', null) ".format( - value - ), - consistency_level=CL.ALL - )) - - failure_msg = ("Scanned over.* tombstones.* query aborted") + session.execute(SimpleStatement(query.format(value),consistency_level=CL.ALL)) + + failure_msg = ("Scanned over.* (tombstones|tombstone rows).* query aborted") @pytest.mark.timeout(25) def read_failure_query(): diff --git a/read_failures_test.py b/read_failures_test.py index 475f27815d..664ca70ff4 100644 --- a/read_failures_test.py +++ b/read_failures_test.py @@ -4,6 +4,7 @@ from cassandra import ConsistencyLevel, ReadFailure, ReadTimeout from cassandra.policies import FallthroughRetryPolicy from cassandra.query import SimpleStatement +from distutils.version import LooseVersion from dtest import Tester @@ -21,7 +22,9 @@ class TestReadFailures(Tester): @pytest.fixture(autouse=True) def fixture_add_additional_log_patterns(self, fixture_dtest_setup): fixture_dtest_setup.ignore_log_patterns = ( - "Scanned over [1-9][0-9]* tombstones", # This is expected when testing read failures due to tombstones + # These are expected when testing read failures due to tombstones, + "Scanned over [1-9][0-9]* tombstones", + "Scanned over [1-9][0-9]* tombstone rows", ) return fixture_dtest_setup @@ -33,9 +36,15 @@ def fixture_dtest_setup_params(self): self.expected_expt = ReadFailure def _prepare_cluster(self): - self.cluster.set_configuration_options( - values={'tombstone_failure_threshold': self.tombstone_failure_threshold} - ) + if self.supports_guardrails: + self.cluster.set_configuration_options( + values={'guardrails': {'tombstone_warn_threshold': -1, + 'tombstone_failure_threshold': self.tombstone_failure_threshold}} + ) + else: + self.cluster.set_configuration_options( + values={'tombstone_failure_threshold': self.tombstone_failure_threshold} + ) self.cluster.populate(3) self.cluster.start() self.nodes = list(self.cluster.nodes.values()) diff --git a/tools/misc.py b/tools/misc.py index 542a889a5a..d746a9947e 100644 --- a/tools/misc.py +++ b/tools/misc.py @@ -157,3 +157,14 @@ def add_skip(cls, reason=""): else: cls.pytestmark = [pytest.mark.skip(reason)] return cls + + +def restart_cluster_and_update_config(cluster, config): + """ + Takes a new config, and applies it to a cluster. We need to restart + for it to take effect. We _could_ take a node here, but we don't want to. + If you really want to change the config of just one node, use JMX. + """ + cluster.stop() + cluster.set_configuration_options(values=config) + cluster.start() From 50bf8511799fc7f97d9f47a873630fef0c317108 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomek=20=C5=81asica?= Date: Wed, 16 Jun 2021 12:59:43 +0200 Subject: [PATCH 15/19] STAR-765: Add tests for cloud connection. (#23) * STAR-765: Add tests for cloud connection. It is not possible without some infrastructre or tooling effort to trully test cloud connection. Instead added tests focus on proper cqlsh behavior when secure connect bundle is specified: - proper default consistency level - debug information - reading from cqlshrc and parameters - skipping host / port information Testing validation is based on error msgs and debug info. 
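For illustration, a minimal sketch of that validation style (assumptions: this cqlsh accepts a --secure-connect-bundle option, run_cqlsh raises ToolError on a non-zero exit as in the tests above, and the driver reports the quoted metadata-service error; none of this is taken from the patch itself):

    # Sketch: the checked-in dummy bundle points at no reachable cloud
    # endpoint, so a test asserts on error/debug output rather than on a
    # live session.
    from ccmlib.node import ToolError

    try:
        node1.run_cqlsh('HELP', cqlsh_options=['--secure-connect-bundle',
                                               'cqlsh_tests/secure-connect-test.zip'])
    except ToolError as e:
        # assumed message from the Python driver's cloud-config path
        assert 'Unable to connect to the metadata service' in e.stderr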
(cherry picked from commit 9f8cba36d6bfbed92f9df0ccd86d8048d845d59b) --- cqlsh_tests/cqlshrc.sample.cloud | 17 ++++ cqlsh_tests/secure-connect-test.zip | Bin 0 -> 12369 bytes cqlsh_tests/test_cqlsh.py | 1 + cqlsh_tests/test_cqlsh_cloud.py | 125 ++++++++++++++++++++++++++++ 4 files changed, 143 insertions(+) create mode 100644 cqlsh_tests/cqlshrc.sample.cloud create mode 100644 cqlsh_tests/secure-connect-test.zip create mode 100644 cqlsh_tests/test_cqlsh_cloud.py diff --git a/cqlsh_tests/cqlshrc.sample.cloud b/cqlsh_tests/cqlshrc.sample.cloud new file mode 100644 index 0000000000..62528670c4 --- /dev/null +++ b/cqlsh_tests/cqlshrc.sample.cloud @@ -0,0 +1,17 @@ +; Copyright DataStax, Inc. +; +; Licensed under the Apache License, Version 2.0 (the "License"); +; you may not use this file except in compliance with the License. +; You may obtain a copy of the License at +; +; http://www.apache.org/licenses/LICENSE-2.0 +; +; Unless required by applicable law or agreed to in writing, software +; distributed under the License is distributed on an "AS IS" BASIS, +; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +; See the License for the specific language governing permissions and +; limitations under the License. +; +; Sample ~/.cqlshrc file with cloud configuration. +[connection] +secure_connect_bundle = /path/to/creds.zip diff --git a/cqlsh_tests/secure-connect-test.zip b/cqlsh_tests/secure-connect-test.zip new file mode 100644 index 0000000000000000000000000000000000000000..bcd4a7fb2a8e4bd6470263d456e034d426578f4d GIT binary patch literal 12369 zcmaKT1CV9Qwr$z&vTeJ%x@_CFZQHhOv&*(!UAArOcmMa#x#yku@68n%Yh|v;8Iien zj6LSav1KKIL689;eqR&3Y;pkq2rvLh00TW*1A9j$MNj}hKp+bp!^GD526p5?hirZO zr9`#Pz<314h+0q}7>o#5nKTxP@$u!1T5@h%c$mVH{?Wj~lmBXwz0FjVRd1pm|k)_owO8pJwP zcnqc+)04w6-ZD9S3Z~|MyG{d_>o?N zQWHZelD6J5KObFP`wP_}n=z@hF>L{kO9}?c-Up#LVZEd4awoTS1#L$L_;%MRk?&+x z--6^ft%e6u68De87nahW^Oah6z4Ub~0-uk)=oTT(oJb?~s>ig{1MH?J(@tCWe@2jBCMCSfzv7UE*ry)do9#c;(CNkw{KDYaD z8DZz|Vf&f@34}Q|3WMhr5d}C89zzn~J-Z>Sx?EK6n}&DmIFN2bFLzZ)zv&3R;+A_L zt3|wdV0PVk!wS!kz+CaKs9J=H<#l|wE<=mRa`#IqnQJU&tgN;yot6lp)uZLcDQL}k zD^J)yl$(nAey#n;fVf(hqn^!OJx+63d9%R2ogVB~(CZ$4w8(dBr6squ(r3?zgBQ1DY#Dg^ zX2*5mImx{_gZfj(@UmhC?fux0eJD|0aBDeBp{oJULQanO%6u*1B+bd2SAese~+W= zs%oz6J6KsQdIY&4NFEv>HT||IBK0^KbseBub%0N^?U5YB6c!A}OgFwCP#pdjpC^-o zV=w_GFo5FS8^Aq9I-dPlzEd--#LvNY2g9cz)o{yY58%y#WRi(nhu!`*pD+&()yo>s zK~**Dc}=!&HWJ*Ilr(k>v)9cW7J=t?&&^I%5bMwi%Q zdD}bTrL1v--;Ics9?f%;Sjm<<&6VvnScbn(!#Qe6ZMLaoX{V{)B8~@kr)trGgg~}8Sg};2VzV~RW zj`pvm_rs}SV`tb;V5=BJi0<0d3>g$2tS~|mPP)mO$3i@eF*ety7cF7}Z0HJp9-lh9 zcW0S8tPy7YFss_f+hWk6xjOk$-POOi3fn-PE%)>QiUqYQQ?8wUdvcHf%!=?cg84? 
literal 0
HcmV?d00001

diff --git a/cqlsh_tests/test_cqlsh.py b/cqlsh_tests/test_cqlsh.py
index b045e01142..15dda8390e 100644
--- a/cqlsh_tests/test_cqlsh.py
+++ b/cqlsh_tests/test_cqlsh.py
@@ -93,6 +93,7 @@ def run_cqlsh(self, node, cmds, cqlsh_options=None, env_vars=None):
         logger.debug("Cqlsh command stderr:\n" + stderr)
         return stdout, stderr

+
 class TestCqlsh(Tester, CqlshMixin):

     # override cluster options to enable user defined functions
diff --git a/cqlsh_tests/test_cqlsh_cloud.py b/cqlsh_tests/test_cqlsh_cloud.py
new file mode 100644
index 0000000000..626eaaba09
--- /dev/null
+++ b/cqlsh_tests/test_cqlsh_cloud.py
@@ -0,0 +1,125 @@
+# coding=utf-8
+
+# Copyright DataStax, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import pytest
+from ccmlib.node import ToolError
+
+from dtest import Tester
+
+logger = logging.getLogger(__name__)
+since = pytest.mark.since
+
+
+@since("4.0")
+class TestSecureBundleConnection(Tester):
+    """
+    Tests of cqlsh behavior for cloud connections, e.g. with a secure
+    connect bundle. We only test cqlshrc behavior.
+
+    Testing whether a connection using a secure bundle really works
+    requires a real cluster with a generated secure bundle, and that is
+    not possible without testing infrastructure/tooling changes.
+
+    We can assume that it is correctly tested by the Python driver, or
+    it will be tested in the next stage of testing (cloud native).
+
+    Validation is done using --debug information or error messages.
+
+    Inspired by STAR-765.
+    """
+
+    CQLSHRC_PATH = 'cqlsh_tests/cqlshrc.sample.cloud'
+    BUNDLE_PATH = 'cqlsh_tests/secure-connect-test.zip'
+
+    def prepare(self, start=False):
+        if not self.cluster.nodelist():
+            self.cluster.populate(1)
+        if start:
+            self.cluster.start()
+        return self.cluster.nodelist()[0]
+
+    def _expect_tool_error(self, cmds, options, msg):
+        # run_cqlsh raises ToolError on a non-zero exit status; matching
+        # on the expected message is all the validation needed here.
+        node = self.cluster.nodelist()[0]
+        with pytest.raises(ToolError, match=msg):
+            node.run_cqlsh(cmds=cmds, cqlsh_options=options)
+
+    def test_start_fails_on_non_existing_file(self):
+        self.prepare()
+        self._expect_tool_error(cmds='HELP',
+                                options=['--secure-connect-bundle', 'not-existing-file.zip'],
+                                msg='No such file or directory')
+
+    def test_start_fails_when_file_not_a_bundle(self):
+        self.prepare()
+        self._expect_tool_error(cmds='HELP',
+                                options=['--secure-connect-bundle', self.CQLSHRC_PATH],
+                                msg='Unable to open the zip file for the cloud config')
+
+    def test_read_bundle_path_from_cqlshrc(self):
+        self.prepare()
+        self._expect_tool_error(cmds='HELP',
+                                options=['--cqlshrc', self.CQLSHRC_PATH],
+                                msg="No such file or directory: '/path/to/creds.zip'")
+
+    def test_host_and_port_are_ignored_with_secure_bundle(self):
+        # cqlsh should connect to the started ccm node with the provided host and port
+        node = self.prepare(start=True)
+        node.run_cqlsh("HELP", [])
+        # but fail with a secure bundle even if host and port are set
+        expected_msg = "https://1263dd11-0aa5-41ef-8e56-17fa5fc7036e-europe-west1.db.astra.datastax.com:31669"
+        self._expect_tool_error(cmds='HELP',
+                                options=['--secure-connect-bundle', self.BUNDLE_PATH, node.ip_addr, '9042'],
+                                msg=expected_msg)
+
+    def test_default_consistency_level_for_secure_connect_bundle_param(self):
+        self.prepare()
+        self._expect_tool_error(cmds='HELP',
+                                options=['--secure-connect-bundle', 'not-existing-file.zip', '--debug'],
+                                msg='Using consistency level:.*LOCAL_QUORUM')
+
+    def test_default_consistency_level_for_secure_connect_bundle_in_cqlshrc(self):
+        self.prepare()
+        self._expect_tool_error(cmds='HELP',
+                                options=['--cqlshrc', self.CQLSHRC_PATH, '--debug'],
+                                msg='Using consistency level:.*LOCAL_QUORUM')
+
+    def test_set_consistency_level_for_secure_connect_bundle_in_cqlshrc(self):
+        self.prepare()
+        self._expect_tool_error(cmds='HELP',
+                                options=['--cqlshrc', self.CQLSHRC_PATH, '--debug', '--consistency-level', 'TWO'],
+                                msg='Using consistency level:.*TWO')
+
+    def test_debug_should_include_cloud_details(self):
+        self.prepare()
+        self._expect_tool_error(cmds='HELP',
+                                options=['--secure-connect-bundle', 'not-existing-file.zip', '--debug'],
+                                msg='Using secure connect bundle.*not-existing-file.zip')
+
+    @pytest.mark.skip("cannot test this without secure connect bundle support in ccm")
+    def test_endpoint_load_balancing_policy_is_used(self):
+        # testing this would require a 3-node cloud cluster
+        assert False, "TODO: implement"
+
+    @pytest.mark.skip("cannot test this without secure connect bundle support in ccm")
+    def test_connects_correctly(self):
+        assert False, "TODO: implement"
+
+    @pytest.mark.skip("cannot test this without secure connect bundle support in ccm")
in ccm") + def test_login_command_keeps_cloud_connection_using_bundle(self): + # cqlsh.py -b some-bundle.zip -u user -p password + # LOGIN user(password) + assert False From 09052110c8a07c236ed83dee875ad0670a32e43b Mon Sep 17 00:00:00 2001 From: dan jatnieks Date: Thu, 24 Jun 2021 14:34:22 -0700 Subject: [PATCH 16/19] STAR-386: Add logging around failure to get timestamp info (#28) --- bootstrap_test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bootstrap_test.py b/bootstrap_test.py index 3f67df0621..960af0b4a7 100644 --- a/bootstrap_test.py +++ b/bootstrap_test.py @@ -882,7 +882,9 @@ def test_simultaneous_bootstrap(self): # Repeat the select count(*) query, to help catch # bugs like 9484, where count(*) fails at higher # data loads. + logger.error(node1.nodetool('status').stdout) for _ in range(5): + logger.error("Executing SELECT to node2") assert_one(session, "SELECT count(*) from keyspace1.standard1", [500000], cl=ConsistencyLevel.ONE) def test_cleanup(self): From 4c7c22cce63d890cab70bad02c06394a6d9e9bef Mon Sep 17 00:00:00 2001 From: Jaroslaw Grabowski Date: Tue, 1 Jun 2021 10:32:40 +0200 Subject: [PATCH 17/19] STAR-564 Check only MODIFY on base when updating table with MV (#17) If a user has only MODIFY permission on a table and there is a materialized view built on the same table an insert will fail with the following error: Unauthorized: Error from server: code=2100 [Unauthorized] Only base MODIFY permission is required to update base with MV. Co-authored-by: Zhao Yang (cherry picked from commit 55dad391f3f98577d9b0b7e923a5a9b4eacfd2ab) (cherry picked from commit f1e441e3eab61723f8290f09cc3053959601a57a) --- auth_test.py | 42 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/auth_test.py b/auth_test.py index df57fb0120..a961465aaf 100644 --- a/auth_test.py +++ b/auth_test.py @@ -541,10 +541,16 @@ def test_materialized_views_auth(self): * Create a new user, 'cathy', with no permissions * Create a ks, table * Connect as cathy + * * Try CREATE MV without ALTER permission on base table, assert throws Unauthorized * Grant cathy ALTER permissions, then CREATE MV successfully + * + * Try to MODIFY base without WRITE permission on base, assert throws Unauthorized + * Grant cathy WRITE permissions on base, and modify base successfully + * * Try to SELECT from the mv, assert throws Unauthorized - * Grant cathy SELECT permissions, and read from the MV successfully + * Grant cathy SELECT permissions on base, and read from the MV successfully + * * Revoke cathy's ALTER permissions, assert DROP MV throws Unauthorized * Restore cathy's ALTER permissions, DROP MV successfully """ @@ -565,12 +571,36 @@ def test_materialized_views_auth(self): cassandra.execute("GRANT ALTER ON ks.cf TO cathy") cathy.execute(create_mv) - # TRY SELECT MV without SELECT permission on base table - assert_unauthorized(cathy, "SELECT * FROM ks.mv1", "User cathy has no SELECT permission on
+        # Try MODIFY base without WRITE permission on base
+        assert_unauthorized(cathy, "INSERT INTO ks.cf(id, value) VALUES(1, '1')", "User cathy has no MODIFY permission on <table ks.cf> or any of its parents")

-        # Grant SELECT permission and CREATE MV
-        cassandra.execute("GRANT SELECT ON ks.cf TO cathy")
-        cathy.execute("SELECT * FROM ks.mv1")
+        if self.cluster.version() >= LooseVersion('4.0'):
+            # From 4.0 onward, only base MODIFY permission is required to update base with MV
+            # Grant WRITE permission on base
+            cassandra.execute("GRANT MODIFY ON ks.cf TO cathy")
+            cathy.execute("INSERT INTO ks.cf(id, value) VALUES(1, '1')")
+
+            # Try SELECT on MV without SELECT permission on base table
+            assert_unauthorized(cathy, "SELECT * FROM ks.cf", "User cathy has no SELECT permission on <table ks.cf> or any of its parents")
+            assert_unauthorized(cathy, "SELECT * FROM ks.mv1", "User cathy has no SELECT permission on <table ks.cf> or any of its parents")
+
+            # Grant SELECT permission
+            cassandra.execute("GRANT SELECT ON ks.cf TO cathy")
+            assert_one(cathy, "SELECT * FROM ks.cf", [1, '1'])
+            assert_one(cathy, "SELECT * FROM ks.mv1", ['1', 1])
+        else:
+            # Before 4.0, MODIFY on MV is required to insert to base
+            # Grant WRITE permission on base
+            cassandra.execute("GRANT MODIFY ON ks.cf TO cathy")
+            assert_unauthorized(cathy, "INSERT INTO ks.cf(id, value) VALUES(1, '1')", "User cathy has no SELECT permission on <table ks.cf> or any of its parents")
+            cassandra.execute("GRANT SELECT ON ks.cf TO cathy")
+            assert_unauthorized(cathy, "INSERT INTO ks.cf(id, value) VALUES(1, '1')", "User cathy has no MODIFY permission on <table ks.mv1> or any of its parents")
+
+            # Grant WRITE permission on MV
+            cassandra.execute("GRANT MODIFY ON ks.mv1 TO cathy")
+            cathy.execute("INSERT INTO ks.cf(id, value) VALUES(1, '1')")
+            assert_one(cathy, "SELECT * FROM ks.cf", [1, '1'])
+            assert_one(cathy, "SELECT * FROM ks.mv1", ['1', 1])

         # Revoke ALTER permission and try DROP MV
         cassandra.execute("REVOKE ALTER ON ks.cf FROM cathy")

From ba91f9d5360a67a2eec3ae85940996a353e518f0 Mon Sep 17 00:00:00 2001
From: Ruslan Fomkin
Date: Tue, 8 Jun 2021 16:40:03 +0200
Subject: [PATCH 18/19] Add pytest cache and vscode folders to gitignore (#21)

(cherry picked from commit b3bd616201ef5c41ad088954077a8467148c8626)
(cherry picked from commit f2c37011d7dc21016fa33103625529e85994e595)
---
 .gitignore | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.gitignore b/.gitignore
index 51321392fb..ed7f15ce3d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,5 @@ last_test_dir
 upgrade
 html/
 doxygen/doxypy-0.4.2/
+.pytest_cache/
+.vscode/

From 387d68885e1bcb3330e4b6944a04f923ae59f0e0 Mon Sep 17 00:00:00 2001
From: Ruslan Fomkin
Date: Fri, 11 Jun 2021 11:10:34 +0200
Subject: [PATCH 19/19] STAR-582 fix repair error on one node cluster (#20)

Port of DB-1511, riptano/apollo-dtest#197

Co-authored-by: Zhao Yang
(cherry picked from commit c7beefcbc97cd288badc48c483a3821dc694da58)
(cherry picked from commit a02abc60c8f4a8fe70e6268a8103c62124d421c5)
---
 repair_tests/repair_test.py | 34 ++++++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/repair_tests/repair_test.py b/repair_tests/repair_test.py
index ddae777343..4ff6d024ea 100644
--- a/repair_tests/repair_test.py
+++ b/repair_tests/repair_test.py
@@ -1195,6 +1195,40 @@ def run_repair():
         else:
             node1.nodetool('repair keyspace1 standard1 -inc -par')

+    @since('3.0')
+    def test_repair_one_node_cluster(self):
+        options = []
+        fix_STAR582 = self.cluster.version() >= "4.0"
+        if not fix_STAR582:
+            options = ['--ignore-unreplicated-keyspaces'] + options
+        self._repair_abort_test(options=options, nodes=1, rf=2)
+
+    @since('3.0')
+    def test_repair_one_node_in_local_dc(self):
+        self._repair_abort_test(options=['--ignore-unreplicated-keyspaces', '--in-local-dc'], nodes=[1, 1], rf={'dc1': 1, 'dc2': 1}, no_common_range=True)
+
+    def _repair_abort_test(self, options=[], nodes=1, rf=1, no_common_range=False):
+        cluster = self.cluster
+        logger.debug("Starting cluster..")
+        cluster.populate(nodes).start(wait_for_binary_proto=True)
+
+        node1 = self.cluster.nodelist()[0]
+        session = self.patient_cql_connection(node1)
+        create_ks(session, 'ks', rf=rf)
+
+        support_preview = self.cluster.version() >= "4.0"
+        if support_preview:
+            logger.debug("Preview repair")
+            out = node1.repair(["--preview"] + options)
+            if no_common_range:
+                assert "Nothing to repair for " in str(out), "Expect 'Nothing to repair for '"
+
+        logger.debug("Full repair")
+        node1.repair(["--full"] + options)
+
+        logger.debug("Incremental repair")
+        node1.repair(options)
+
     @since('2.2')
     def test_dead_sync_initiator(self):
         """