diff --git a/CHANGELOG.md b/CHANGELOG.md index 76114c602c..9eb9994d6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -28,6 +28,7 @@ Changes can also be flagged with a GitHub label for tracking purposes. The URL o ### Changed - Updated brand link url [#5656](https://github.com/ethyca/fides/pull/5656) - Changed "Reclassify" D&D button to show in an overflow menu when row actions are overcrowded [#5655](https://github.com/ethyca/fides/pull/5655) +- Removed primary key requirements for BigQuery and Postgres erasures [#5591](https://github.com/ethyca/fides/pull/5591) ### Fixed - Fixed issue where the custom report "reset" button was not working as expected [#5649](https://github.com/ethyca/fides/pull/5649) diff --git a/data/dataset/bigquery_enterprise_test_dataset.yml b/data/dataset/bigquery_enterprise_test_dataset.yml index 59d27e68a2..64668192d0 100644 --- a/data/dataset/bigquery_enterprise_test_dataset.yml +++ b/data/dataset/bigquery_enterprise_test_dataset.yml @@ -1,405 +1,149 @@ dataset: - - fides_key: enterprise_dsr_testing - organization_fides_key: default_organization - tags: null - name: Bigquery Enterprise Test Dataset - description: BigQuery dataset containing real data - meta: null - data_categories: null - fides_meta: - resource_id: enterprise_dsr_testing.prj-sandbox-55855.enterprise_dsr_testing - after: null - namespace: - dataset_id: enterprise_dsr_testing - project_id: prj-sandbox-55855 - collections: - - name: comments - description: null - data_categories: null - fields: - - name: creation_date - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: id - description: null - data_categories: - - system.operations - fides_meta: - references: null - identity: null - primary_key: true - data_type: integer - length: null - return_all_elements: null - read_only: null - custom_request_field: null - fields: null - - name: post_id - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: score - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: text - description: null - data_categories: - - user.contact - fides_meta: null - fields: null - - name: user_display_name - description: null - data_categories: - - user.contact - fides_meta: null - fields: null - - name: user_id - description: null - data_categories: - - user.contact - fides_meta: - references: - - dataset: enterprise_dsr_testing - field: users.id - direction: from - identity: null - primary_key: null - data_type: null - length: null - return_all_elements: null - read_only: null - custom_request_field: null - fields: null - fides_meta: null - - name: post_history - description: null - data_categories: null - fields: - - name: comment - description: null - data_categories: - - user.contact - fides_meta: null - fields: null - - name: creation_date - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: id - description: null - data_categories: - - system.operations - fides_meta: - references: null - identity: null - primary_key: true - data_type: integer - length: null - return_all_elements: null - read_only: null - custom_request_field: null - fields: null - - name: post_history_type_id - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: post_id - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: revision_guid - description: 
null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: text - description: null - data_categories: - - user.contact - fides_meta: null - fields: null - - name: user_id - description: null - data_categories: - - system.operations - fides_meta: - references: - - dataset: enterprise_dsr_testing - field: users.id - direction: from - identity: null - primary_key: null - data_type: null - length: null - return_all_elements: null - read_only: null - custom_request_field: null - fields: null - fides_meta: null - - name: stackoverflow_posts - description: null - data_categories: null - fields: - - name: accepted_answer_id - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: answer_count - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: body - description: null - data_categories: - - user.contact - fides_meta: null - fields: null - - name: comment_count - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: community_owned_date - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: creation_date - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: favorite_count - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: id - description: null - data_categories: - - system.operations - fides_meta: - references: null - identity: null - primary_key: true - data_type: integer - length: null - return_all_elements: null - read_only: null - custom_request_field: null - fields: null - - name: last_activity_date - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: last_edit_date - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: last_editor_display_name - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: last_editor_user_id - description: null - data_categories: - - system.operations - fides_meta: - references: - - dataset: enterprise_dsr_testing - field: users.id - direction: from - identity: null - primary_key: null - data_type: null - length: null - return_all_elements: null - read_only: null - custom_request_field: null - fields: null - - name: owner_display_name - description: null - data_categories: - - user.contact - fides_meta: null - fields: null - - name: owner_user_id - description: null - data_categories: - - system.operations - fides_meta: - references: - - dataset: enterprise_dsr_testing - field: users.id - direction: from - identity: null - primary_key: null - data_type: integer - length: null - return_all_elements: null - read_only: null - custom_request_field: null - fields: null - - name: parent_id - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: post_type_id - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: score - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: tags - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: title - description: null - data_categories: - - user.contact - fides_meta: null - fields: null - - name: view_count - description: null - data_categories: - - 
system.operations - fides_meta: null - fields: null - fides_meta: null - - name: users - description: null - data_categories: null - fields: - - name: about_me - description: null - data_categories: - - user.contact - fides_meta: null - fields: null - - name: age - description: null - data_categories: - - user - fides_meta: null - fields: null - - name: creation_date - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: display_name - description: null - data_categories: - - user.contact - fides_meta: null - fields: null - - name: down_votes - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: id - description: null - data_categories: - - user.contact - fides_meta: - references: null - identity: stackoverflow_user_id - primary_key: true - data_type: integer - length: null - return_all_elements: null - read_only: null - custom_request_field: null - fields: null - - name: last_access_date - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: location - description: null - data_categories: - - user.contact - fides_meta: null - fields: null - - name: profile_image_url - description: null - data_categories: - - user.contact - fides_meta: null - fields: null - - name: reputation - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: up_votes - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: views - description: null - data_categories: - - system.operations - fides_meta: null - fields: null - - name: website_url - description: null - data_categories: - - user - fides_meta: null - fields: null - fides_meta: - after: null - erase_after: - - enterprise_dsr_testing.comments - skip_processing: false - masking_strategy_override: null - partitioning: null + - fides_key: enterprise_dsr_testing + organization_fides_key: default_organization + name: Bigquery Enterprise Test Dataset + description: BigQuery dataset containing real data + fides_meta: + resource_id: enterprise_dsr_testing.prj-sandbox-55855.enterprise_dsr_testing + namespace: + dataset_id: enterprise_dsr_testing + project_id: prj-sandbox-55855 + collections: + - name: comments + fields: + - name: creation_date + data_categories: [system.operations] + - name: id + data_categories: [system.operations] + fides_meta: + data_type: integer + - name: post_id + data_categories: [system.operations] + - name: score + data_categories: [system.operations] + - name: text + data_categories: [user.contact] + - name: user_display_name + data_categories: [user.contact] + - name: user_id + data_categories: [user.contact] + fides_meta: + references: + - dataset: enterprise_dsr_testing + field: users.id + direction: from + - name: post_history + fields: + - name: comment + data_categories: [user.contact] + - name: creation_date + data_categories: [system.operations] + - name: id + data_categories: [system.operations] + fides_meta: + data_type: integer + - name: post_history_type_id + data_categories: [system.operations] + - name: post_id + data_categories: [system.operations] + - name: revision_guid + data_categories: [system.operations] + - name: text + data_categories: [user.contact] + - name: user_id + data_categories: [system.operations] + fides_meta: + references: + - dataset: enterprise_dsr_testing + field: users.id + direction: from + - name: stackoverflow_posts + fields: + - name: accepted_answer_id + 
data_categories: [system.operations] + - name: answer_count + data_categories: [system.operations] + - name: body + data_categories: [user.contact] + - name: comment_count + data_categories: [system.operations] + - name: community_owned_date + data_categories: [system.operations] + - name: creation_date + data_categories: [system.operations] + - name: favorite_count + data_categories: [system.operations] + - name: id + data_categories: [system.operations] + fides_meta: + data_type: integer + - name: last_activity_date + data_categories: [system.operations] + - name: last_edit_date + data_categories: [system.operations] + - name: last_editor_display_name + data_categories: [system.operations] + - name: last_editor_user_id + data_categories: [system.operations] + fides_meta: + references: + - dataset: enterprise_dsr_testing + field: users.id + direction: from + - name: owner_display_name + data_categories: [user.contact] + - name: owner_user_id + data_categories: [system.operations] + fides_meta: + references: + - dataset: enterprise_dsr_testing + field: users.id + direction: from + data_type: integer + - name: parent_id + data_categories: [system.operations] + - name: post_type_id + data_categories: [system.operations] + - name: score + data_categories: [system.operations] + - name: tags + data_categories: [system.operations] + - name: title + data_categories: [user.contact] + - name: view_count + data_categories: [system.operations] + - name: users + fields: + - name: about_me + data_categories: [user.contact] + - name: age + data_categories: [user] + - name: creation_date + data_categories: [system.operations] + - name: display_name + data_categories: [user.contact] + - name: down_votes + data_categories: [system.operations] + - name: id + data_categories: [user.contact] + fides_meta: + identity: stackoverflow_user_id + data_type: integer + - name: last_access_date + data_categories: [system.operations] + - name: location + data_categories: [user.contact] + - name: profile_image_url + data_categories: [user.contact] + - name: reputation + data_categories: [system.operations] + - name: up_votes + data_categories: [system.operations] + - name: views + data_categories: [system.operations] + - name: website_url + data_categories: [user] + fides_meta: + erase_after: + - enterprise_dsr_testing.comments + skip_processing: false diff --git a/data/dataset/bigquery_example_test_dataset.yml b/data/dataset/bigquery_example_test_dataset.yml index 11fdac1aba..c4ea16cb44 100644 --- a/data/dataset/bigquery_example_test_dataset.yml +++ b/data/dataset/bigquery_example_test_dataset.yml @@ -13,8 +13,6 @@ dataset: data_categories: [user.contact.address.street] - name: id data_categories: [system.operations] - fides_meta: - primary_key: True - name: state data_categories: [user.contact.address.state] - name: street @@ -53,8 +51,6 @@ dataset: data_type: string - name: id data_categories: [user.unique_id] - fides_meta: - primary_key: True - name: name data_categories: [user.name] fides_meta: @@ -80,8 +76,6 @@ dataset: data_type: string - name: id data_categories: [user.unique_id] - fides_meta: - primary_key: True - name: name data_categories: [user.name] fides_meta: @@ -98,8 +92,6 @@ dataset: direction: from - name: id data_categories: [system.operations] - fides_meta: - primary_key: True - name: time data_categories: [user.sensor] @@ -114,8 +106,6 @@ dataset: direction: from - name: id data_categories: [system.operations] - fides_meta: - primary_key: True - name: shipping_address_id data_categories: 
[system.operations] fides_meta: @@ -166,8 +156,6 @@ dataset: direction: from - name: id data_categories: [system.operations] - fides_meta: - primary_key: True - name: name data_categories: [user.financial] - name: preferred @@ -177,8 +165,6 @@ dataset: fields: - name: id data_categories: [system.operations] - fides_meta: - primary_key: True - name: name data_categories: [system.operations] - name: price @@ -193,8 +179,6 @@ dataset: data_type: string - name: id data_categories: [system.operations] - fides_meta: - primary_key: True - name: month data_categories: [system.operations] - name: name @@ -227,8 +211,6 @@ dataset: direction: from - name: id data_categories: [system.operations] - fides_meta: - primary_key: True - name: opened data_categories: [system.operations] diff --git a/src/fides/api/service/connectors/base_connector.py b/src/fides/api/service/connectors/base_connector.py index ca3439f523..e1f735df1c 100644 --- a/src/fides/api/service/connectors/base_connector.py +++ b/src/fides/api/service/connectors/base_connector.py @@ -132,3 +132,14 @@ def execute_standalone_retrieval_query( raise NotImplementedError( "execute_standalone_retrieval_query must be implemented in a concrete subclass" ) + + @property + def requires_primary_keys(self) -> bool: + """ + Indicates if datasets linked to this connector require primary keys for erasures. + Defaults to True. + """ + + # Defaulting to true for now so we can keep the default behavior and + # incrementally determine the need for primary keys across all connectors + return True diff --git a/src/fides/api/service/connectors/bigquery_connector.py b/src/fides/api/service/connectors/bigquery_connector.py index 8b51f90842..ae6fe4b909 100644 --- a/src/fides/api/service/connectors/bigquery_connector.py +++ b/src/fides/api/service/connectors/bigquery_connector.py @@ -33,6 +33,11 @@ class BigQueryConnector(SQLConnector): secrets_schema = BigQuerySchema + @property + def requires_primary_keys(self) -> bool: + """BigQuery does not have the concept of primary keys so they're not required for erasures.""" + return False + # Overrides BaseConnector.build_uri def build_uri(self) -> str: """Build URI of format""" diff --git a/src/fides/api/service/connectors/postgres_connector.py b/src/fides/api/service/connectors/postgres_connector.py index 5354d4ec13..2abafc01c8 100644 --- a/src/fides/api/service/connectors/postgres_connector.py +++ b/src/fides/api/service/connectors/postgres_connector.py @@ -19,6 +19,11 @@ class PostgreSQLConnector(SQLConnector): secrets_schema = PostgreSQLSchema + @property + def requires_primary_keys(self) -> bool: + """Postgres allows arbitrary columns in the WHERE clause for updates so primary keys are not required.""" + return False + def build_uri(self) -> str: """Build URI of format postgresql://[user[:password]@][netloc][:port][/dbname]""" config = self.secrets_schema(**self.configuration.secrets or {}) diff --git a/src/fides/api/service/connectors/query_configs/bigquery_query_config.py b/src/fides/api/service/connectors/query_configs/bigquery_query_config.py index 681e2b9c60..6060ff5822 100644 --- a/src/fides/api/service/connectors/query_configs/bigquery_query_config.py +++ b/src/fides/api/service/connectors/query_configs/bigquery_query_config.py @@ -123,15 +123,15 @@ def generate_update( TODO: DRY up this method and `generate_delete` a bit """ update_value_map: Dict[str, Any] = self.update_value_map(row, policy, request) - non_empty_primary_keys: Dict[str, Field] = filter_nonempty_values( + non_empty_reference_field_keys: 
Dict[str, Field] = filter_nonempty_values( { fpath.string_path: fld.cast(row[fpath.string_path]) - for fpath, fld in self.primary_key_field_paths.items() + for fpath, fld in self.reference_field_paths.items() if fpath.string_path in row } ) - valid = len(non_empty_primary_keys) > 0 and update_value_map + valid = len(non_empty_reference_field_keys) > 0 and update_value_map if not valid: logger.warning( "There is not enough data to generate a valid update statement for {}", @@ -140,8 +140,8 @@ def generate_update( return [] table = Table(self._generate_table_name(), MetaData(bind=client), autoload=True) - pk_clauses: List[ColumnElement] = [ - getattr(table.c, k) == v for k, v in non_empty_primary_keys.items() + where_clauses: List[ColumnElement] = [ + getattr(table.c, k) == v for k, v in non_empty_reference_field_keys.items() ] if self.partitioning: @@ -153,13 +153,13 @@ def generate_update( for partition_clause in partition_clauses: partitioned_queries.append( table.update() - .where(*(pk_clauses + [text(partition_clause)])) + .where(*(where_clauses + [text(partition_clause)])) .values(**update_value_map) ) return partitioned_queries - return [table.update().where(*pk_clauses).values(**update_value_map)] + return [table.update().where(*where_clauses).values(**update_value_map)] def generate_delete(self, row: Row, client: Engine) -> List[Delete]: """Returns a List of SQLAlchemy DELETE statements for BigQuery. Does not actually execute the delete statement. @@ -172,15 +172,15 @@ def generate_delete(self, row: Row, client: Engine) -> List[Delete]: TODO: DRY up this method and `generate_update` a bit """ - non_empty_primary_keys: Dict[str, Field] = filter_nonempty_values( + non_empty_reference_field_keys: Dict[str, Field] = filter_nonempty_values( { fpath.string_path: fld.cast(row[fpath.string_path]) - for fpath, fld in self.primary_key_field_paths.items() + for fpath, fld in self.reference_field_paths.items() if fpath.string_path in row } ) - valid = len(non_empty_primary_keys) > 0 + valid = len(non_empty_reference_field_keys) > 0 if not valid: logger.warning( "There is not enough data to generate a valid DELETE statement for {}", @@ -189,8 +189,8 @@ def generate_delete(self, row: Row, client: Engine) -> List[Delete]: return [] table = Table(self._generate_table_name(), MetaData(bind=client), autoload=True) - pk_clauses: List[ColumnElement] = [ - getattr(table.c, k) == v for k, v in non_empty_primary_keys.items() + where_clauses: List[ColumnElement] = [ + getattr(table.c, k) == v for k, v in non_empty_reference_field_keys.items() ] if self.partitioning: @@ -202,9 +202,9 @@ def generate_delete(self, row: Row, client: Engine) -> List[Delete]: for partition_clause in partition_clauses: partitioned_queries.append( - table.delete().where(*(pk_clauses + [text(partition_clause)])) + table.delete().where(*(where_clauses + [text(partition_clause)])) ) return partitioned_queries - return [table.delete().where(*pk_clauses)] + return [table.delete().where(*where_clauses)] diff --git a/src/fides/api/service/connectors/query_configs/query_config.py b/src/fides/api/service/connectors/query_configs/query_config.py index 6e868964af..9f5ddb0251 100644 --- a/src/fides/api/service/connectors/query_configs/query_config.py +++ b/src/fides/api/service/connectors/query_configs/query_config.py @@ -100,6 +100,15 @@ def primary_key_field_paths(self) -> Dict[FieldPath, Field]: if field.primary_key } + @property + def reference_field_paths(self) -> Dict[FieldPath, Field]: + """Mapping of FieldPaths to Fields that have 
incoming identity or dataset references""" + return { + field_path: field + for field_path, field in self.field_map().items() + if field_path in {edge.f2.field_path for edge in self.node.incoming_edges} + } + def query_sources(self) -> Dict[str, List[CollectionAddress]]: """Display the input collection(s) for each query key for display purposes. @@ -412,14 +421,16 @@ def generate_query_without_tuples( # pylint: disable=R0914 def get_update_stmt( self, update_clauses: List[str], - pk_clauses: List[str], + where_clauses: List[str], ) -> str: """Returns a SQL UPDATE statement to fit SQL syntax.""" - return f"UPDATE {self.node.address.collection} SET {', '.join(update_clauses)} WHERE {' AND '.join(pk_clauses)}" + return f"UPDATE {self.node.address.collection} SET {', '.join(update_clauses)} WHERE {' AND '.join(where_clauses)}" @abstractmethod def get_update_clauses( - self, update_value_map: Dict[str, Any], non_empty_primary_keys: Dict[str, Field] + self, + update_value_map: Dict[str, Any], + where_clause_fields: Dict[str, Field], ) -> List[str]: """Returns a list of update clauses for the update statement.""" @@ -428,7 +439,7 @@ def format_query_stmt(self, query_str: str, update_value_map: Dict[str, Any]) -> """Returns a formatted update statement in the appropriate dialect.""" @abstractmethod - def format_key_map_for_update_stmt(self, fields: List[str]) -> List[str]: + def format_key_map_for_update_stmt(self, param_map: Dict[str, Any]) -> List[str]: """Adds the appropriate formatting for update statements in this datastore.""" def generate_update_stmt( @@ -436,7 +447,8 @@ def generate_update_stmt( ) -> Optional[T]: """Returns an update statement in generic SQL-ish dialect.""" update_value_map: Dict[str, Any] = self.update_value_map(row, policy, request) - non_empty_primary_keys: Dict[str, Field] = filter_nonempty_values( + + non_empty_primary_key_fields: Dict[str, Field] = filter_nonempty_values( { fpath.string_path: fld.cast(row[fpath.string_path]) for fpath, fld in self.primary_key_field_paths.items() @@ -444,17 +456,30 @@ def generate_update_stmt( } ) + non_empty_reference_fields: Dict[str, Field] = filter_nonempty_values( + { + fpath.string_path: fld.cast(row[fpath.string_path]) + for fpath, fld in self.reference_field_paths.items() + if fpath.string_path in row + } + ) + + # Create parameter mappings with masked_ prefix for SET values + param_map = { + **{f"masked_{k}": v for k, v in update_value_map.items()}, + **non_empty_primary_key_fields, + **non_empty_reference_fields, + } + update_clauses = self.get_update_clauses( - update_value_map, non_empty_primary_keys + {k: f"masked_{k}" for k in update_value_map}, + non_empty_primary_key_fields or non_empty_reference_fields, ) - pk_clauses = self.format_key_map_for_update_stmt( - list(non_empty_primary_keys.keys()) + where_clauses = self.format_key_map_for_update_stmt( + {k: k for k in non_empty_primary_key_fields or non_empty_reference_fields} ) - for k, v in non_empty_primary_keys.items(): - update_value_map[k] = v - - valid = len(pk_clauses) > 0 and len(update_clauses) > 0 + valid = len(where_clauses) > 0 and len(update_clauses) > 0 if not valid: logger.warning( "There is not enough data to generate a valid update statement for {}", @@ -462,12 +487,9 @@ def generate_update_stmt( ) return None - query_str = self.get_update_stmt( - update_clauses, - pk_clauses, - ) - logger.info("query = {}, params = {}", Pii(query_str), Pii(update_value_map)) - return self.format_query_stmt(query_str, update_value_map) + query_str = 
self.get_update_stmt(update_clauses, where_clauses) + logger.info("query = {}, params = {}", Pii(query_str), Pii(param_map)) + return self.format_query_stmt(query_str, param_map) class SQLQueryConfig(SQLLikeQueryConfig[Executable]): @@ -538,16 +560,17 @@ def generate_query( ) return None - def format_key_map_for_update_stmt(self, fields: List[str]) -> List[str]: + def format_key_map_for_update_stmt(self, param_map: Dict[str, Any]) -> List[str]: """Adds the appropriate formatting for update statements in this datastore.""" - fields.sort() - return [f"{k} = :{k}" for k in fields] + return [f"{k} = :{v}" for k, v in sorted(param_map.items())] def get_update_clauses( - self, update_value_map: Dict[str, Any], non_empty_primary_keys: Dict[str, Field] + self, + update_value_map: Dict[str, Any], + where_clause_fields: Dict[str, Field], ) -> List[str]: """Returns a list of update clauses for the update statement.""" - return self.format_key_map_for_update_stmt(list(update_value_map.keys())) + return self.format_key_map_for_update_stmt(update_value_map) def format_query_stmt( self, query_str: str, update_value_map: Dict[str, Any] diff --git a/src/fides/api/service/connectors/query_configs/snowflake_query_config.py b/src/fides/api/service/connectors/query_configs/snowflake_query_config.py index 574e1ea1b1..ec640191d8 100644 --- a/src/fides/api/service/connectors/query_configs/snowflake_query_config.py +++ b/src/fides/api/service/connectors/query_configs/snowflake_query_config.py @@ -59,15 +59,14 @@ def get_formatted_query_string( """Returns a query string with double quotation mark formatting as required by Snowflake syntax.""" return f'SELECT {field_list} FROM {self._generate_table_name()} WHERE ({" OR ".join(clauses)})' - def format_key_map_for_update_stmt(self, fields: List[str]) -> List[str]: + def format_key_map_for_update_stmt(self, param_map: Dict[str, Any]) -> List[str]: """Adds the appropriate formatting for update statements in this datastore.""" - fields.sort() - return [f'"{k}" = :{k}' for k in fields] + return [f'"{k}" = :{v}' for k, v in sorted(param_map.items())] def get_update_stmt( self, update_clauses: List[str], - pk_clauses: List[str], + where_clauses: List[str], ) -> str: """Returns a parameterized update statement in Snowflake dialect.""" - return f'UPDATE {self._generate_table_name()} SET {", ".join(update_clauses)} WHERE {" AND ".join(pk_clauses)}' + return f'UPDATE {self._generate_table_name()} SET {", ".join(update_clauses)} WHERE {" AND ".join(where_clauses)}' diff --git a/src/fides/api/service/connectors/scylla_connector.py b/src/fides/api/service/connectors/scylla_connector.py index 43a821930c..ff17674b88 100644 --- a/src/fides/api/service/connectors/scylla_connector.py +++ b/src/fides/api/service/connectors/scylla_connector.py @@ -28,6 +28,11 @@ class ScyllaConnectorMissingKeyspace(Exception): class ScyllaConnector(BaseConnector[Cluster]): """Scylla Connector""" + @property + def requires_primary_keys(self) -> bool: + """ScyllaDB requires primary keys for erasures.""" + return True + def build_uri(self) -> str: """ Builds URI - Not yet implemented diff --git a/src/fides/api/service/connectors/scylla_query_config.py b/src/fides/api/service/connectors/scylla_query_config.py index 2a72270a40..1fa52d573d 100644 --- a/src/fides/api/service/connectors/scylla_query_config.py +++ b/src/fides/api/service/connectors/scylla_query_config.py @@ -70,21 +70,27 @@ def generate_query( ) -> Optional[ScyllaDBStatement]: return self.generate_query_without_tuples(input_data, policy) - def 
format_key_map_for_update_stmt(self, fields: List[str]) -> List[str]: + def format_key_map_for_update_stmt(self, param_map: Dict[str, Any]) -> List[str]: """Adds the appropriate formatting for update statements in this datastore.""" - fields.sort() - return [f"{k} = %({k})s" for k in fields] + return [f"{k} = %({v})s" for k, v in sorted(param_map.items())] def get_update_clauses( - self, update_value_map: Dict[str, Any], non_empty_primary_keys: Dict[str, Field] + self, + update_value_map: Dict[str, Any], + where_clause_fields: Dict[str, Field], ) -> List[str]: - """Returns a list of update clauses for the update statement.""" + """Returns a list of update clauses for the update statement. + + Omits primary key fields from updates since ScyllaDB prohibits + updating primary key fields. + """ + return self.format_key_map_for_update_stmt( - [ - key - for key in update_value_map.keys() - if key not in non_empty_primary_keys - ] + { + key: value + for key, value in update_value_map.items() + if key not in where_clause_fields + } ) def format_query_data_name(self, query_data_name: str) -> str: diff --git a/src/fides/api/task/graph_task.py b/src/fides/api/task/graph_task.py index 6b78b57297..145094ea25 100644 --- a/src/fides/api/task/graph_task.py +++ b/src/fides/api/task/graph_task.py @@ -603,12 +603,19 @@ def erasure_request( *erasure_prereqs: int, # TODO Remove when we stop support for DSR 2.0. DSR 3.0 enforces with downstream_tasks. ) -> int: """Run erasure request""" + # if there is no primary key specified in the graph node configuration # note this in the execution log and perform no erasures on this node - if not self.execution_node.collection.contains_field(lambda f: f.primary_key): + if ( + self.connector.requires_primary_keys + and not self.execution_node.collection.contains_field( + lambda f: f.primary_key + ) + ): logger.warning( - "No erasures on {} as there is no primary_key defined.", + 'Skipping erasures on "{}" as the "{}" connector requires a primary key to be defined in one of the collection fields, but none was found.', self.execution_node.address, + self.connector.configuration.connection_type, ) if self.request_task.id: # For DSR 3.0, largely for testing. 
DSR 3.0 uses Request Task status @@ -617,7 +624,7 @@ def erasure_request( # TODO Remove when we stop support for DSR 2.0 self.resources.cache_erasure(self.key.value, 0) self.update_status( - "No values were erased since no primary key was defined for this collection", + "No values were erased since no primary key was defined in any of the fields for this collection", None, ActionType.erasure, ExecutionLogStatus.complete, diff --git a/src/fides/data/sample_project/sample_resources/postgres_example_custom_request_field_dataset.yml b/src/fides/data/sample_project/sample_resources/postgres_example_custom_request_field_dataset.yml index 96b58645d4..0171258b1d 100644 --- a/src/fides/data/sample_project/sample_resources/postgres_example_custom_request_field_dataset.yml +++ b/src/fides/data/sample_project/sample_resources/postgres_example_custom_request_field_dataset.yml @@ -2,7 +2,7 @@ dataset: - fides_key: postgres_example_custom_request_field_dataset data_categories: [] description: Postgres example dataset with a custom request field - name: Postgrex Example Custom Request Field Dataset + name: Postgres Example Custom Request Field Dataset collections: - name: dynamic_email_address_config fields: diff --git a/tests/fixtures/application_fixtures.py b/tests/fixtures/application_fixtures.py index 8356c42111..d030919aed 100644 --- a/tests/fixtures/application_fixtures.py +++ b/tests/fixtures/application_fixtures.py @@ -864,6 +864,59 @@ def erasure_policy( "rule_id": erasure_rule.id, }, ) + + yield erasure_policy + try: + rule_target.delete(db) + except ObjectDeletedError: + pass + try: + erasure_rule.delete(db) + except ObjectDeletedError: + pass + try: + erasure_policy.delete(db) + except ObjectDeletedError: + pass + + +@pytest.fixture(scope="function") +def erasure_policy_address_city( + db: Session, + oauth_client: ClientDetail, +) -> Generator: + erasure_policy = Policy.create( + db=db, + data={ + "name": "example erasure policy", + "key": "example_erasure_policy", + "client_id": oauth_client.id, + }, + ) + + erasure_rule = Rule.create( + db=db, + data={ + "action_type": ActionType.erasure.value, + "client_id": oauth_client.id, + "name": "Erasure Rule", + "policy_id": erasure_policy.id, + "masking_strategy": { + "strategy": "null_rewrite", + "configuration": {}, + }, + }, + ) + + rule_target = RuleTarget.create( + db=db, + data={ + "client_id": oauth_client.id, + "data_category": DataCategory("user.contact.address.city").value, + "rule_id": erasure_rule.id, + }, + ) + yield erasure_policy try: rule_target.delete(db) diff --git a/tests/fixtures/postgres_fixtures.py b/tests/fixtures/postgres_fixtures.py index 5e2aaec047..6db641fd1d 100644 --- a/tests/fixtures/postgres_fixtures.py +++ b/tests/fixtures/postgres_fixtures.py @@ -2,6 +2,7 @@ from uuid import uuid4 import pytest +from fideslang.models import Dataset as FideslangDataset from sqlalchemy.orm import Session from sqlalchemy.orm.exc import ObjectDeletedError from sqlalchemy_utils.functions import drop_database @@ -23,6 +24,7 @@ from fides.api.models.sql_models import System from fides.api.service.connectors import PostgreSQLConnector from fides.config import CONFIG +from tests.ops.test_helpers.dataset_utils import remove_primary_keys from tests.ops.test_helpers.db_utils import seed_postgres_data from .application_fixtures import integration_secrets @@ -111,6 +113,34 @@ def postgres_example_test_dataset_config_read_access( ctl_dataset.delete(db=db) +@pytest.fixture +def postgres_example_test_dataset_config_read_access_without_primary_keys( + 
read_connection_config: ConnectionConfig, + db: Session, + example_datasets: List[Dict], +) -> Generator: + postgres_dataset = example_datasets[0] + fides_key = postgres_dataset["fides_key"] + + dataset = FideslangDataset(**postgres_dataset) + updated_dataset = remove_primary_keys(dataset) + ctl_dataset = CtlDataset.create_from_dataset_dict( + db, updated_dataset.model_dump(mode="json") + ) + + dataset = DatasetConfig.create( + db=db, + data={ + "connection_config_id": read_connection_config.id, + "fides_key": fides_key, + "ctl_dataset_id": ctl_dataset.id, + }, + ) + yield dataset + dataset.delete(db=db) + ctl_dataset.delete(db=db) + + @pytest.fixture def postgres_example_test_dataset_config_skipped_login_collection( read_connection_config: ConnectionConfig, diff --git a/tests/ops/integration_tests/test_mariadb_task.py b/tests/ops/integration_tests/test_mariadb_task.py new file mode 100644 index 0000000000..3951a2830a --- /dev/null +++ b/tests/ops/integration_tests/test_mariadb_task.py @@ -0,0 +1,101 @@ +import pytest + +from fides.api.models.privacy_request import ExecutionLog + +from ...conftest import access_runner_tester +from ..graph.graph_test_util import assert_rows_match, records_matching_fields +from ..task.traversal_data import integration_db_graph + + +@pytest.mark.integration_mariadb +@pytest.mark.integration +@pytest.mark.asyncio +@pytest.mark.parametrize( + "dsr_version", + ["use_dsr_3_0", "use_dsr_2_0"], +) +async def test_mariadb_access_request_task( + db, + policy, + connection_config_mariadb, + mariadb_integration_db, + dsr_version, + request, + privacy_request, +) -> None: + request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 + + v = access_runner_tester( + privacy_request, + policy, + integration_db_graph("my_maria_db_1"), + [connection_config_mariadb], + {"email": "customer-1@example.com"}, + db, + ) + + assert_rows_match( + v["my_maria_db_1:address"], + min_size=2, + keys=["id", "street", "city", "state", "zip"], + ) + assert_rows_match( + v["my_maria_db_1:orders"], + min_size=3, + keys=["id", "customer_id", "shipping_address_id", "payment_card_id"], + ) + assert_rows_match( + v["my_maria_db_1:payment_card"], + min_size=2, + keys=["id", "name", "ccn", "customer_id", "billing_address_id"], + ) + assert_rows_match( + v["my_maria_db_1:customer"], + min_size=1, + keys=["id", "name", "email", "address_id"], + ) + + # links + assert v["my_maria_db_1:customer"][0]["email"] == "customer-1@example.com" + + logs = ( + ExecutionLog.query(db=db) + .filter(ExecutionLog.privacy_request_id == privacy_request.id) + .all() + ) + + logs = [log.__dict__ for log in logs] + assert ( + len( + records_matching_fields( + logs, dataset_name="my_maria_db_1", collection_name="customer" + ) + ) + > 0 + ) + assert ( + len( + records_matching_fields( + logs, dataset_name="my_maria_db_1", collection_name="address" + ) + ) + > 0 + ) + assert ( + len( + records_matching_fields( + logs, dataset_name="my_maria_db_1", collection_name="orders" + ) + ) + > 0 + ) + assert ( + len( + records_matching_fields( + logs, + dataset_name="my_maria_db_1", + collection_name="payment_card", + ) + ) + > 0 + ) diff --git a/tests/ops/integration_tests/test_mssql_task.py b/tests/ops/integration_tests/test_mssql_task.py new file mode 100644 index 0000000000..6bc23eeda0 --- /dev/null +++ b/tests/ops/integration_tests/test_mssql_task.py @@ -0,0 +1,101 @@ +import pytest + +from fides.api.models.privacy_request import ExecutionLog + +from ...conftest import access_runner_tester +from 
..graph.graph_test_util import assert_rows_match, records_matching_fields +from ..task.traversal_data import integration_db_graph + + +@pytest.mark.integration_mssql +@pytest.mark.integration +@pytest.mark.asyncio +@pytest.mark.parametrize( + "dsr_version", + ["use_dsr_3_0", "use_dsr_2_0"], +) +async def test_mssql_access_request_task( + db, + policy, + connection_config_mssql, + mssql_integration_db, + privacy_request, + dsr_version, + request, +) -> None: + request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 + + v = access_runner_tester( + privacy_request, + policy, + integration_db_graph("my_mssql_db_1"), + [connection_config_mssql], + {"email": "customer-1@example.com"}, + db, + ) + + assert_rows_match( + v["my_mssql_db_1:address"], + min_size=2, + keys=["id", "street", "city", "state", "zip"], + ) + assert_rows_match( + v["my_mssql_db_1:orders"], + min_size=3, + keys=["id", "customer_id", "shipping_address_id", "payment_card_id"], + ) + assert_rows_match( + v["my_mssql_db_1:payment_card"], + min_size=2, + keys=["id", "name", "ccn", "customer_id", "billing_address_id"], + ) + assert_rows_match( + v["my_mssql_db_1:customer"], + min_size=1, + keys=["id", "name", "email", "address_id"], + ) + + # links + assert v["my_mssql_db_1:customer"][0]["email"] == "customer-1@example.com" + + logs = ( + ExecutionLog.query(db=db) + .filter(ExecutionLog.privacy_request_id == privacy_request.id) + .all() + ) + + logs = [log.__dict__ for log in logs] + assert ( + len( + records_matching_fields( + logs, dataset_name="my_mssql_db_1", collection_name="customer" + ) + ) + > 0 + ) + assert ( + len( + records_matching_fields( + logs, dataset_name="my_mssql_db_1", collection_name="address" + ) + ) + > 0 + ) + assert ( + len( + records_matching_fields( + logs, dataset_name="my_mssql_db_1", collection_name="orders" + ) + ) + > 0 + ) + assert ( + len( + records_matching_fields( + logs, + dataset_name="my_mssql_db_1", + collection_name="payment_card", + ) + ) + > 0 + ) diff --git a/tests/ops/integration_tests/test_mysql_task.py b/tests/ops/integration_tests/test_mysql_task.py new file mode 100644 index 0000000000..40551dd4d9 --- /dev/null +++ b/tests/ops/integration_tests/test_mysql_task.py @@ -0,0 +1,101 @@ +import pytest + +from fides.api.models.privacy_request import ExecutionLog + +from ...conftest import access_runner_tester +from ..graph.graph_test_util import assert_rows_match, records_matching_fields +from ..task.traversal_data import integration_db_graph + + +@pytest.mark.integration +@pytest.mark.integration_mysql +@pytest.mark.asyncio +@pytest.mark.parametrize( + "dsr_version", + ["use_dsr_3_0", "use_dsr_2_0"], +) +async def test_mysql_access_request_task( + db, + policy, + connection_config_mysql, + mysql_integration_db, + privacy_request, + dsr_version, + request, +) -> None: + request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 + + v = access_runner_tester( + privacy_request, + policy, + integration_db_graph("my_mysql_db_1"), + [connection_config_mysql], + {"email": "customer-1@example.com"}, + db, + ) + + assert_rows_match( + v["my_mysql_db_1:address"], + min_size=2, + keys=["id", "street", "city", "state", "zip"], + ) + assert_rows_match( + v["my_mysql_db_1:orders"], + min_size=3, + keys=["id", "customer_id", "shipping_address_id", "payment_card_id"], + ) + assert_rows_match( + v["my_mysql_db_1:payment_card"], + min_size=2, + keys=["id", "name", "ccn", "customer_id", "billing_address_id"], + ) + assert_rows_match( + v["my_mysql_db_1:customer"], + 
min_size=1, + keys=["id", "name", "email", "address_id"], + ) + + # links + assert v["my_mysql_db_1:customer"][0]["email"] == "customer-1@example.com" + + logs = ( + ExecutionLog.query(db=db) + .filter(ExecutionLog.privacy_request_id == privacy_request.id) + .all() + ) + + logs = [log.__dict__ for log in logs] + assert ( + len( + records_matching_fields( + logs, dataset_name="my_mysql_db_1", collection_name="customer" + ) + ) + > 0 + ) + assert ( + len( + records_matching_fields( + logs, dataset_name="my_mysql_db_1", collection_name="address" + ) + ) + > 0 + ) + assert ( + len( + records_matching_fields( + logs, dataset_name="my_mysql_db_1", collection_name="orders" + ) + ) + > 0 + ) + assert ( + len( + records_matching_fields( + logs, + dataset_name="my_mysql_db_1", + collection_name="payment_card", + ) + ) + > 0 + ) diff --git a/tests/ops/integration_tests/test_scylladb_task.py b/tests/ops/integration_tests/test_scylladb_task.py new file mode 100644 index 0000000000..8ced1317ad --- /dev/null +++ b/tests/ops/integration_tests/test_scylladb_task.py @@ -0,0 +1,190 @@ +import pytest +from sqlalchemy.orm import Session + +from fides.api.models.privacy_request import ExecutionLogStatus, PrivacyRequest +from fides.api.service.connectors.scylla_connector import ScyllaConnectorMissingKeyspace +from fides.api.task.graph_task import get_cached_data_for_erasures + +from ...conftest import access_runner_tester, erasure_runner_tester +from ..graph.graph_test_util import assert_rows_match, erasure_policy +from ..task.traversal_data import integration_scylladb_graph + + +@pytest.mark.integration +@pytest.mark.integration_scylladb +@pytest.mark.asyncio +class TestScyllaDSRs: + @pytest.mark.parametrize( + "dsr_version", + ["use_dsr_2_0"], + ) + async def test_scylladb_access_request_task_no_keyspace_dsr2( + self, + db: Session, + policy, + integration_scylladb_config, + scylladb_integration_no_keyspace, + privacy_request, + dsr_version, + request, + ) -> None: + request.getfixturevalue(dsr_version) + + with pytest.raises(ScyllaConnectorMissingKeyspace) as err: + v = access_runner_tester( + privacy_request, + policy, + integration_scylladb_graph("scylla_example"), + [integration_scylladb_config], + {"email": "customer-1@example.com"}, + db, + ) + + assert ( + "No keyspace provided in the ScyllaDB configuration for connector scylla_example" + in str(err.value) + ) + + @pytest.mark.parametrize( + "dsr_version", + ["use_dsr_3_0"], + ) + async def test_scylladb_access_request_task_no_keyspace_dsr3( + self, + db, + policy, + integration_scylladb_config, + scylladb_integration_no_keyspace, + privacy_request: PrivacyRequest, + dsr_version, + request, + ) -> None: + request.getfixturevalue(dsr_version) + v = access_runner_tester( + privacy_request, + policy, + integration_scylladb_graph("scylla_example"), + [integration_scylladb_config], + {"email": "customer-1@example.com"}, + db, + ) + + assert v == {} + assert ( + privacy_request.access_tasks.count() == 6 + ) # There's 4 tables plus the root and terminal "dummy" tasks + + # Root task should be completed + assert privacy_request.access_tasks.first().collection_name == "__ROOT__" + assert ( + privacy_request.access_tasks.first().status == ExecutionLogStatus.complete + ) + + # All other tasks should be error + for access_task in privacy_request.access_tasks.offset(1): + assert access_task.status == ExecutionLogStatus.error + + @pytest.mark.parametrize( + "dsr_version", + ["use_dsr_2_0", "use_dsr_3_0"], + ) + async def test_scylladb_access_request_task( + self, + 
db, + policy, + integration_scylladb_config_with_keyspace, + scylla_reset_db, + scylladb_integration_with_keyspace, + privacy_request, + dsr_version, + request, + ) -> None: + request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 + + results = access_runner_tester( + privacy_request, + policy, + integration_scylladb_graph("scylla_example_with_keyspace"), + [integration_scylladb_config_with_keyspace], + {"email": "customer-1@example.com"}, + db, + ) + + assert_rows_match( + results["scylla_example_with_keyspace:users"], + min_size=1, + keys=[ + "age", + "alternative_contacts", + "do_not_contact", + "email", + "name", + "last_contacted", + "logins", + "states_lived", + ], + ) + assert_rows_match( + results["scylla_example_with_keyspace:user_activity"], + min_size=3, + keys=["timestamp", "user_agent", "activity_type"], + ) + assert_rows_match( + results["scylla_example_with_keyspace:payment_methods"], + min_size=2, + keys=["card_number", "expiration_date"], + ) + assert_rows_match( + results["scylla_example_with_keyspace:orders"], + min_size=2, + keys=["order_amount", "order_date", "order_description"], + ) + + @pytest.mark.parametrize( + "dsr_version", + ["use_dsr_2_0", "use_dsr_3_0"], + ) + async def test_scylladb_erasure_task( + self, + db, + integration_scylladb_config_with_keyspace, + scylladb_integration_with_keyspace, + scylla_reset_db, + privacy_request, + dsr_version, + request, + ): + request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 + + seed_email = "customer-1@example.com" + + policy = erasure_policy( + db, "user.name", "user.behavior", "user.device", "user.payment" + ) + privacy_request.policy_id = policy.id + privacy_request.save(db) + + graph = integration_scylladb_graph("scylla_example_with_keyspace") + access_runner_tester( + privacy_request, + policy, + integration_scylladb_graph("scylla_example_with_keyspace"), + [integration_scylladb_config_with_keyspace], + {"email": seed_email}, + db, + ) + results = erasure_runner_tester( + privacy_request, + policy, + graph, + [integration_scylladb_config_with_keyspace], + {"email": seed_email}, + get_cached_data_for_erasures(privacy_request.id), + db, + ) + assert results == { + "scylla_example_with_keyspace:user_activity": 3, + "scylla_example_with_keyspace:users": 1, + "scylla_example_with_keyspace:payment_methods": 2, + "scylla_example_with_keyspace:orders": 2, + } diff --git a/tests/ops/integration_tests/test_sql_task.py b/tests/ops/integration_tests/test_sql_task.py index 298d77229a..bbac30df82 100644 --- a/tests/ops/integration_tests/test_sql_task.py +++ b/tests/ops/integration_tests/test_sql_task.py @@ -6,30 +6,16 @@ import pytest from fideslang import Dataset from sqlalchemy import text -from sqlalchemy.orm import Session - -from fides.api.graph.config import ( - Collection, - CollectionAddress, - FieldAddress, - GraphDataset, - ScalarField, -) + +from fides.api.graph.config import Collection, FieldAddress, GraphDataset, ScalarField from fides.api.graph.data_type import DataType, StringTypeConverter from fides.api.graph.graph import DatasetGraph, Edge, Node from fides.api.graph.traversal import TraversalNode from fides.api.models.connectionconfig import ConnectionConfig from fides.api.models.datasetconfig import convert_dataset_to_graph from fides.api.models.policy import ActionType, Policy, Rule, RuleTarget -from fides.api.models.privacy_request import ( - ExecutionLog, - ExecutionLogStatus, - PrivacyRequest, - PrivacyRequestStatus, - RequestTask, -) +from 
fides.api.models.privacy_request import ExecutionLog, RequestTask from fides.api.service.connectors import get_connector -from fides.api.service.connectors.scylla_connector import ScyllaConnectorMissingKeyspace from fides.api.task.filter_results import filter_data_categories from fides.api.task.graph_task import get_cached_data_for_erasures from fides.config import CONFIG @@ -42,12 +28,7 @@ field, records_matching_fields, ) -from ..task.traversal_data import ( - integration_db_graph, - integration_scylladb_graph, - postgres_db_graph_dataset, - str_converter, -) +from ..task.traversal_data import integration_db_graph, postgres_db_graph_dataset @pytest.mark.integration_postgres @@ -57,7 +38,7 @@ "dsr_version", ["use_dsr_3_0", "use_dsr_2_0"], ) -async def test_sql_erasure_ignores_collections_without_pk( +async def test_sql_erasure_does_not_ignore_collections_without_pk( db, postgres_inserts, integration_postgres_config, @@ -116,7 +97,7 @@ async def test_sql_erasure_ignores_collections_without_pk( .all() ) logs = [log.__dict__ for log in logs] - # since address has no primary_key=True field, it's erasure is skipped + # erasure is not skipped since primary_key is not required assert ( len( records_matching_fields( @@ -126,13 +107,13 @@ async def test_sql_erasure_ignores_collections_without_pk( message="No values were erased since no primary key was defined for this collection", ) ) - == 1 + == 0 ) assert v == { "postgres_example:customer": 1, "postgres_example:payment_card": 0, "postgres_example:orders": 0, - "postgres_example:address": 0, + "postgres_example:address": 2, } @@ -456,516 +437,55 @@ async def test_postgres_privacy_requests_against_non_default_schema( db, ) - # Confirm data retrieved from backup_schema, not public schema. This data only exists in the backup_schema. 
- assert access_results == { - f"{database_name}:address": [ - { - "id": 7, - "street": "Test Street", - "city": "Test Town", - "state": "TX", - "zip": "79843", - } - ], - f"{database_name}:payment_card": [], - f"{database_name}:orders": [], - f"{database_name}:customer": [ - { - "id": 1, - "name": "Johanna Customer", - "email": "customer-500@example.com", - "address_id": 7, - } - ], - } - - erasure_results = erasure_runner_tester( - privacy_request_with_erasure_policy, - erasure_policy, - graph, - [postgres_connection_config_with_schema], - {"email": customer_email}, - get_cached_data_for_erasures(privacy_request_with_erasure_policy.id), - db, - ) - - # Confirm record masked in non-default schema - assert erasure_results == { - f"{database_name}:customer": 1, - f"{database_name}:payment_card": 0, - f"{database_name}:orders": 0, - f"{database_name}:address": 0, - }, "Only one record on customer table has targeted data category" - customer_records = postgres_integration_db.execute( - text("select * from backup_schema.customer where id = 1;") - ) - johanna_record = [c for c in customer_records][0] - assert johanna_record.email == customer_email # Not masked - assert johanna_record.name is None # Masked by erasure request - - -@pytest.mark.integration_mssql -@pytest.mark.integration -@pytest.mark.asyncio -@pytest.mark.parametrize( - "dsr_version", - ["use_dsr_3_0", "use_dsr_2_0"], -) -async def test_mssql_access_request_task( - db, - policy, - connection_config_mssql, - mssql_integration_db, - privacy_request, - dsr_version, - request, -) -> None: - request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 - - v = access_runner_tester( - privacy_request, - policy, - integration_db_graph("my_mssql_db_1"), - [connection_config_mssql], - {"email": "customer-1@example.com"}, - db, - ) - - assert_rows_match( - v["my_mssql_db_1:address"], - min_size=2, - keys=["id", "street", "city", "state", "zip"], - ) - assert_rows_match( - v["my_mssql_db_1:orders"], - min_size=3, - keys=["id", "customer_id", "shipping_address_id", "payment_card_id"], - ) - assert_rows_match( - v["my_mssql_db_1:payment_card"], - min_size=2, - keys=["id", "name", "ccn", "customer_id", "billing_address_id"], - ) - assert_rows_match( - v["my_mssql_db_1:customer"], - min_size=1, - keys=["id", "name", "email", "address_id"], - ) - - # links - assert v["my_mssql_db_1:customer"][0]["email"] == "customer-1@example.com" - - logs = ( - ExecutionLog.query(db=db) - .filter(ExecutionLog.privacy_request_id == privacy_request.id) - .all() - ) - - logs = [log.__dict__ for log in logs] - assert ( - len( - records_matching_fields( - logs, dataset_name="my_mssql_db_1", collection_name="customer" - ) - ) - > 0 - ) - assert ( - len( - records_matching_fields( - logs, dataset_name="my_mssql_db_1", collection_name="address" - ) - ) - > 0 - ) - assert ( - len( - records_matching_fields( - logs, dataset_name="my_mssql_db_1", collection_name="orders" - ) - ) - > 0 - ) - assert ( - len( - records_matching_fields( - logs, - dataset_name="my_mssql_db_1", - collection_name="payment_card", - ) - ) - > 0 - ) - - -@pytest.mark.integration -@pytest.mark.integration_mysql -@pytest.mark.asyncio -@pytest.mark.parametrize( - "dsr_version", - ["use_dsr_3_0", "use_dsr_2_0"], -) -async def test_mysql_access_request_task( - db, - policy, - connection_config_mysql, - mysql_integration_db, - privacy_request, - dsr_version, - request, -) -> None: - request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 - - v = access_runner_tester( 
- privacy_request, - policy, - integration_db_graph("my_mysql_db_1"), - [connection_config_mysql], - {"email": "customer-1@example.com"}, - db, - ) - - assert_rows_match( - v["my_mysql_db_1:address"], - min_size=2, - keys=["id", "street", "city", "state", "zip"], - ) - assert_rows_match( - v["my_mysql_db_1:orders"], - min_size=3, - keys=["id", "customer_id", "shipping_address_id", "payment_card_id"], - ) - assert_rows_match( - v["my_mysql_db_1:payment_card"], - min_size=2, - keys=["id", "name", "ccn", "customer_id", "billing_address_id"], - ) - assert_rows_match( - v["my_mysql_db_1:customer"], - min_size=1, - keys=["id", "name", "email", "address_id"], - ) - - # links - assert v["my_mysql_db_1:customer"][0]["email"] == "customer-1@example.com" - - logs = ( - ExecutionLog.query(db=db) - .filter(ExecutionLog.privacy_request_id == privacy_request.id) - .all() - ) - - logs = [log.__dict__ for log in logs] - assert ( - len( - records_matching_fields( - logs, dataset_name="my_mysql_db_1", collection_name="customer" - ) - ) - > 0 - ) - assert ( - len( - records_matching_fields( - logs, dataset_name="my_mysql_db_1", collection_name="address" - ) - ) - > 0 - ) - assert ( - len( - records_matching_fields( - logs, dataset_name="my_mysql_db_1", collection_name="orders" - ) - ) - > 0 - ) - assert ( - len( - records_matching_fields( - logs, - dataset_name="my_mysql_db_1", - collection_name="payment_card", - ) - ) - > 0 - ) - - -@pytest.mark.integration_mariadb -@pytest.mark.integration -@pytest.mark.asyncio -@pytest.mark.parametrize( - "dsr_version", - ["use_dsr_3_0", "use_dsr_2_0"], -) -async def test_mariadb_access_request_task( - db, - policy, - connection_config_mariadb, - mariadb_integration_db, - dsr_version, - request, - privacy_request, -) -> None: - request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 - - v = access_runner_tester( - privacy_request, - policy, - integration_db_graph("my_maria_db_1"), - [connection_config_mariadb], - {"email": "customer-1@example.com"}, - db, - ) - - assert_rows_match( - v["my_maria_db_1:address"], - min_size=2, - keys=["id", "street", "city", "state", "zip"], - ) - assert_rows_match( - v["my_maria_db_1:orders"], - min_size=3, - keys=["id", "customer_id", "shipping_address_id", "payment_card_id"], - ) - assert_rows_match( - v["my_maria_db_1:payment_card"], - min_size=2, - keys=["id", "name", "ccn", "customer_id", "billing_address_id"], - ) - assert_rows_match( - v["my_maria_db_1:customer"], - min_size=1, - keys=["id", "name", "email", "address_id"], - ) - - # links - assert v["my_maria_db_1:customer"][0]["email"] == "customer-1@example.com" - - logs = ( - ExecutionLog.query(db=db) - .filter(ExecutionLog.privacy_request_id == privacy_request.id) - .all() - ) - - logs = [log.__dict__ for log in logs] - assert ( - len( - records_matching_fields( - logs, dataset_name="my_maria_db_1", collection_name="customer" - ) - ) - > 0 - ) - assert ( - len( - records_matching_fields( - logs, dataset_name="my_maria_db_1", collection_name="address" - ) - ) - > 0 - ) - assert ( - len( - records_matching_fields( - logs, dataset_name="my_maria_db_1", collection_name="orders" - ) - ) - > 0 - ) - assert ( - len( - records_matching_fields( - logs, - dataset_name="my_maria_db_1", - collection_name="payment_card", - ) - ) - > 0 - ) - - -@pytest.mark.integration -@pytest.mark.integration_scylladb -@pytest.mark.asyncio -class TestScyllaDSRs: - @pytest.mark.parametrize( - "dsr_version", - ["use_dsr_2_0"], - ) - async def 
test_scylladb_access_request_task_no_keyspace_dsr2( - self, - db: Session, - policy, - integration_scylladb_config, - scylladb_integration_no_keyspace, - privacy_request, - dsr_version, - request, - ) -> None: - request.getfixturevalue(dsr_version) - - with pytest.raises(ScyllaConnectorMissingKeyspace) as err: - v = access_runner_tester( - privacy_request, - policy, - integration_scylladb_graph("scylla_example"), - [integration_scylladb_config], - {"email": "customer-1@example.com"}, - db, - ) - - assert ( - "No keyspace provided in the ScyllaDB configuration for connector scylla_example" - in str(err.value) - ) - - @pytest.mark.parametrize( - "dsr_version", - ["use_dsr_3_0"], - ) - async def test_scylladb_access_request_task_no_keyspace_dsr3( - self, - db, - policy, - integration_scylladb_config, - scylladb_integration_no_keyspace, - privacy_request: PrivacyRequest, - dsr_version, - request, - ) -> None: - request.getfixturevalue(dsr_version) - v = access_runner_tester( - privacy_request, - policy, - integration_scylladb_graph("scylla_example"), - [integration_scylladb_config], - {"email": "customer-1@example.com"}, - db, - ) - - assert v == {} - assert ( - privacy_request.access_tasks.count() == 6 - ) # There's 4 tables plus the root and terminal "dummy" tasks - - # Root task should be completed - assert privacy_request.access_tasks.first().collection_name == "__ROOT__" - assert ( - privacy_request.access_tasks.first().status == ExecutionLogStatus.complete - ) - - # All other tasks should be error - for access_task in privacy_request.access_tasks.offset(1): - assert access_task.status == ExecutionLogStatus.error - - @pytest.mark.parametrize( - "dsr_version", - ["use_dsr_2_0", "use_dsr_3_0"], - ) - async def test_scylladb_access_request_task( - self, - db, - policy, - integration_scylladb_config_with_keyspace, - scylla_reset_db, - scylladb_integration_with_keyspace, - privacy_request, - dsr_version, - request, - ) -> None: - request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 - - results = access_runner_tester( - privacy_request, - policy, - integration_scylladb_graph("scylla_example_with_keyspace"), - [integration_scylladb_config_with_keyspace], - {"email": "customer-1@example.com"}, - db, - ) - - assert_rows_match( - results["scylla_example_with_keyspace:users"], - min_size=1, - keys=[ - "age", - "alternative_contacts", - "do_not_contact", - "email", - "name", - "last_contacted", - "logins", - "states_lived", - ], - ) - assert_rows_match( - results["scylla_example_with_keyspace:user_activity"], - min_size=3, - keys=["timestamp", "user_agent", "activity_type"], - ) - assert_rows_match( - results["scylla_example_with_keyspace:payment_methods"], - min_size=2, - keys=["card_number", "expiration_date"], - ) - assert_rows_match( - results["scylla_example_with_keyspace:orders"], - min_size=2, - keys=["order_amount", "order_date", "order_description"], - ) - - @pytest.mark.parametrize( - "dsr_version", - ["use_dsr_2_0", "use_dsr_3_0"], - ) - async def test_scylladb_erasure_task( - self, - db, - integration_scylladb_config_with_keyspace, - scylladb_integration_with_keyspace, - scylla_reset_db, - privacy_request, - dsr_version, - request, - ): - request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 - - seed_email = "customer-1@example.com" + # Confirm data retrieved from backup_schema, not public schema. This data only exists in the backup_schema. 
+ assert access_results == { + f"{database_name}:address": [ + { + "id": 7, + "street": "Test Street", + "city": "Test Town", + "state": "TX", + "zip": "79843", + } + ], + f"{database_name}:payment_card": [], + f"{database_name}:orders": [], + f"{database_name}:customer": [ + { + "id": 1, + "name": "Johanna Customer", + "email": "customer-500@example.com", + "address_id": 7, + } + ], + } - policy = erasure_policy( - db, "user.name", "user.behavior", "user.device", "user.payment" - ) - privacy_request.policy_id = policy.id - privacy_request.save(db) + erasure_results = erasure_runner_tester( + privacy_request_with_erasure_policy, + erasure_policy, + graph, + [postgres_connection_config_with_schema], + {"email": customer_email}, + get_cached_data_for_erasures(privacy_request_with_erasure_policy.id), + db, + ) - graph = integration_scylladb_graph("scylla_example_with_keyspace") - access_runner_tester( - privacy_request, - policy, - integration_scylladb_graph("scylla_example_with_keyspace"), - [integration_scylladb_config_with_keyspace], - {"email": seed_email}, - db, - ) - results = erasure_runner_tester( - privacy_request, - policy, - graph, - [integration_scylladb_config_with_keyspace], - {"email": seed_email}, - get_cached_data_for_erasures(privacy_request.id), - db, - ) - assert results == { - "scylla_example_with_keyspace:user_activity": 3, - "scylla_example_with_keyspace:users": 1, - "scylla_example_with_keyspace:payment_methods": 2, - "scylla_example_with_keyspace:orders": 2, - } + # Confirm record masked in non-default schema + assert erasure_results == { + f"{database_name}:customer": 1, + f"{database_name}:payment_card": 0, + f"{database_name}:orders": 0, + f"{database_name}:address": 0, + }, "Only one record on customer table has targeted data category" + customer_records = postgres_integration_db.execute( + text("select * from backup_schema.customer where id = 1;") + ) + johanna_record = [c for c in customer_records][0] + assert johanna_record.email == customer_email # Not masked + assert johanna_record.name is None # Masked by erasure request +@pytest.mark.integration_postgres @pytest.mark.integration @pytest.mark.asyncio @pytest.mark.parametrize( @@ -1565,18 +1085,17 @@ async def test_retry_erasure( execution_logs = db.query(ExecutionLog).filter_by( privacy_request_id=privacy_request.id, action_type=ActionType.erasure ) - assert 40 == execution_logs.count() + assert 44 == execution_logs.count() - # These nodes were able to complete because they didn't have a PK - nothing to erase visit_logs = execution_logs.filter_by(collection_name="visit") - assert {"in_processing", "complete"} == { + assert ["in_processing", "retrying", "retrying", "error"] == [ el.status.value for el in visit_logs - } + ] order_item_logs = execution_logs.filter_by(collection_name="order_item") - assert {"in_processing", "complete"} == { + assert ["in_processing", "retrying", "retrying", "error"] == [ el.status.value for el in order_item_logs - } + ] # Address log mask data couldn't run, attempted to retry twice per configuration address_logs = execution_logs.filter_by(collection_name="address").order_by( ExecutionLog.created_at @@ -1585,297 +1104,19 @@ async def test_retry_erasure( el.status.value for el in address_logs ] - # Downstream request tasks were marked as error. Some tasks completed because there is no PK - # on their collection and we can't erase - assert {rt.status.value for rt in privacy_request.erasure_tasks} == { + # Downstream request tasks (other than __ROOT__) were marked as error. 
+ assert [rt.status.value for rt in privacy_request.erasure_tasks] == [ "complete", "error", "error", "error", - "complete", "error", "error", "error", "error", "error", - "complete", "error", "error", - } - - -@pytest.mark.integration_timescale -@pytest.mark.integration -@pytest.mark.asyncio -@pytest.mark.parametrize( - "dsr_version", - ["use_dsr_3_0", "use_dsr_2_0"], -) -async def test_timescale_access_request_task( - db, - policy, - timescale_connection_config, - timescale_integration_db, - privacy_request, - dsr_version, - request, -) -> None: - database_name = "my_timescale_db_1" - request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 - - v = access_runner_tester( - privacy_request, - policy, - integration_db_graph(database_name), - [timescale_connection_config], - {"email": "customer-1@example.com"}, - db, - ) - - assert_rows_match( - v[f"{database_name}:address"], - min_size=2, - keys=["id", "street", "city", "state", "zip"], - ) - assert_rows_match( - v[f"{database_name}:orders"], - min_size=3, - keys=["id", "customer_id", "shipping_address_id", "payment_card_id"], - ) - assert_rows_match( - v[f"{database_name}:payment_card"], - min_size=2, - keys=["id", "name", "ccn", "customer_id", "billing_address_id"], - ) - assert_rows_match( - v[f"{database_name}:customer"], - min_size=1, - keys=["id", "name", "email", "address_id"], - ) - - # links - assert v[f"{database_name}:customer"][0]["email"] == "customer-1@example.com" - - logs = ( - ExecutionLog.query(db=db) - .filter(ExecutionLog.privacy_request_id == privacy_request.id) - .all() - ) - - logs = [log.__dict__ for log in logs] - - assert ( - len( - records_matching_fields( - logs, dataset_name=database_name, collection_name="customer" - ) - ) - > 0 - ) - - assert ( - len( - records_matching_fields( - logs, dataset_name=database_name, collection_name="address" - ) - ) - > 0 - ) - - assert ( - len( - records_matching_fields( - logs, dataset_name=database_name, collection_name="orders" - ) - ) - > 0 - ) - - assert ( - len( - records_matching_fields( - logs, - dataset_name=database_name, - collection_name="payment_card", - ) - ) - > 0 - ) - - -@pytest.mark.integration_timescale -@pytest.mark.integration -@pytest.mark.asyncio -@pytest.mark.parametrize( - "dsr_version", - ["use_dsr_3_0", "use_dsr_2_0"], -) -async def test_timescale_erasure_request_task( - db, - erasure_policy, - timescale_connection_config, - timescale_integration_db, - privacy_request_with_erasure_policy, - dsr_version, - request, -) -> None: - request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 - - rule = erasure_policy.rules[0] - target = rule.targets[0] - target.data_category = "user" - target.save(db) - - database_name = "my_timescale_db_1" - - dataset = postgres_db_graph_dataset(database_name, timescale_connection_config.key) - - # Set some data categories on fields that will be targeted by the policy above - field([dataset], database_name, "customer", "name").data_categories = ["user.name"] - field([dataset], database_name, "address", "street").data_categories = ["user"] - field([dataset], database_name, "payment_card", "ccn").data_categories = ["user"] - - graph = DatasetGraph(dataset) - - v = access_runner_tester( - privacy_request_with_erasure_policy, - erasure_policy, - graph, - [timescale_connection_config], - {"email": "customer-1@example.com"}, - db, - ) - - v = erasure_runner_tester( - privacy_request_with_erasure_policy, - erasure_policy, - graph, - [timescale_connection_config], - {"email": 
"customer-1@example.com"}, - get_cached_data_for_erasures(privacy_request_with_erasure_policy.id), - db, - ) - assert v == { - f"{database_name}:customer": 1, - f"{database_name}:orders": 0, - f"{database_name}:payment_card": 2, - f"{database_name}:address": 2, - }, "No erasure on orders table - no data categories targeted" - - # Verify masking in appropriate tables - address_cursor = timescale_integration_db.execute( - text("select * from address where id in (1, 2)") - ) - for address in address_cursor: - assert address.street is None # Masked due to matching data category - assert address.state is not None - assert address.city is not None - assert address.zip is not None - - customer_cursor = timescale_integration_db.execute( - text("select * from customer where id = 1") - ) - customer = [customer for customer in customer_cursor][0] - assert customer.name is None # Masked due to matching data category - assert customer.email == "customer-1@example.com" - assert customer.address_id is not None - - payment_card_cursor = timescale_integration_db.execute( - text("select * from payment_card where id in ('pay_aaa-aaa', 'pay_bbb-bbb')") - ) - payment_cards = [card for card in payment_card_cursor] - assert all( - [card.ccn is None for card in payment_cards] - ) # Masked due to matching data category - assert not any([card.name is None for card in payment_cards]) is None - - -@pytest.mark.integration_timescale -@pytest.mark.integration -@pytest.mark.asyncio -@pytest.mark.parametrize( - "dsr_version", - ["use_dsr_3_0", "use_dsr_2_0"], -) -async def test_timescale_query_and_mask_hypertable( - db, - erasure_policy, - timescale_connection_config, - timescale_integration_db, - privacy_request_with_erasure_policy, - dsr_version, - request, -) -> None: - request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 - - database_name = "my_timescale_db_1" - - dataset = postgres_db_graph_dataset(database_name, timescale_connection_config.key) - # For this test, add a new collection to our standard dataset corresponding to the - # "onsite_personnel" timescale hypertable - onsite_personnel_collection = Collection( - name="onsite_personnel", - fields=[ - ScalarField( - name="responsible", data_type_converter=str_converter, identity="email" - ), - ScalarField( - name="time", data_type_converter=str_converter, primary_key=True - ), - ], - ) - - dataset.collections.append(onsite_personnel_collection) - graph = DatasetGraph(dataset) - rule = erasure_policy.rules[0] - target = rule.targets[0] - target.data_category = "user" - target.save(db) - # Update data category on responsible field - field( - [dataset], database_name, "onsite_personnel", "responsible" - ).data_categories = ["user.contact.email"] - - access_results = access_runner_tester( - privacy_request_with_erasure_policy, - erasure_policy, - graph, - [timescale_connection_config], - {"email": "employee-1@example.com"}, - db, - ) - - # Demonstrate hypertable can be queried - assert access_results[f"{database_name}:onsite_personnel"] == [ - {"responsible": "employee-1@example.com", "time": datetime(2022, 1, 1, 9, 0)}, - {"responsible": "employee-1@example.com", "time": datetime(2022, 1, 2, 9, 0)}, - {"responsible": "employee-1@example.com", "time": datetime(2022, 1, 3, 9, 0)}, - {"responsible": "employee-1@example.com", "time": datetime(2022, 1, 5, 9, 0)}, - ] - - # Run an erasure on the hypertable targeting the responsible field - v = erasure_runner_tester( - privacy_request_with_erasure_policy, - erasure_policy, - graph, - 
[timescale_connection_config], - {"email": "employee-1@example.com"}, - get_cached_data_for_erasures(privacy_request_with_erasure_policy.id), - db, - ) - - assert v == { - f"{database_name}:customer": 0, - f"{database_name}:orders": 0, - f"{database_name}:payment_card": 0, - f"{database_name}:address": 0, - f"{database_name}:onsite_personnel": 4, - }, "onsite_personnel.responsible was the only targeted data category" - - personnel_records = timescale_integration_db.execute( - text("select * from onsite_personnel") - ) - for record in personnel_records: - assert ( - record.responsible != "employee-1@example.com" - ) # These emails have all been masked + "error", + "error", + ] diff --git a/tests/ops/integration_tests/test_timescale_task.py b/tests/ops/integration_tests/test_timescale_task.py new file mode 100644 index 0000000000..97af65ce65 --- /dev/null +++ b/tests/ops/integration_tests/test_timescale_task.py @@ -0,0 +1,294 @@ +from datetime import datetime + +import pytest +from sqlalchemy import text + +from fides.api.graph.config import Collection, ScalarField +from fides.api.graph.graph import DatasetGraph +from fides.api.models.privacy_request import ExecutionLog +from fides.api.task.graph_task import get_cached_data_for_erasures + +from ...conftest import access_runner_tester, erasure_runner_tester +from ..graph.graph_test_util import assert_rows_match, field, records_matching_fields +from ..task.traversal_data import ( + integration_db_graph, + postgres_db_graph_dataset, + str_converter, +) + + +@pytest.mark.integration_timescale +@pytest.mark.integration +@pytest.mark.asyncio +@pytest.mark.parametrize( + "dsr_version", + ["use_dsr_3_0", "use_dsr_2_0"], +) +async def test_timescale_access_request_task( + db, + policy, + timescale_connection_config, + timescale_integration_db, + privacy_request, + dsr_version, + request, +) -> None: + database_name = "my_timescale_db_1" + request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 + + v = access_runner_tester( + privacy_request, + policy, + integration_db_graph(database_name), + [timescale_connection_config], + {"email": "customer-1@example.com"}, + db, + ) + + assert_rows_match( + v[f"{database_name}:address"], + min_size=2, + keys=["id", "street", "city", "state", "zip"], + ) + assert_rows_match( + v[f"{database_name}:orders"], + min_size=3, + keys=["id", "customer_id", "shipping_address_id", "payment_card_id"], + ) + assert_rows_match( + v[f"{database_name}:payment_card"], + min_size=2, + keys=["id", "name", "ccn", "customer_id", "billing_address_id"], + ) + assert_rows_match( + v[f"{database_name}:customer"], + min_size=1, + keys=["id", "name", "email", "address_id"], + ) + + # links + assert v[f"{database_name}:customer"][0]["email"] == "customer-1@example.com" + + logs = ( + ExecutionLog.query(db=db) + .filter(ExecutionLog.privacy_request_id == privacy_request.id) + .all() + ) + + logs = [log.__dict__ for log in logs] + + assert ( + len( + records_matching_fields( + logs, dataset_name=database_name, collection_name="customer" + ) + ) + > 0 + ) + + assert ( + len( + records_matching_fields( + logs, dataset_name=database_name, collection_name="address" + ) + ) + > 0 + ) + + assert ( + len( + records_matching_fields( + logs, dataset_name=database_name, collection_name="orders" + ) + ) + > 0 + ) + + assert ( + len( + records_matching_fields( + logs, + dataset_name=database_name, + collection_name="payment_card", + ) + ) + > 0 + ) + + +@pytest.mark.integration_timescale +@pytest.mark.integration 
+@pytest.mark.asyncio +@pytest.mark.parametrize( + "dsr_version", + ["use_dsr_3_0", "use_dsr_2_0"], +) +async def test_timescale_erasure_request_task( + db, + erasure_policy, + timescale_connection_config, + timescale_integration_db, + privacy_request_with_erasure_policy, + dsr_version, + request, +) -> None: + request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 + + rule = erasure_policy.rules[0] + target = rule.targets[0] + target.data_category = "user" + target.save(db) + + database_name = "my_timescale_db_1" + + dataset = postgres_db_graph_dataset(database_name, timescale_connection_config.key) + + # Set some data categories on fields that will be targeted by the policy above + field([dataset], database_name, "customer", "name").data_categories = ["user.name"] + field([dataset], database_name, "address", "street").data_categories = ["user"] + field([dataset], database_name, "payment_card", "ccn").data_categories = ["user"] + + graph = DatasetGraph(dataset) + + v = access_runner_tester( + privacy_request_with_erasure_policy, + erasure_policy, + graph, + [timescale_connection_config], + {"email": "customer-1@example.com"}, + db, + ) + + v = erasure_runner_tester( + privacy_request_with_erasure_policy, + erasure_policy, + graph, + [timescale_connection_config], + {"email": "customer-1@example.com"}, + get_cached_data_for_erasures(privacy_request_with_erasure_policy.id), + db, + ) + assert v == { + f"{database_name}:customer": 1, + f"{database_name}:orders": 0, + f"{database_name}:payment_card": 2, + f"{database_name}:address": 2, + }, "No erasure on orders table - no data categories targeted" + + # Verify masking in appropriate tables + address_cursor = timescale_integration_db.execute( + text("select * from address where id in (1, 2)") + ) + for address in address_cursor: + assert address.street is None # Masked due to matching data category + assert address.state is not None + assert address.city is not None + assert address.zip is not None + + customer_cursor = timescale_integration_db.execute( + text("select * from customer where id = 1") + ) + customer = [customer for customer in customer_cursor][0] + assert customer.name is None # Masked due to matching data category + assert customer.email == "customer-1@example.com" + assert customer.address_id is not None + + payment_card_cursor = timescale_integration_db.execute( + text("select * from payment_card where id in ('pay_aaa-aaa', 'pay_bbb-bbb')") + ) + payment_cards = [card for card in payment_card_cursor] + assert all( + [card.ccn is None for card in payment_cards] + ) # Masked due to matching data category + assert not any([card.name is None for card in payment_cards]) is None + + +@pytest.mark.integration_timescale +@pytest.mark.integration +@pytest.mark.asyncio +@pytest.mark.parametrize( + "dsr_version", + ["use_dsr_3_0", "use_dsr_2_0"], +) +async def test_timescale_query_and_mask_hypertable( + db, + erasure_policy, + timescale_connection_config, + timescale_integration_db, + privacy_request_with_erasure_policy, + dsr_version, + request, +) -> None: + request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 + + database_name = "my_timescale_db_1" + + dataset = postgres_db_graph_dataset(database_name, timescale_connection_config.key) + # For this test, add a new collection to our standard dataset corresponding to the + # "onsite_personnel" timescale hypertable + onsite_personnel_collection = Collection( + name="onsite_personnel", + fields=[ + ScalarField( + name="responsible", 
data_type_converter=str_converter, identity="email" + ), + ScalarField( + name="time", data_type_converter=str_converter, primary_key=True + ), + ], + ) + + dataset.collections.append(onsite_personnel_collection) + graph = DatasetGraph(dataset) + rule = erasure_policy.rules[0] + target = rule.targets[0] + target.data_category = "user" + target.save(db) + # Update data category on responsible field + field( + [dataset], database_name, "onsite_personnel", "responsible" + ).data_categories = ["user.contact.email"] + + access_results = access_runner_tester( + privacy_request_with_erasure_policy, + erasure_policy, + graph, + [timescale_connection_config], + {"email": "employee-1@example.com"}, + db, + ) + + # Demonstrate hypertable can be queried + assert access_results[f"{database_name}:onsite_personnel"] == [ + {"responsible": "employee-1@example.com", "time": datetime(2022, 1, 1, 9, 0)}, + {"responsible": "employee-1@example.com", "time": datetime(2022, 1, 2, 9, 0)}, + {"responsible": "employee-1@example.com", "time": datetime(2022, 1, 3, 9, 0)}, + {"responsible": "employee-1@example.com", "time": datetime(2022, 1, 5, 9, 0)}, + ] + + # Run an erasure on the hypertable targeting the responsible field + v = erasure_runner_tester( + privacy_request_with_erasure_policy, + erasure_policy, + graph, + [timescale_connection_config], + {"email": "employee-1@example.com"}, + get_cached_data_for_erasures(privacy_request_with_erasure_policy.id), + db, + ) + + assert v == { + f"{database_name}:customer": 0, + f"{database_name}:orders": 0, + f"{database_name}:payment_card": 0, + f"{database_name}:address": 0, + f"{database_name}:onsite_personnel": 4, + }, "onsite_personnel.responsible was the only targeted data category" + + personnel_records = timescale_integration_db.execute( + text("select * from onsite_personnel") + ) + for record in personnel_records: + assert ( + record.responsible != "employee-1@example.com" + ) # These emails have all been masked diff --git a/tests/ops/service/connectors/test_bigquery_connector.py b/tests/ops/service/connectors/test_bigquery_connector.py index a9524777fe..2e7bc3b075 100644 --- a/tests/ops/service/connectors/test_bigquery_connector.py +++ b/tests/ops/service/connectors/test_bigquery_connector.py @@ -129,7 +129,7 @@ def test_generate_update_partitioned_table( assert len(updates) == 2 assert ( str(updates[0]) - == "UPDATE `silken-precinct-284918.fidesopstest.customer` SET `name`=%(name:STRING)s WHERE `silken-precinct-284918.fidesopstest.customer`.`id` = %(id_1:INT64)s AND `created` > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 1000 DAY) AND `created` <= CURRENT_TIMESTAMP()" + == "UPDATE `silken-precinct-284918.fidesopstest.customer` SET `name`=%(name:STRING)s WHERE `silken-precinct-284918.fidesopstest.customer`.`email` = %(email_1:STRING)s AND `created` > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 1000 DAY) AND `created` <= CURRENT_TIMESTAMP()" ) def test_generate_delete_partitioned_table( @@ -158,7 +158,7 @@ def test_generate_delete_partitioned_table( assert len(deletes) == 2 assert ( str(deletes[0]) - == "DELETE FROM `silken-precinct-284918.fidesopstest.customer` WHERE `silken-precinct-284918.fidesopstest.customer`.`id` = %(id_1:INT64)s AND `created` > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 1000 DAY) AND `created` <= CURRENT_TIMESTAMP()" + == "DELETE FROM `silken-precinct-284918.fidesopstest.customer` WHERE `silken-precinct-284918.fidesopstest.customer`.`email` = %(email_1:STRING)s AND `created` > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 1000 DAY) AND 
`created` <= CURRENT_TIMESTAMP()" ) def test_retrieve_partitioned_data( diff --git a/tests/ops/service/connectors/test_bigquery_queryconfig.py b/tests/ops/service/connectors/test_bigquery_queryconfig.py index 06c51c5105..24a16517b6 100644 --- a/tests/ops/service/connectors/test_bigquery_queryconfig.py +++ b/tests/ops/service/connectors/test_bigquery_queryconfig.py @@ -196,7 +196,7 @@ def test_generate_delete_stmt( ) stmts = set(str(stmt) for stmt in delete_stmts) expected_stmts = { - "DELETE FROM `employee` WHERE `employee`.`id` = %(id_1:STRING)s" + "DELETE FROM `employee` WHERE `employee`.`address_id` = %(address_id_1:STRING)s AND `employee`.`email` = %(email_1:STRING)s" } assert stmts == expected_stmts @@ -289,6 +289,6 @@ def test_generate_namespaced_delete_stmt( ) stmts = set(str(stmt) for stmt in delete_stmts) expected_stmts = { - "DELETE FROM `silken-precinct-284918.fidesopstest.employee` WHERE `silken-precinct-284918.fidesopstest.employee`.`id` = %(id_1:STRING)s" + "DELETE FROM `silken-precinct-284918.fidesopstest.employee` WHERE `silken-precinct-284918.fidesopstest.employee`.`address_id` = %(address_id_1:STRING)s AND `silken-precinct-284918.fidesopstest.employee`.`email` = %(email_1:STRING)s" } assert stmts == expected_stmts diff --git a/tests/ops/service/connectors/test_dynamodb_query_config.py b/tests/ops/service/connectors/test_dynamodb_query_config.py new file mode 100644 index 0000000000..4591ae9385 --- /dev/null +++ b/tests/ops/service/connectors/test_dynamodb_query_config.py @@ -0,0 +1,129 @@ +from datetime import datetime, timezone + +import pytest +from boto3.dynamodb.types import TypeDeserializer +from fideslang.models import Dataset + +from fides.api.graph.config import CollectionAddress +from fides.api.graph.graph import DatasetGraph +from fides.api.graph.traversal import Traversal +from fides.api.models.datasetconfig import convert_dataset_to_graph +from fides.api.models.privacy_request import PrivacyRequest +from fides.api.service.connectors.query_configs.dynamodb_query_config import ( + DynamoDBQueryConfig, +) + +privacy_request = PrivacyRequest(id="234544") + + +class TestDynamoDBQueryConfig: + @pytest.fixture(scope="function") + def identity(self): + identity = {"email": "customer-test_uuid@example.com"} + return identity + + @pytest.fixture(scope="function") + def dataset_graph(self, integration_dynamodb_config, example_datasets): + dataset = Dataset(**example_datasets[11]) + dataset_graph = convert_dataset_to_graph( + dataset, integration_dynamodb_config.key + ) + + return DatasetGraph(*[dataset_graph]) + + @pytest.fixture(scope="function") + def traversal(self, identity, dataset_graph): + dynamo_traversal = Traversal(dataset_graph, identity) + return dynamo_traversal + + @pytest.fixture(scope="function") + def customer_node(self, traversal): + return traversal.traversal_node_dict[ + CollectionAddress("dynamodb_example_test_dataset", "customer") + ].to_mock_execution_node() + + @pytest.fixture(scope="function") + def customer_identifier_node(self, traversal): + return traversal.traversal_node_dict[ + CollectionAddress("dynamodb_example_test_dataset", "customer_identifier") + ].to_mock_execution_node() + + @pytest.fixture(scope="function") + def customer_row(self): + row = { + "customer_email": {"S": "customer-1@example.com"}, + "name": {"S": "John Customer"}, + "address_id": {"L": [{"S": "1"}, {"S": "2"}]}, + "personal_info": {"M": {"gender": {"S": "male"}, "age": {"S": "99"}}}, + "id": {"S": "1"}, + } + return row + + @pytest.fixture(scope="function") + def 
deserialized_customer_row(self, customer_row): + deserialized_customer_row = {} + deserializer = TypeDeserializer() + for key, value in customer_row.items(): + deserialized_customer_row[key] = deserializer.deserialize(value) + return deserialized_customer_row + + @pytest.fixture(scope="function") + def customer_identifier_row(self): + row = { + "customer_id": {"S": "customer-1@example.com"}, + "email": {"S": "customer-1@example.com"}, + "name": {"S": "Customer 1"}, + "created": {"S": datetime.now(timezone.utc).isoformat()}, + } + return row + + @pytest.fixture(scope="function") + def deserialized_customer_identifier_row(self, customer_identifier_row): + deserialized_customer_identifier_row = {} + deserializer = TypeDeserializer() + for key, value in customer_identifier_row.items(): + deserialized_customer_identifier_row[key] = deserializer.deserialize(value) + return deserialized_customer_identifier_row + + def test_get_query_param_formatting_single_key( + self, + resources_dict, + customer_node, + ) -> None: + input_data = { + "fidesops_grouped_inputs": [], + "email": ["customer-test_uuid@example.com"], + } + attribute_definitions = [{"AttributeName": "email", "AttributeType": "S"}] + query_config = DynamoDBQueryConfig(customer_node, attribute_definitions) + item = query_config.generate_query( + input_data=input_data, policy=resources_dict["policy"] + ) + assert item["ExpressionAttributeValues"] == { + ":value": {"S": "customer-test_uuid@example.com"} + } + assert item["KeyConditionExpression"] == "email = :value" + + def test_put_query_param_formatting_single_key( + self, + erasure_policy, + customer_node, + deserialized_customer_row, + ) -> None: + input_data = { + "fidesops_grouped_inputs": [], + "email": ["customer-test_uuid@example.com"], + } + attribute_definitions = [{"AttributeName": "email", "AttributeType": "S"}] + query_config = DynamoDBQueryConfig(customer_node, attribute_definitions) + update_item = query_config.generate_update_stmt( + deserialized_customer_row, erasure_policy, privacy_request + ) + + assert update_item == { + "customer_email": {"S": "customer-1@example.com"}, + "name": {"NULL": True}, + "address_id": {"L": [{"S": "1"}, {"S": "2"}]}, + "personal_info": {"M": {"gender": {"S": "male"}, "age": {"S": "99"}}}, + "id": {"S": "1"}, + } diff --git a/tests/ops/service/connectors/test_mongo_query_config.py b/tests/ops/service/connectors/test_mongo_query_config.py new file mode 100644 index 0000000000..c0f6079df1 --- /dev/null +++ b/tests/ops/service/connectors/test_mongo_query_config.py @@ -0,0 +1,283 @@ +import pytest +from fideslang.models import Dataset + +from fides.api.graph.config import ( + CollectionAddress, + FieldAddress, + FieldPath, + ObjectField, + ScalarField, +) +from fides.api.graph.graph import DatasetGraph, Edge +from fides.api.graph.traversal import Traversal +from fides.api.models.datasetconfig import convert_dataset_to_graph +from fides.api.models.privacy_request import PrivacyRequest +from fides.api.schemas.masking.masking_configuration import HashMaskingConfiguration +from fides.api.schemas.masking.masking_secrets import MaskingSecretCache, SecretType +from fides.api.service.connectors.query_configs.mongodb_query_config import ( + MongoQueryConfig, +) +from fides.api.service.masking.strategy.masking_strategy_hash import HashMaskingStrategy +from fides.api.util.data_category import DataCategory + +from ...task.traversal_data import combined_mongo_postgresql_graph +from ...test_helpers.cache_secrets_helper import cache_secret + +privacy_request = 
PrivacyRequest(id="234544") + + +class TestMongoQueryConfig: + @pytest.fixture(scope="function") + def combined_traversal(self, connection_config, integration_mongodb_config): + mongo_dataset, postgres_dataset = combined_mongo_postgresql_graph( + connection_config, integration_mongodb_config + ) + combined_dataset_graph = DatasetGraph(mongo_dataset, postgres_dataset) + combined_traversal = Traversal( + combined_dataset_graph, + {"email": "customer-1@examplecom"}, + ) + return combined_traversal + + @pytest.fixture(scope="function") + def customer_details_node(self, combined_traversal): + return combined_traversal.traversal_node_dict[ + CollectionAddress("mongo_test", "customer_details") + ].to_mock_execution_node() + + @pytest.fixture(scope="function") + def customer_feedback_node(self, combined_traversal): + return combined_traversal.traversal_node_dict[ + CollectionAddress("mongo_test", "customer_feedback") + ].to_mock_execution_node() + + def test_field_map_nested(self, customer_details_node): + config = MongoQueryConfig(customer_details_node) + + field_map = config.field_map() + assert isinstance(field_map[FieldPath("workplace_info")], ObjectField) + assert isinstance( + field_map[FieldPath("workplace_info", "employer")], ScalarField + ) + + def test_primary_key_field_paths(self, customer_details_node): + config = MongoQueryConfig(customer_details_node) + assert list(config.primary_key_field_paths.keys()) == [FieldPath("_id")] + assert isinstance(config.primary_key_field_paths[FieldPath("_id")], ScalarField) + + def test_nested_query_field_paths( + self, customer_details_node, customer_feedback_node + ): + assert customer_details_node.query_field_paths == { + FieldPath("customer_id"), + } + + assert customer_feedback_node.query_field_paths == { + FieldPath("customer_information", "email") + } + + def test_nested_typed_filtered_values(self, customer_feedback_node): + """Identity data is located on a nested object""" + input_data = { + "customer_information.email": ["test@example.com"], + "ignore": ["abcde"], + } + assert customer_feedback_node.typed_filtered_values(input_data) == { + "customer_information.email": ["test@example.com"] + } + + def test_generate_query( + self, + policy, + example_datasets, + integration_mongodb_config, + connection_config, + ): + dataset_postgres = Dataset(**example_datasets[0]) + graph = convert_dataset_to_graph(dataset_postgres, connection_config.key) + dataset_mongo = Dataset(**example_datasets[1]) + mongo_graph = convert_dataset_to_graph( + dataset_mongo, integration_mongodb_config.key + ) + dataset_graph = DatasetGraph(*[graph, mongo_graph]) + traversal = Traversal(dataset_graph, {"email": "customer-1@example.com"}) + # Edge created from Root to nested customer_information.email field + assert ( + Edge( + FieldAddress("__ROOT__", "__ROOT__", "email"), + FieldAddress( + "mongo_test", "customer_feedback", "customer_information", "email" + ), + ) + in traversal.edges + ) + + # Test query on nested field + customer_feedback = traversal.traversal_node_dict[ + CollectionAddress("mongo_test", "customer_feedback") + ].to_mock_execution_node() + config = MongoQueryConfig(customer_feedback) + input_data = {"customer_information.email": ["customer-1@example.com"]} + # Tuple of query, projection - Searching for documents with nested + # customer_information.email = customer-1@example.com + assert config.generate_query(input_data, policy) == ( + {"customer_information.email": "customer-1@example.com"}, + {"_id": 1, "customer_information": 1, "date": 1, "message": 
1, "rating": 1}, + ) + + # Test query nested data + customer_details = traversal.traversal_node_dict[ + CollectionAddress("mongo_test", "customer_details") + ].to_mock_execution_node() + config = MongoQueryConfig(customer_details) + input_data = {"customer_id": [1]} + # Tuple of query, projection - Projection is specifying fields at the top-level. Nested data will + # be filtered later. + assert config.generate_query(input_data, policy) == ( + {"customer_id": 1}, + { + "_id": 1, + "birthday": 1, + "comments": 1, + "customer_id": 1, + "customer_uuid": 1, + "emergency_contacts": 1, + "children": 1, + "gender": 1, + "travel_identifiers": 1, + "workplace_info": 1, + }, + ) + + def test_generate_update_stmt_multiple_fields( + self, + erasure_policy, + example_datasets, + integration_mongodb_config, + connection_config, + ): + dataset_postgres = Dataset(**example_datasets[0]) + graph = convert_dataset_to_graph(dataset_postgres, connection_config.key) + dataset_mongo = Dataset(**example_datasets[1]) + mongo_graph = convert_dataset_to_graph( + dataset_mongo, integration_mongodb_config.key + ) + dataset_graph = DatasetGraph(*[graph, mongo_graph]) + + traversal = Traversal(dataset_graph, {"email": "customer-1@example.com"}) + customer_details = traversal.traversal_node_dict[ + CollectionAddress("mongo_test", "customer_details") + ].to_mock_execution_node() + config = MongoQueryConfig(customer_details) + row = { + "birthday": "1988-01-10", + "gender": "male", + "customer_id": 1, + "_id": 1, + "workplace_info": { + "position": "Chief Strategist", + "direct_reports": ["Robbie Margo", "Sully Hunter"], + }, + "emergency_contacts": [{"name": "June Customer", "phone": "444-444-4444"}], + "children": ["Christopher Customer", "Courtney Customer"], + } + + # Make target more broad + rule = erasure_policy.rules[0] + target = rule.targets[0] + target.data_category = DataCategory("user").value + + mongo_statement = config.generate_update_stmt( + row, erasure_policy, privacy_request + ) + + expected_result_0 = {"_id": 1} + expected_result_1 = { + "$set": { + "birthday": None, + "children.0": None, + "children.1": None, + "customer_id": None, + "emergency_contacts.0.name": None, + "workplace_info.direct_reports.0": None, # Both direct reports are masked. 
+ "workplace_info.direct_reports.1": None, + "emergency_contacts.0.phone": None, + "gender": None, + "workplace_info.position": None, + } + } + + print(mongo_statement[1]) + print(expected_result_1) + assert mongo_statement[0] == expected_result_0 + assert mongo_statement[1] == expected_result_1 + + def test_generate_update_stmt_multiple_rules( + self, + erasure_policy_two_rules, + example_datasets, + integration_mongodb_config, + connection_config, + ): + dataset_postgres = Dataset(**example_datasets[0]) + graph = convert_dataset_to_graph(dataset_postgres, connection_config.key) + dataset_mongo = Dataset(**example_datasets[1]) + mongo_graph = convert_dataset_to_graph( + dataset_mongo, integration_mongodb_config.key + ) + dataset_graph = DatasetGraph(*[graph, mongo_graph]) + + traversal = Traversal(dataset_graph, {"email": "customer-1@example.com"}) + + customer_details = traversal.traversal_node_dict[ + CollectionAddress("mongo_test", "customer_details") + ].to_mock_execution_node() + + config = MongoQueryConfig(customer_details) + row = { + "birthday": "1988-01-10", + "gender": "male", + "customer_id": 1, + "_id": 1, + "workplace_info": { + "position": "Chief Strategist", + "direct_reports": ["Robbie Margo", "Sully Hunter"], + }, + "emergency_contacts": [{"name": "June Customer", "phone": "444-444-4444"}], + "children": ["Christopher Customer", "Courtney Customer"], + } + + rule = erasure_policy_two_rules.rules[0] + rule.masking_strategy = { + "strategy": "hash", + "configuration": {"algorithm": "SHA-512"}, + } + target = rule.targets[0] + target.data_category = DataCategory("user.demographic.date_of_birth").value + + rule_two = erasure_policy_two_rules.rules[1] + rule_two.masking_strategy = { + "strategy": "random_string_rewrite", + "configuration": {"length": 30}, + } + target = rule_two.targets[0] + target.data_category = DataCategory("user.demographic.gender").value + # cache secrets for hash strategy + secret = MaskingSecretCache[str]( + secret="adobo", + masking_strategy=HashMaskingStrategy.name, + secret_type=SecretType.salt, + ) + cache_secret(secret, privacy_request.id) + + mongo_statement = config.generate_update_stmt( + row, erasure_policy_two_rules, privacy_request + ) + assert mongo_statement[0] == {"_id": 1} + assert len(mongo_statement[1]["$set"]["gender"]) == 30 + assert ( + mongo_statement[1]["$set"]["birthday"] + == HashMaskingStrategy(HashMaskingConfiguration(algorithm="SHA-512")).mask( + ["1988-01-10"], request_id=privacy_request.id + )[0] + ) diff --git a/tests/ops/service/connectors/test_query_config.py b/tests/ops/service/connectors/test_query_config.py index 01d7b9dbd2..eac650d587 100644 --- a/tests/ops/service/connectors/test_query_config.py +++ b/tests/ops/service/connectors/test_query_config.py @@ -1,43 +1,29 @@ -from datetime import datetime, timezone from typing import Any, Dict, Set from unittest import mock import pytest -from boto3.dynamodb.types import TypeDeserializer from fideslang.models import Dataset from fides.api.common_exceptions import MissingNamespaceSchemaException -from fides.api.graph.config import ( - CollectionAddress, - FieldAddress, - FieldPath, - ObjectField, - ScalarField, -) +from fides.api.graph.config import CollectionAddress, FieldPath from fides.api.graph.execution import ExecutionNode -from fides.api.graph.graph import DatasetGraph, Edge +from fides.api.graph.graph import DatasetGraph from fides.api.graph.traversal import Traversal, TraversalNode from fides.api.models.datasetconfig import convert_dataset_to_graph from 
fides.api.models.privacy_request import PrivacyRequest from fides.api.schemas.masking.masking_configuration import HashMaskingConfiguration from fides.api.schemas.masking.masking_secrets import MaskingSecretCache, SecretType from fides.api.schemas.namespace_meta.namespace_meta import NamespaceMeta -from fides.api.service.connectors.query_configs.dynamodb_query_config import ( - DynamoDBQueryConfig, -) -from fides.api.service.connectors.query_configs.mongodb_query_config import ( - MongoQueryConfig, -) from fides.api.service.connectors.query_configs.query_config import ( QueryConfig, SQLQueryConfig, ) -from fides.api.service.connectors.scylla_query_config import ScyllaDBQueryConfig from fides.api.service.masking.strategy.masking_strategy_hash import HashMaskingStrategy from fides.api.util.data_category import DataCategory from tests.fixtures.application_fixtures import load_dataset +from tests.ops.test_helpers.dataset_utils import remove_primary_keys -from ...task.traversal_data import combined_mongo_postgresql_graph, integration_db_graph +from ...task.traversal_data import integration_db_graph from ...test_helpers.cache_secrets_helper import cache_secret, clear_cache_secrets # customers -> address, order @@ -286,9 +272,47 @@ def test_generate_update_stmt_one_field( "id": 1, } text_clause = config.generate_update_stmt(row, erasure_policy, privacy_request) - assert text_clause.text == """UPDATE customer SET name = :name WHERE id = :id""" - assert text_clause._bindparams["name"].key == "name" - assert text_clause._bindparams["name"].value is None # Null masking strategy + assert ( + text_clause.text + == """UPDATE customer SET name = :masked_name WHERE id = :id""" + ) + assert text_clause._bindparams["masked_name"].key == "masked_name" + assert ( + text_clause._bindparams["masked_name"].value is None + ) # Null masking strategy + + def test_generate_update_stmt_one_field_inbound_reference( + self, erasure_policy_address_city, example_datasets, connection_config + ): + dataset = Dataset(**example_datasets[0]) + graph = convert_dataset_to_graph(dataset, connection_config.key) + dataset_graph = DatasetGraph(*[graph]) + traversal = Traversal(dataset_graph, {"email": "customer-1@example.com"}) + + address_node = traversal.traversal_node_dict[ + CollectionAddress("postgres_example_test_dataset", "address") + ].to_mock_execution_node() + + config = SQLQueryConfig(address_node) + row = { + "id": 1, + "house": "123", + "street": "Main St", + "city": "San Francisco", + "state": "CA", + "zip": "94105", + } + text_clause = config.generate_update_stmt( + row, erasure_policy_address_city, privacy_request + ) + assert ( + text_clause.text + == """UPDATE address SET city = :masked_city WHERE id = :id""" + ) + assert text_clause._bindparams["masked_city"].key == "masked_city" + assert ( + text_clause._bindparams["masked_city"].value is None + ) # Null masking strategy def test_generate_update_stmt_length_truncation( self, @@ -316,11 +340,14 @@ def test_generate_update_stmt_length_truncation( text_clause = config.generate_update_stmt( row, erasure_policy_string_rewrite_long, privacy_request ) - assert text_clause.text == """UPDATE customer SET name = :name WHERE id = :id""" - assert text_clause._bindparams["name"].key == "name" + assert ( + text_clause.text + == """UPDATE customer SET name = :masked_name WHERE id = :id""" + ) + assert text_clause._bindparams["masked_name"].key == "masked_name" # length truncation on name field assert ( - text_clause._bindparams["name"].value + 
text_clause._bindparams["masked_name"].value == "some rewrite value that is very long and" ) @@ -365,22 +392,23 @@ def test_generate_update_stmt_multiple_fields_same_rule( text_clause = config.generate_update_stmt(row, erasure_policy, privacy_request) assert ( text_clause.text - == "UPDATE customer SET email = :email, name = :name WHERE id = :id" + == "UPDATE customer SET email = :masked_email, name = :masked_name WHERE id = :id" ) - assert text_clause._bindparams["name"].key == "name" + assert text_clause._bindparams["masked_name"].key == "masked_name" # since length is set to 40 in dataset.yml, we expect only first 40 chars of masked val assert ( - text_clause._bindparams["name"].value + text_clause._bindparams["masked_name"].value == HashMaskingStrategy(HashMaskingConfiguration(algorithm="SHA-512")).mask( ["John Customer"], request_id=privacy_request.id )[0][0:40] ) assert ( - text_clause._bindparams["email"].value + text_clause._bindparams["masked_email"].value == HashMaskingStrategy(HashMaskingConfiguration(algorithm="SHA-512")).mask( ["customer-1@example.com"], request_id=privacy_request.id )[0] ) + assert text_clause._bindparams["id"].value == 1 clear_cache_secrets(privacy_request.id) def test_generate_update_stmts_from_multiple_rules( @@ -409,251 +437,145 @@ def test_generate_update_stmts_from_multiple_rules( assert ( text_clause.text - == "UPDATE customer SET email = :email, name = :name WHERE id = :id" + == "UPDATE customer SET email = :masked_email, name = :masked_name WHERE id = :id" ) # Two different masking strategies used for name and email - assert text_clause._bindparams["name"].value is None # Null masking strategy assert ( - text_clause._bindparams["email"].value == "*****" + text_clause._bindparams["masked_name"].value is None + ) # Null masking strategy + assert ( + text_clause._bindparams["masked_email"].value == "*****" ) # String rewrite masking strategy + def test_generate_update_stmt_one_field_without_primary_keys( + self, erasure_policy, example_datasets, connection_config + ): + dataset = remove_primary_keys(Dataset(**example_datasets[0])) + graph = convert_dataset_to_graph(dataset, connection_config.key) + dataset_graph = DatasetGraph(*[graph]) + traversal = Traversal(dataset_graph, {"email": "customer-1@example.com"}) -class TestMongoQueryConfig: - @pytest.fixture(scope="function") - def combined_traversal(self, connection_config, integration_mongodb_config): - mongo_dataset, postgres_dataset = combined_mongo_postgresql_graph( - connection_config, integration_mongodb_config - ) - combined_dataset_graph = DatasetGraph(mongo_dataset, postgres_dataset) - combined_traversal = Traversal( - combined_dataset_graph, - {"email": "customer-1@examplecom"}, - ) - return combined_traversal - - @pytest.fixture(scope="function") - def customer_details_node(self, combined_traversal): - return combined_traversal.traversal_node_dict[ - CollectionAddress("mongo_test", "customer_details") - ].to_mock_execution_node() - - @pytest.fixture(scope="function") - def customer_feedback_node(self, combined_traversal): - return combined_traversal.traversal_node_dict[ - CollectionAddress("mongo_test", "customer_feedback") + customer_node = traversal.traversal_node_dict[ + CollectionAddress("postgres_example_test_dataset", "customer") ].to_mock_execution_node() - def test_field_map_nested(self, customer_details_node): - config = MongoQueryConfig(customer_details_node) - - field_map = config.field_map() - assert isinstance(field_map[FieldPath("workplace_info")], ObjectField) - assert 
isinstance( - field_map[FieldPath("workplace_info", "employer")], ScalarField + config = SQLQueryConfig(customer_node) + row = { + "email": "customer-1@example.com", + "name": "John Customer", + "address_id": 1, + "id": 1, + } + text_clause = config.generate_update_stmt(row, erasure_policy, privacy_request) + assert ( + text_clause.text + == """UPDATE customer SET name = :masked_name WHERE email = :email""" ) + assert text_clause._bindparams["masked_name"].key == "masked_name" + assert ( + text_clause._bindparams["masked_name"].value is None + ) # Null masking strategy - def test_primary_key_field_paths(self, customer_details_node): - config = MongoQueryConfig(customer_details_node) - assert list(config.primary_key_field_paths.keys()) == [FieldPath("_id")] - assert isinstance(config.primary_key_field_paths[FieldPath("_id")], ScalarField) - - def test_nested_query_field_paths( - self, customer_details_node, customer_feedback_node + def test_generate_update_stmt_one_field_inbound_reference_without_primary_keys( + self, erasure_policy_address_city, example_datasets, connection_config ): - assert customer_details_node.query_field_paths == { - FieldPath("customer_id"), - } + dataset = remove_primary_keys(Dataset(**example_datasets[0])) + graph = convert_dataset_to_graph(dataset, connection_config.key) + dataset_graph = DatasetGraph(*[graph]) + traversal = Traversal(dataset_graph, {"email": "customer-1@example.com"}) - assert customer_feedback_node.query_field_paths == { - FieldPath("customer_information", "email") - } + address_node = traversal.traversal_node_dict[ + CollectionAddress("postgres_example_test_dataset", "address") + ].to_mock_execution_node() - def test_nested_typed_filtered_values(self, customer_feedback_node): - """Identity data is located on a nested object""" - input_data = { - "customer_information.email": ["test@example.com"], - "ignore": ["abcde"], - } - assert customer_feedback_node.typed_filtered_values(input_data) == { - "customer_information.email": ["test@example.com"] + config = SQLQueryConfig(address_node) + row = { + "id": 1, + "house": "123", + "street": "Main St", + "city": "San Francisco", + "state": "CA", + "zip": "94105", } - - def test_generate_query( - self, - policy, - example_datasets, - integration_mongodb_config, - connection_config, - ): - dataset_postgres = Dataset(**example_datasets[0]) - graph = convert_dataset_to_graph(dataset_postgres, connection_config.key) - dataset_mongo = Dataset(**example_datasets[1]) - mongo_graph = convert_dataset_to_graph( - dataset_mongo, integration_mongodb_config.key + text_clause = config.generate_update_stmt( + row, erasure_policy_address_city, privacy_request ) - dataset_graph = DatasetGraph(*[graph, mongo_graph]) - traversal = Traversal(dataset_graph, {"email": "customer-1@example.com"}) - # Edge created from Root to nested customer_information.email field assert ( - Edge( - FieldAddress("__ROOT__", "__ROOT__", "email"), - FieldAddress( - "mongo_test", "customer_feedback", "customer_information", "email" - ), - ) - in traversal.edges - ) - - # Test query on nested field - customer_feedback = traversal.traversal_node_dict[ - CollectionAddress("mongo_test", "customer_feedback") - ].to_mock_execution_node() - config = MongoQueryConfig(customer_feedback) - input_data = {"customer_information.email": ["customer-1@example.com"]} - # Tuple of query, projection - Searching for documents with nested - # customer_information.email = customer-1@example.com - assert config.generate_query(input_data, policy) == ( - 
{"customer_information.email": "customer-1@example.com"}, - {"_id": 1, "customer_information": 1, "date": 1, "message": 1, "rating": 1}, - ) - - # Test query nested data - customer_details = traversal.traversal_node_dict[ - CollectionAddress("mongo_test", "customer_details") - ].to_mock_execution_node() - config = MongoQueryConfig(customer_details) - input_data = {"customer_id": [1]} - # Tuple of query, projection - Projection is specifying fields at the top-level. Nested data will - # be filtered later. - assert config.generate_query(input_data, policy) == ( - {"customer_id": 1}, - { - "_id": 1, - "birthday": 1, - "comments": 1, - "customer_id": 1, - "customer_uuid": 1, - "emergency_contacts": 1, - "children": 1, - "gender": 1, - "travel_identifiers": 1, - "workplace_info": 1, - }, + text_clause.text + == """UPDATE address SET city = :masked_city WHERE id = :id""" ) + assert text_clause._bindparams["masked_city"].key == "masked_city" + assert ( + text_clause._bindparams["masked_city"].value is None + ) # Null masking strategy - def test_generate_update_stmt_multiple_fields( + def test_generate_update_stmt_length_truncation_without_primary_keys( self, - erasure_policy, + erasure_policy_string_rewrite_long, example_datasets, - integration_mongodb_config, connection_config, ): - dataset_postgres = Dataset(**example_datasets[0]) - graph = convert_dataset_to_graph(dataset_postgres, connection_config.key) - dataset_mongo = Dataset(**example_datasets[1]) - mongo_graph = convert_dataset_to_graph( - dataset_mongo, integration_mongodb_config.key - ) - dataset_graph = DatasetGraph(*[graph, mongo_graph]) - + dataset = remove_primary_keys(Dataset(**example_datasets[0])) + graph = convert_dataset_to_graph(dataset, connection_config.key) + dataset_graph = DatasetGraph(*[graph]) traversal = Traversal(dataset_graph, {"email": "customer-1@example.com"}) - customer_details = traversal.traversal_node_dict[ - CollectionAddress("mongo_test", "customer_details") + + customer_node = traversal.traversal_node_dict[ + CollectionAddress("postgres_example_test_dataset", "customer") ].to_mock_execution_node() - config = MongoQueryConfig(customer_details) + + config = SQLQueryConfig(customer_node) row = { - "birthday": "1988-01-10", - "gender": "male", - "customer_id": 1, - "_id": 1, - "workplace_info": { - "position": "Chief Strategist", - "direct_reports": ["Robbie Margo", "Sully Hunter"], - }, - "emergency_contacts": [{"name": "June Customer", "phone": "444-444-4444"}], - "children": ["Christopher Customer", "Courtney Customer"], + "email": "customer-1@example.com", + "name": "John Customer", + "address_id": 1, + "id": 1, } - # Make target more broad - rule = erasure_policy.rules[0] - target = rule.targets[0] - target.data_category = DataCategory("user").value - - mongo_statement = config.generate_update_stmt( - row, erasure_policy, privacy_request + text_clause = config.generate_update_stmt( + row, erasure_policy_string_rewrite_long, privacy_request ) - - expected_result_0 = {"_id": 1} - expected_result_1 = { - "$set": { - "birthday": None, - "children.0": None, - "children.1": None, - "customer_id": None, - "emergency_contacts.0.name": None, - "workplace_info.direct_reports.0": None, # Both direct reports are masked. 
- "workplace_info.direct_reports.1": None, - "emergency_contacts.0.phone": None, - "gender": None, - "workplace_info.position": None, - } - } - - print(mongo_statement[1]) - print(expected_result_1) - assert mongo_statement[0] == expected_result_0 - assert mongo_statement[1] == expected_result_1 - - def test_generate_update_stmt_multiple_rules( - self, - erasure_policy_two_rules, - example_datasets, - integration_mongodb_config, - connection_config, - ): - dataset_postgres = Dataset(**example_datasets[0]) - graph = convert_dataset_to_graph(dataset_postgres, connection_config.key) - dataset_mongo = Dataset(**example_datasets[1]) - mongo_graph = convert_dataset_to_graph( - dataset_mongo, integration_mongodb_config.key + assert ( + text_clause.text + == """UPDATE customer SET name = :masked_name WHERE email = :email""" + ) + assert text_clause._bindparams["masked_name"].key == "masked_name" + # length truncation on name field + assert ( + text_clause._bindparams["masked_name"].value + == "some rewrite value that is very long and" ) - dataset_graph = DatasetGraph(*[graph, mongo_graph]) + def test_generate_update_stmt_multiple_fields_same_rule_without_primary_keys( + self, erasure_policy, example_datasets, connection_config + ): + dataset = remove_primary_keys(Dataset(**example_datasets[0])) + graph = convert_dataset_to_graph(dataset, connection_config.key) + dataset_graph = DatasetGraph(*[graph]) traversal = Traversal(dataset_graph, {"email": "customer-1@example.com"}) - customer_details = traversal.traversal_node_dict[ - CollectionAddress("mongo_test", "customer_details") + customer_node = traversal.traversal_node_dict[ + CollectionAddress("postgres_example_test_dataset", "customer") ].to_mock_execution_node() - config = MongoQueryConfig(customer_details) + config = SQLQueryConfig(customer_node) row = { - "birthday": "1988-01-10", - "gender": "male", - "customer_id": 1, - "_id": 1, - "workplace_info": { - "position": "Chief Strategist", - "direct_reports": ["Robbie Margo", "Sully Hunter"], - }, - "emergency_contacts": [{"name": "June Customer", "phone": "444-444-4444"}], - "children": ["Christopher Customer", "Courtney Customer"], + "email": "customer-1@example.com", + "name": "John Customer", + "address_id": 1, + "id": 1, } - rule = erasure_policy_two_rules.rules[0] + # Make target more broad + rule = erasure_policy.rules[0] + target = rule.targets[0] + target.data_category = DataCategory("user").value + + # Update rule masking strategy rule.masking_strategy = { "strategy": "hash", "configuration": {"algorithm": "SHA-512"}, } - target = rule.targets[0] - target.data_category = DataCategory("user.demographic.date_of_birth").value - - rule_two = erasure_policy_two_rules.rules[1] - rule_two.masking_strategy = { - "strategy": "random_string_rewrite", - "configuration": {"length": 30}, - } - target = rule_two.targets[0] - target.data_category = DataCategory("user.demographic.gender").value # cache secrets for hash strategy secret = MaskingSecretCache[str]( secret="adobo", @@ -662,169 +584,63 @@ def test_generate_update_stmt_multiple_rules( ) cache_secret(secret, privacy_request.id) - mongo_statement = config.generate_update_stmt( - row, erasure_policy_two_rules, privacy_request + text_clause = config.generate_update_stmt(row, erasure_policy, privacy_request) + assert ( + text_clause.text + == "UPDATE customer SET email = :masked_email, name = :masked_name WHERE email = :email" ) - assert mongo_statement[0] == {"_id": 1} - assert len(mongo_statement[1]["$set"]["gender"]) == 30 + assert 
text_clause._bindparams["masked_name"].key == "masked_name" + # since length is set to 40 in dataset.yml, we expect only first 40 chars of masked val assert ( - mongo_statement[1]["$set"]["birthday"] + text_clause._bindparams["masked_name"].value == HashMaskingStrategy(HashMaskingConfiguration(algorithm="SHA-512")).mask( - ["1988-01-10"], request_id=privacy_request.id - )[0] + ["John Customer"], request_id=privacy_request.id + )[0][0:40] ) - - -class TestDynamoDBQueryConfig: - @pytest.fixture(scope="function") - def identity(self): - identity = {"email": "customer-test_uuid@example.com"} - return identity - - @pytest.fixture(scope="function") - def dataset_graph(self, integration_dynamodb_config, example_datasets): - dataset = Dataset(**example_datasets[11]) - dataset_graph = convert_dataset_to_graph( - dataset, integration_dynamodb_config.key + assert ( + text_clause._bindparams["masked_email"].value + == HashMaskingStrategy(HashMaskingConfiguration(algorithm="SHA-512")).mask( + ["customer-1@example.com"], request_id=privacy_request.id + )[0] ) + assert text_clause._bindparams["email"].value == "customer-1@example.com" + clear_cache_secrets(privacy_request.id) - return DatasetGraph(*[dataset_graph]) - - @pytest.fixture(scope="function") - def traversal(self, identity, dataset_graph): - dynamo_traversal = Traversal(dataset_graph, identity) - return dynamo_traversal - - @pytest.fixture(scope="function") - def customer_node(self, traversal): - return traversal.traversal_node_dict[ - CollectionAddress("dynamodb_example_test_dataset", "customer") - ].to_mock_execution_node() - - @pytest.fixture(scope="function") - def customer_identifier_node(self, traversal): - return traversal.traversal_node_dict[ - CollectionAddress("dynamodb_example_test_dataset", "customer_identifier") - ].to_mock_execution_node() - - @pytest.fixture(scope="function") - def customer_row(self): - row = { - "customer_email": {"S": "customer-1@example.com"}, - "name": {"S": "John Customer"}, - "address_id": {"L": [{"S": "1"}, {"S": "2"}]}, - "personal_info": {"M": {"gender": {"S": "male"}, "age": {"S": "99"}}}, - "id": {"S": "1"}, - } - return row - - @pytest.fixture(scope="function") - def deserialized_customer_row(self, customer_row): - deserialized_customer_row = {} - deserializer = TypeDeserializer() - for key, value in customer_row.items(): - deserialized_customer_row[key] = deserializer.deserialize(value) - return deserialized_customer_row - - @pytest.fixture(scope="function") - def customer_identifier_row(self): + def test_generate_update_stmts_from_multiple_rules_without_primary_keys( + self, erasure_policy_two_rules, example_datasets, connection_config + ): + dataset = remove_primary_keys(Dataset(**example_datasets[0])) + graph = convert_dataset_to_graph(dataset, connection_config.key) + dataset_graph = DatasetGraph(*[graph]) + traversal = Traversal(dataset_graph, {"email": "customer-1@example.com"}) row = { - "customer_id": {"S": "customer-1@example.com"}, - "email": {"S": "customer-1@example.com"}, - "name": {"S": "Customer 1"}, - "created": {"S": datetime.now(timezone.utc).isoformat()}, - } - return row - - @pytest.fixture(scope="function") - def deserialized_customer_identifier_row(self, customer_identifier_row): - deserialized_customer_identifier_row = {} - deserializer = TypeDeserializer() - for key, value in customer_identifier_row.items(): - deserialized_customer_identifier_row[key] = deserializer.deserialize(value) - return deserialized_customer_identifier_row - - def 
test_get_query_param_formatting_single_key( - self, - resources_dict, - customer_node, - ) -> None: - input_data = { - "fidesops_grouped_inputs": [], - "email": ["customer-test_uuid@example.com"], - } - attribute_definitions = [{"AttributeName": "email", "AttributeType": "S"}] - query_config = DynamoDBQueryConfig(customer_node, attribute_definitions) - item = query_config.generate_query( - input_data=input_data, policy=resources_dict["policy"] - ) - assert item["ExpressionAttributeValues"] == { - ":value": {"S": "customer-test_uuid@example.com"} - } - assert item["KeyConditionExpression"] == "email = :value" - - def test_put_query_param_formatting_single_key( - self, - erasure_policy, - customer_node, - deserialized_customer_row, - ) -> None: - input_data = { - "fidesops_grouped_inputs": [], - "email": ["customer-test_uuid@example.com"], - } - attribute_definitions = [{"AttributeName": "email", "AttributeType": "S"}] - query_config = DynamoDBQueryConfig(customer_node, attribute_definitions) - update_item = query_config.generate_update_stmt( - deserialized_customer_row, erasure_policy, privacy_request - ) - - assert update_item == { - "customer_email": {"S": "customer-1@example.com"}, - "name": {"NULL": True}, - "address_id": {"L": [{"S": "1"}, {"S": "2"}]}, - "personal_info": {"M": {"gender": {"S": "male"}, "age": {"S": "99"}}}, - "id": {"S": "1"}, + "email": "customer-1@example.com", + "name": "John Customer", + "address_id": 1, + "id": 1, } - -class TestScyllaDBQueryConfig: - @pytest.fixture(scope="function") - def complete_execution_node( - self, example_datasets, integration_scylladb_config_with_keyspace - ): - dataset = Dataset(**example_datasets[15]) - graph = convert_dataset_to_graph( - dataset, integration_scylladb_config_with_keyspace.key - ) - dataset_graph = DatasetGraph(*[graph]) - identity = {"email": "customer-1@example.com"} - scylla_traversal = Traversal(dataset_graph, identity) - return scylla_traversal.traversal_node_dict[ - CollectionAddress("scylladb_example_test_dataset", "users") + customer_node = traversal.traversal_node_dict[ + CollectionAddress("postgres_example_test_dataset", "customer") ].to_mock_execution_node() - def test_dry_run_query_no_data(self, scylladb_execution_node): - query_config = ScyllaDBQueryConfig(scylladb_execution_node) - dry_run_query = query_config.dry_run_query() - assert dry_run_query is None + config = SQLQueryConfig(customer_node) - def test_dry_run_query_with_data(self, complete_execution_node): - query_config = ScyllaDBQueryConfig(complete_execution_node) - dry_run_query = query_config.dry_run_query() - assert ( - dry_run_query - == "SELECT age, alternative_contacts, ascii_data, big_int_data, do_not_contact, double_data, duration, email, float_data, last_contacted, logins, name, states_lived, timestamp, user_id, uuid FROM users WHERE email = ? 
ALLOW FILTERING;" + text_clause = config.generate_update_stmt( + row, erasure_policy_two_rules, privacy_request ) - def test_query_to_str(self, complete_execution_node): - query_config = ScyllaDBQueryConfig(complete_execution_node) - statement = ( - "SELECT name FROM users WHERE email = %(email)s", - {"email": "test@example.com"}, + assert ( + text_clause.text + == "UPDATE customer SET email = :masked_email, name = :masked_name WHERE email = :email" ) - query_to_str = query_config.query_to_str(statement, {}) - assert query_to_str == "SELECT name FROM users WHERE email = 'test@example.com'" + # Two different masking strategies used for name and email + assert ( + text_clause._bindparams["masked_name"].value is None + ) # Null masking strategy + assert ( + text_clause._bindparams["masked_email"].value == "*****" + ) # String rewrite masking strategy class TestSQLLikeQueryConfig: diff --git a/tests/ops/service/connectors/test_scylladb_query_config.py b/tests/ops/service/connectors/test_scylladb_query_config.py new file mode 100644 index 0000000000..3cbc6f493f --- /dev/null +++ b/tests/ops/service/connectors/test_scylladb_query_config.py @@ -0,0 +1,47 @@ +import pytest +from fideslang.models import Dataset + +from fides.api.graph.config import CollectionAddress +from fides.api.graph.graph import DatasetGraph +from fides.api.graph.traversal import Traversal +from fides.api.models.datasetconfig import convert_dataset_to_graph +from fides.api.service.connectors.scylla_query_config import ScyllaDBQueryConfig + + +class TestScyllaDBQueryConfig: + @pytest.fixture(scope="function") + def complete_execution_node( + self, example_datasets, integration_scylladb_config_with_keyspace + ): + dataset = Dataset(**example_datasets[15]) + graph = convert_dataset_to_graph( + dataset, integration_scylladb_config_with_keyspace.key + ) + dataset_graph = DatasetGraph(*[graph]) + identity = {"email": "customer-1@example.com"} + scylla_traversal = Traversal(dataset_graph, identity) + return scylla_traversal.traversal_node_dict[ + CollectionAddress("scylladb_example_test_dataset", "users") + ].to_mock_execution_node() + + def test_dry_run_query_no_data(self, scylladb_execution_node): + query_config = ScyllaDBQueryConfig(scylladb_execution_node) + dry_run_query = query_config.dry_run_query() + assert dry_run_query is None + + def test_dry_run_query_with_data(self, complete_execution_node): + query_config = ScyllaDBQueryConfig(complete_execution_node) + dry_run_query = query_config.dry_run_query() + assert ( + dry_run_query + == "SELECT age, alternative_contacts, ascii_data, big_int_data, do_not_contact, double_data, duration, email, float_data, last_contacted, logins, name, states_lived, timestamp, user_id, uuid FROM users WHERE email = ? 
ALLOW FILTERING;" + ) + + def test_query_to_str(self, complete_execution_node): + query_config = ScyllaDBQueryConfig(complete_execution_node) + statement = ( + "SELECT name FROM users WHERE email = %(email)s", + {"email": "test@example.com"}, + ) + query_to_str = query_config.query_to_str(statement, {}) + assert query_to_str == "SELECT name FROM users WHERE email = 'test@example.com'" diff --git a/tests/ops/service/connectors/test_snowflake_query_config.py b/tests/ops/service/connectors/test_snowflake_query_config.py index 5521a1a88a..4f4b23b8c4 100644 --- a/tests/ops/service/connectors/test_snowflake_query_config.py +++ b/tests/ops/service/connectors/test_snowflake_query_config.py @@ -150,7 +150,7 @@ def test_generate_update_stmt( ) assert ( str(update_stmt) - == 'UPDATE "address" SET "city" = :city, "house" = :house, "state" = :state, "street" = :street, "zip" = :zip WHERE "id" = :id' + == 'UPDATE "address" SET "city" = :masked_city, "house" = :masked_house, "state" = :masked_state, "street" = :masked_street, "zip" = :masked_zip WHERE "id" = :id' ) def test_generate_namespaced_update_stmt( @@ -191,5 +191,5 @@ def test_generate_namespaced_update_stmt( ) assert ( str(update_stmt) - == 'UPDATE "FIDESOPS_TEST"."TEST"."address" SET "city" = :city, "house" = :house, "state" = :state, "street" = :street, "zip" = :zip WHERE "id" = :id' + == 'UPDATE "FIDESOPS_TEST"."TEST"."address" SET "city" = :masked_city, "house" = :masked_house, "state" = :masked_state, "street" = :masked_street, "zip" = :masked_zip WHERE "id" = :id' ) diff --git a/tests/ops/service/dataset/example_datasets/multiple_identities.yml b/tests/ops/service/dataset/example_datasets/multiple_identities.yml index 053afb3ced..dd76dbfa6d 100644 --- a/tests/ops/service/dataset/example_datasets/multiple_identities.yml +++ b/tests/ops/service/dataset/example_datasets/multiple_identities.yml @@ -16,8 +16,6 @@ dataset: data_type: string - name: id data_categories: [user.unique_id] - fides_meta: - primary_key: True - name: name data_categories: [user.name] fides_meta: diff --git a/tests/ops/service/dataset/example_datasets/multiple_identities_with_external_dependency.yml b/tests/ops/service/dataset/example_datasets/multiple_identities_with_external_dependency.yml index fdfcd32bfc..db9e227a74 100644 --- a/tests/ops/service/dataset/example_datasets/multiple_identities_with_external_dependency.yml +++ b/tests/ops/service/dataset/example_datasets/multiple_identities_with_external_dependency.yml @@ -32,7 +32,5 @@ dataset: direction: from - name: id data_categories: [system.operations] - fides_meta: - primary_key: True - name: shipping_address_id data_categories: [system.operations] diff --git a/tests/ops/service/dataset/example_datasets/no_identities.yml b/tests/ops/service/dataset/example_datasets/no_identities.yml index fac879de99..82b56f9c65 100644 --- a/tests/ops/service/dataset/example_datasets/no_identities.yml +++ b/tests/ops/service/dataset/example_datasets/no_identities.yml @@ -13,8 +13,6 @@ dataset: data_categories: [user.contact.email] - name: id data_categories: [user.unique_id] - fides_meta: - primary_key: True - name: name data_categories: [user.name] fides_meta: diff --git a/tests/ops/service/dataset/example_datasets/single_identity.yml b/tests/ops/service/dataset/example_datasets/single_identity.yml index 19cdc7df3e..ce1506886d 100644 --- a/tests/ops/service/dataset/example_datasets/single_identity.yml +++ b/tests/ops/service/dataset/example_datasets/single_identity.yml @@ -16,8 +16,6 @@ dataset: data_type: string - name: id 
data_categories: [user.unique_id] - fides_meta: - primary_key: True - name: name data_categories: [user.name] fides_meta: diff --git a/tests/ops/service/dataset/example_datasets/single_identity_with_internal_dependency.yml b/tests/ops/service/dataset/example_datasets/single_identity_with_internal_dependency.yml index 708aefbaf0..af73f8bcb8 100644 --- a/tests/ops/service/dataset/example_datasets/single_identity_with_internal_dependency.yml +++ b/tests/ops/service/dataset/example_datasets/single_identity_with_internal_dependency.yml @@ -16,8 +16,6 @@ dataset: data_type: string - name: id data_categories: [user.unique_id] - fides_meta: - primary_key: True - name: name data_categories: [user.name] fides_meta: diff --git a/tests/ops/service/privacy_request/test_bigquery_enterprise_privacy_request.py b/tests/ops/service/privacy_request/test_bigquery_enterprise_privacy_request.py index 8fb7e29729..5a133c031f 100644 --- a/tests/ops/service/privacy_request/test_bigquery_enterprise_privacy_request.py +++ b/tests/ops/service/privacy_request/test_bigquery_enterprise_privacy_request.py @@ -1,27 +1,9 @@ -import time -from datetime import datetime, timezone -from typing import Any, Dict, List, Set from unittest import mock -from unittest.mock import ANY, Mock, call -from uuid import uuid4 -import pydash import pytest from fides.api.models.audit_log import AuditLog, AuditLogAction -from fides.api.models.privacy_request import ( - ActionType, - CheckpointActionRequired, - ExecutionLog, - ExecutionLogStatus, - PolicyPreWebhook, - PrivacyRequest, - PrivacyRequestStatus, -) -from fides.api.schemas.masking.masking_configuration import MaskingConfiguration -from fides.api.schemas.masking.masking_secrets import MaskingSecretCache -from fides.api.schemas.policy import Rule -from fides.api.service.masking.strategy.masking_strategy import MaskingStrategy +from fides.api.models.privacy_request import ExecutionLog from tests.ops.service.privacy_request.test_request_runner_service import ( get_privacy_request_results, ) @@ -54,7 +36,7 @@ def test_create_and_process_access_request_bigquery_enterprise( customer_email = "customer-1@example.com" user_id = ( - 1754 # this is a real (not generated) user id in the Stackoverflow dataset + 1754 # this is a real (not generated) user id in the Stack Overflow dataset ) data = { "requested_at": "2024-08-30T16:09:37.359Z", diff --git a/tests/ops/service/privacy_request/test_postgres_privacy_requests.py b/tests/ops/service/privacy_request/test_postgres_privacy_requests.py index 2959efd463..3f5ec661f5 100644 --- a/tests/ops/service/privacy_request/test_postgres_privacy_requests.py +++ b/tests/ops/service/privacy_request/test_postgres_privacy_requests.py @@ -160,9 +160,16 @@ def test_upload_access_results_has_data_use_map( "dsr_version", ["use_dsr_3_0", "use_dsr_2_0"], ) +@pytest.mark.parametrize( + "dataset_config", + [ + "postgres_example_test_dataset_config_read_access", + "postgres_example_test_dataset_config_read_access_without_primary_keys", + ], +) def test_create_and_process_access_request_postgres( trigger_webhook_mock, - postgres_example_test_dataset_config_read_access, + dataset_config, postgres_integration_db, db, cache, @@ -174,6 +181,7 @@ def test_create_and_process_access_request_postgres( run_privacy_request_task, ): request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 + request.getfixturevalue(dataset_config) customer_email = "customer-1@example.com" data = { @@ -196,7 +204,7 @@ def test_create_and_process_access_request_postgres( assert 
results[key] is not None assert results[key] != {} - result_key_prefix = f"postgres_example_test_dataset:" + result_key_prefix = "postgres_example_test_dataset:" customer_key = result_key_prefix + "customer" assert results[customer_key][0]["email"] == customer_email @@ -278,14 +286,14 @@ def test_create_and_process_access_request_with_custom_identities_postgres( assert results[key] is not None assert results[key] != {} - result_key_prefix = f"postgres_example_test_dataset:" + result_key_prefix = "postgres_example_test_dataset:" customer_key = result_key_prefix + "customer" assert results[customer_key][0]["email"] == customer_email visit_key = result_key_prefix + "visit" assert results[visit_key][0]["email"] == customer_email - loyalty_key = f"postgres_example_test_extended_dataset:loyalty" + loyalty_key = "postgres_example_test_extended_dataset:loyalty" assert results[loyalty_key][0]["id"] == loyalty_id log_id = pr.execution_logs[0].id @@ -355,7 +363,7 @@ def test_create_and_process_access_request_with_valid_skipped_collection( assert "login" not in results.keys() - result_key_prefix = f"postgres_example_test_dataset:" + result_key_prefix = "postgres_example_test_dataset:" customer_key = result_key_prefix + "customer" assert results[customer_key][0]["email"] == customer_email @@ -712,9 +720,16 @@ def test_create_and_process_erasure_request_with_table_joins( "dsr_version", ["use_dsr_3_0", "use_dsr_2_0"], ) +@pytest.mark.parametrize( + "dataset_config", + [ + "postgres_example_test_dataset_config_read_access", + "postgres_example_test_dataset_config_read_access_without_primary_keys", + ], +) def test_create_and_process_erasure_request_read_access( postgres_integration_db, - postgres_example_test_dataset_config_read_access, + dataset_config, db, cache, erasure_policy, @@ -723,6 +738,7 @@ def test_create_and_process_erasure_request_read_access( run_privacy_request_task, ): request.getfixturevalue(dsr_version) # REQUIRED to test both DSR 3.0 and 2.0 + request.getfixturevalue(dataset_config) customer_email = "customer-2@example.com" customer_id = 2 @@ -739,7 +755,7 @@ def test_create_and_process_erasure_request_read_access( data, ) errored_execution_logs = pr.execution_logs.filter_by(status="error") - assert errored_execution_logs.count() == 9 + assert errored_execution_logs.count() == 11 assert ( errored_execution_logs[0].message == "No values were erased since this connection " diff --git a/tests/ops/test_helpers/dataset_utils.py b/tests/ops/test_helpers/dataset_utils.py index e60efb9892..d51e1f47ff 100644 --- a/tests/ops/test_helpers/dataset_utils.py +++ b/tests/ops/test_helpers/dataset_utils.py @@ -13,7 +13,11 @@ ) from fides.api.graph.data_type import DataType, get_data_type, to_data_type_string from fides.api.models.connectionconfig import ConnectionConfig -from fides.api.models.datasetconfig import DatasetConfig, convert_dataset_to_graph +from fides.api.models.datasetconfig import ( + DatasetConfig, + DatasetField, + convert_dataset_to_graph, +) from fides.api.util.collection_util import Row SAAS_DATASET_DIRECTORY = "data/saas/dataset/" @@ -231,3 +235,27 @@ def get_simple_fields(fields: Iterable[Field]) -> List[Dict[str, Any]]: object["fields"] = get_simple_fields(field.fields.values()) object_list.append(object) return object_list + + +def remove_primary_keys(dataset: Dataset) -> Dataset: + """Returns a copy of the dataset with primary key fields removed from fides_meta.""" + dataset_copy = dataset.model_copy(deep=True) + + for collection in dataset_copy.collections: + for field in 
collection.fields:
+            if field.fides_meta:
+                if field.fides_meta.primary_key:
+                    field.fides_meta.primary_key = None
+            if field.fields:
+                _remove_nested_primary_keys(field.fields)
+
+    return dataset_copy
+
+
+def _remove_nested_primary_keys(fields: List[DatasetField]) -> None:
+    """Helper function to recursively remove primary keys from nested fields."""
+    for field in fields:
+        if field.fides_meta and field.fides_meta.primary_key:
+            field.fides_meta.primary_key = None
+        if field.fields:
+            _remove_nested_primary_keys(field.fields)
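For reference, a minimal sketch (not part of the diff) of how the new remove_primary_keys helper is exercised by the *_without_primary_keys query-config tests above. The example_datasets and connection_config fixture names and the fides API calls are taken from this diff; the import path for the helper is assumed from the file location tests/ops/test_helpers/dataset_utils.py.

# Illustrative sketch only: build a traversal over the Postgres example dataset
# with primary_key flags stripped, mirroring the setup in the tests above.
from fideslang.models import Dataset

from fides.api.graph.graph import DatasetGraph
from fides.api.graph.traversal import Traversal
from fides.api.models.datasetconfig import convert_dataset_to_graph
from tests.ops.test_helpers.dataset_utils import remove_primary_keys


def build_traversal_without_primary_keys(example_datasets, connection_config):
    # Strip primary_key metadata so erasure updates must key off other
    # reference fields (e.g. the email identity) instead of "id".
    dataset = remove_primary_keys(Dataset(**example_datasets[0]))
    graph = convert_dataset_to_graph(dataset, connection_config.key)
    return Traversal(DatasetGraph(graph), {"email": "customer-1@example.com"})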