diff --git a/aquadoggo/src/db/query/filter.rs b/aquadoggo/src/db/query/filter.rs index 0eb00e06f..d288eda53 100644 --- a/aquadoggo/src/db/query/filter.rs +++ b/aquadoggo/src/db/query/filter.rs @@ -414,12 +414,12 @@ mod tests { filter.get(0).unwrap().by, FilterBy::Contains("Panda is the best".into()) ); - assert_eq!(filter.get(0).unwrap().exclusive, false,); + assert!(!filter.get(0).unwrap().exclusive,); assert_eq!( filter.get(1).unwrap().by, FilterBy::Contains("Llama".into()) ); - assert_eq!(filter.get(1).unwrap().exclusive, true); + assert!(filter.get(1).unwrap().exclusive); } #[test] @@ -450,7 +450,7 @@ mod tests { assert_eq!(filter.len(), 1); assert_eq!(filter.get(0).unwrap().field, field); - assert_eq!(filter.get(0).unwrap().exclusive, false); + assert!(!filter.get(0).unwrap().exclusive); assert_eq!( filter.get(0).unwrap().by, FilterBy::Set(vec![panda, turtle, llama]) @@ -535,7 +535,7 @@ mod tests { filter.get(0).unwrap().by, FilterBy::Element(OperationValue::Boolean(false)) ); - assert_eq!(filter.get(0).unwrap().exclusive, false); + assert!(!filter.get(0).unwrap().exclusive); // Change exclusive filter manually filter.add_not( @@ -549,7 +549,7 @@ mod tests { filter.get(0).unwrap().by, FilterBy::Element(OperationValue::Boolean(true)) ); - assert_eq!(filter.get(0).unwrap().exclusive, true); + assert!(filter.get(0).unwrap().exclusive); // Change filter manually again filter.add( @@ -563,7 +563,7 @@ mod tests { filter.get(0).unwrap().by, FilterBy::Element(OperationValue::Boolean(false)) ); - assert_eq!(filter.get(0).unwrap().exclusive, false); + assert!(!filter.get(0).unwrap().exclusive); } #[test] @@ -571,16 +571,10 @@ mod tests { let mut filter = Filter::new(); // Add boolean filter - filter.add( - &Field::new("is_admin".into()), - &OperationValue::Boolean(true), - ); + filter.add(&Field::new("is_admin"), &OperationValue::Boolean(true)); // Overwrite the same filter with other value - filter.add( - &Field::new("is_admin".into()), - 
&OperationValue::Boolean(false), - ); + filter.add(&Field::new("is_admin"), &OperationValue::Boolean(false)); // .. check if it got correctly overwritten assert_eq!(filter.len(), 1); @@ -588,13 +582,10 @@ mod tests { filter.get(0).unwrap().by, FilterBy::Element(OperationValue::Boolean(false)) ); - assert_eq!(filter.get(0).unwrap().exclusive, false); + assert!(!filter.get(0).unwrap().exclusive); // Overwrite it again, but with an exclusive filter - filter.add_not( - &Field::new("is_admin".into()), - &OperationValue::Boolean(false), - ); + filter.add_not(&Field::new("is_admin"), &OperationValue::Boolean(false)); // .. check if it got correctly overwritten assert_eq!(filter.len(), 1); @@ -602,6 +593,6 @@ mod tests { filter.get(0).unwrap().by, FilterBy::Element(OperationValue::Boolean(false)) ); - assert_eq!(filter.get(0).unwrap().exclusive, true); + assert!(filter.get(0).unwrap().exclusive); } } diff --git a/aquadoggo/src/db/query/test_utils.rs b/aquadoggo/src/db/query/test_utils.rs index 80992d3ae..e92e3e372 100644 --- a/aquadoggo/src/db/query/test_utils.rs +++ b/aquadoggo/src/db/query/test_utils.rs @@ -138,6 +138,6 @@ mod tests { #[case] value: &[OperationValue], #[case] expected: (String, FilterBy, bool), ) { - assert_eq!(parse_str(key, &value).expect("Should succeed"), expected); + assert_eq!(parse_str(key, value).expect("Should succeed"), expected); } } diff --git a/aquadoggo/src/db/query/validate.rs b/aquadoggo/src/db/query/validate.rs index 8b2b2750e..00ef7237d 100644 --- a/aquadoggo/src/db/query/validate.rs +++ b/aquadoggo/src/db/query/validate.rs @@ -229,7 +229,7 @@ mod tests { #[case::invalid_meta_field_type( Select::default(), Filter::new().meta_fields(&[ - ("documentId".into(), &["test".into()]) + ("documentId", &["test".into()]) ]), Order::default(), "Filter type 'str' for field 'documentId' is not matching schema type 'relation(doggo_schema_0020b177ec1bf26dfb3b7010d473e6d44713b29b765b99c6e60ecbfae742de496543)'" @@ -237,7 +237,7 @@ mod tests { 
#[case::invalid_field_type( Select::default(), Filter::new().fields(&[ - ("username".into(), &[2020.into()]) + ("username", &[2020.into()]) ]), Order::default(), "Filter type 'int' for field 'username' is not matching schema type 'str'" @@ -245,7 +245,7 @@ mod tests { #[case::invalid_interval( Select::default(), Filter::new().fields(&[ - ("is_admin_in".into(), &[true.into(), false.into()]) + ("is_admin_in", &[true.into(), false.into()]) ]), Order::default(), "Can't apply set filter as field 'is_admin' is of type boolean" @@ -253,7 +253,7 @@ mod tests { #[case::invalid_search( Select::default(), Filter::new().fields(&[ - ("age_contains".into(), &[22.into()]) + ("age_contains", &[22.into()]) ]), Order::default(), "Can't apply search filter as field 'age' is not of type string" @@ -261,7 +261,7 @@ mod tests { #[case::invalid_set_types( Select::default(), Filter::new().fields(&[ - ("username_in".into(), &["bubu".into(), 2020.into()]) + ("username_in", &["bubu".into(), 2020.into()]) ]), Order::default(), "Filter type 'int' for field 'username' is not matching schema type 'str'" diff --git a/aquadoggo/src/db/stores/blob.rs b/aquadoggo/src/db/stores/blob.rs index a1e7304d6..a28d2f8f7 100644 --- a/aquadoggo/src/db/stores/blob.rs +++ b/aquadoggo/src/db/stores/blob.rs @@ -410,7 +410,7 @@ mod tests { fn get_blob(key_pair: KeyPair) { test_runner(|mut node: TestNode| async move { let blob_data = "Hello, World!".as_bytes(); - let blob_view_id = add_blob(&mut node, &blob_data, 6, "text/plain", &key_pair).await; + let blob_view_id = add_blob(&mut node, blob_data, 6, "text/plain", &key_pair).await; let document_id: DocumentId = blob_view_id.to_string().parse().unwrap(); // Get blob by document id @@ -537,7 +537,7 @@ mod tests { fn purge_blob(key_pair: KeyPair) { test_runner(|mut node: TestNode| async move { let blob_data = "Hello, World!".as_bytes(); - let blob_view_id = add_blob(&mut node, &blob_data, 7, "text/plain", &key_pair).await; + let blob_view_id = add_blob(&mut node, 
blob_data, 7, "text/plain", &key_pair).await; // There is one blob and two blob pieces in database. // @@ -580,7 +580,7 @@ mod tests { let _ = populate_and_materialize(&mut node, &config).await; let blob_data = "Hello, World!".as_bytes(); - let blob_view_id = add_blob(&mut node, &blob_data, 7, "text/plain", &key_pair).await; + let blob_view_id = add_blob(&mut node, blob_data, 7, "text/plain", &key_pair).await; // There is one blob and two blob pieces in database. // @@ -614,7 +614,7 @@ mod tests { fn does_not_purge_blob_if_still_pinned(key_pair: KeyPair) { test_runner(|mut node: TestNode| async move { let blob_data = "Hello, World!".as_bytes(); - let blob_view_id = add_blob(&mut node, &blob_data, 7, "text/plain", &key_pair).await; + let blob_view_id = add_blob(&mut node, blob_data, 7, "text/plain", &key_pair).await; let _ = add_schema_and_documents( &mut node, @@ -658,7 +658,7 @@ mod tests { fn purge_all_pieces_of_updated_blob(key_pair: KeyPair) { test_runner(|mut node: TestNode| async move { let blob_data = "Hello, World!".as_bytes(); - let blob_view_id = add_blob(&mut node, &blob_data, 7, "text/plain", &key_pair).await; + let blob_view_id = add_blob(&mut node, blob_data, 7, "text/plain", &key_pair).await; // Create a new blob piece. let new_blob_pieces = add_document( diff --git a/aquadoggo/src/db/stores/document.rs b/aquadoggo/src/db/stores/document.rs index 11be9a209..f00f62638 100644 --- a/aquadoggo/src/db/stores/document.rs +++ b/aquadoggo/src/db/stores/document.rs @@ -1323,7 +1323,7 @@ mod tests { let document = node .context .store - .get_document_by_view_id(&document.view_id()) + .get_document_by_view_id(document.view_id()) .await .unwrap(); assert!(document.is_some()); @@ -1388,7 +1388,7 @@ mod tests { assert_query(&node, "SELECT name FROM document_view_fields", 22).await; // Purge one document from the database, we now expect half the rows to be remaining. 
- let result = node.context.store.purge_document(&document_id).await; + let result = node.context.store.purge_document(document_id).await; assert!(result.is_ok(), "{:#?}", result); assert_query(&node, "SELECT entry_hash FROM entries", 1).await; @@ -1413,7 +1413,7 @@ mod tests { let document_id = documents[0].id(); let public_key = config.authors[0].public_key(); - let _ = node.context.store.purge_document(&document_id).await; + let _ = node.context.store.purge_document(document_id).await; let result = next_args( &node.context.store, diff --git a/aquadoggo/src/db/stores/query.rs b/aquadoggo/src/db/stores/query.rs index bcfd1d031..125d522df 100644 --- a/aquadoggo/src/db/stores/query.rs +++ b/aquadoggo/src/db/stores/query.rs @@ -1530,7 +1530,7 @@ mod tests { .fields() .expect("Expected document fields") .get(field) - .expect(&format!("Expected '{field}' field to exist in document")) + .unwrap_or_else(|| panic!("Expected '{field}' field to exist in document")) .value() .to_owned() } @@ -1640,7 +1640,7 @@ mod tests { ), ], ], - &key_pair, + key_pair, ) .await } @@ -1867,9 +1867,9 @@ mod tests { } if view_ids.len() - 1 == index { - assert_eq!(pagination_data.has_next_page, false); + assert!(!pagination_data.has_next_page); } else { - assert_eq!(pagination_data.has_next_page, true); + assert!(pagination_data.has_next_page); } assert_eq!(pagination_data.total_count, Some(view_ids.len() as u64)); @@ -1890,7 +1890,7 @@ mod tests { assert_eq!(pagination_data.total_count, Some(view_ids.len() as u64)); assert_eq!(pagination_data.end_cursor, None); - assert_eq!(pagination_data.has_next_page, false); + assert!(!pagination_data.has_next_page); assert_eq!(documents.len(), 0); }); } @@ -1944,9 +1944,9 @@ mod tests { } if view_ids_len - 1 == index { - assert_eq!(pagination_data.has_next_page, false); + assert!(!pagination_data.has_next_page); } else { - assert_eq!(pagination_data.has_next_page, true); + assert!(pagination_data.has_next_page); }
assert_eq!(pagination_data.total_count, Some(5)); @@ -1967,7 +1967,7 @@ mod tests { assert_eq!(pagination_data.total_count, Some(5)); assert_eq!(pagination_data.end_cursor, None); - assert_eq!(pagination_data.has_next_page, false); + assert!(!pagination_data.has_next_page); assert_eq!(documents.len(), 0); }); } @@ -2002,7 +2002,7 @@ mod tests { ); // Select the pinned relation list "venues" of the first visited document - let list = RelationList::new_pinned(&visited_view_ids[0], "venues".into()); + let list = RelationList::new_pinned(&visited_view_ids[0], "venues"); let (pagination_data, documents) = node .context @@ -2023,7 +2023,7 @@ mod tests { ); // Select the pinned relation list "venues" of the second visited document - let list = RelationList::new_pinned(&visited_view_ids[1], "venues".into()); + let list = RelationList::new_pinned(&visited_view_ids[1], "venues"); let (pagination_data, documents) = node .context @@ -2093,7 +2093,7 @@ mod tests { ); // Select the pinned relation list "venues" for the visited document - let list = RelationList::new_pinned(&visited_view_id, "venues".into()); + let list = RelationList::new_pinned(&visited_view_id, "venues"); let (_, documents) = node .context @@ -2121,7 +2121,7 @@ mod tests { ); // Select the pinned relation list "venues" for the visited document - let list = RelationList::new_pinned(&visited_view_id, "venues".into()); + let list = RelationList::new_pinned(&visited_view_id, "venues"); let (_, documents) = node .context @@ -2188,7 +2188,7 @@ mod tests { let mut cursor: Option = None; // Select the pinned relation list "venues" of the second visited document - let list = RelationList::new_pinned(&visited_view_ids[0], "venues".into()); + let list = RelationList::new_pinned(&visited_view_ids[0], "venues"); let mut args = Query::new( &Pagination::new( @@ -2227,9 +2227,9 @@ mod tests { } if view_ids_len - 1 == index { - assert_eq!(pagination_data.has_next_page, false); + assert!(!pagination_data.has_next_page); } else 
{ - assert_eq!(pagination_data.has_next_page, true); + assert!(pagination_data.has_next_page); } assert_eq!(pagination_data.total_count, Some(7)); @@ -2250,7 +2250,7 @@ mod tests { assert_eq!(pagination_data.total_count, Some(7)); assert_eq!(pagination_data.end_cursor, None); - assert_eq!(pagination_data.has_next_page, false); + assert!(!pagination_data.has_next_page); assert_eq!(documents.len(), 0); }); } @@ -2326,7 +2326,7 @@ mod tests { let mut cursor: Option = None; // Select the pinned relation list "venues" of the first visited document - let list = RelationList::new_pinned(&visited_view_ids[0], "venues".into()); + let list = RelationList::new_pinned(&visited_view_ids[0], "venues"); let mut args = Query::new( &Pagination::new( @@ -2368,9 +2368,9 @@ mod tests { // Check if `has_next_page` flag is correct if documents_len - 1 == index as u64 { - assert_eq!(pagination_data.has_next_page, false); + assert!(!pagination_data.has_next_page); } else { - assert_eq!(pagination_data.has_next_page, true); + assert!(pagination_data.has_next_page); } // Check if pagination info is correct @@ -2396,7 +2396,7 @@ mod tests { .await .expect("Query failed"); - assert_eq!(pagination_data.has_next_page, false); + assert!(!pagination_data.has_next_page); assert_eq!(pagination_data.total_count, Some(documents_len)); assert_eq!(pagination_data.end_cursor, None); assert_eq!(documents.len(), 0); @@ -2459,7 +2459,7 @@ mod tests { ); // Select the pinned relation list "venues" of the first visited document - let list = RelationList::new_pinned(&visited_view_ids[0], "venues".into()); + let list = RelationList::new_pinned(&visited_view_ids[0], "venues"); let result = node .context diff --git a/aquadoggo/src/graphql/input_values/fields_filter.rs b/aquadoggo/src/graphql/input_values/fields_filter.rs index f0eeb8116..09e047fe4 100644 --- a/aquadoggo/src/graphql/input_values/fields_filter.rs +++ b/aquadoggo/src/graphql/input_values/fields_filter.rs @@ -23,7 +23,7 @@ use 
crate::graphql::utils::filter_name; pub fn build_filter_input_object(schema: &Schema) -> InputObject { // Construct the document fields object which will be named `Filter` let schema_field_name = filter_name(schema.id()); - let mut filter_input = InputObject::new(&schema_field_name); + let mut filter_input = InputObject::new(schema_field_name); // For every field in the schema we create a type with a resolver for (name, field_type) in schema.fields().iter() { diff --git a/aquadoggo/src/graphql/mutations/publish.rs b/aquadoggo/src/graphql/mutations/publish.rs index bc079eed3..3b0a72c68 100644 --- a/aquadoggo/src/graphql/mutations/publish.rs +++ b/aquadoggo/src/graphql/mutations/publish.rs @@ -104,7 +104,7 @@ mod tests { use p2panda_rs::entry::traits::AsEncodedEntry; use p2panda_rs::entry::{EncodedEntry, EntryBuilder, LogId, SeqNum}; use p2panda_rs::hash::Hash; - use p2panda_rs::identity::{KeyPair, PublicKey}; + use p2panda_rs::identity::KeyPair; use p2panda_rs::operation::encode::encode_operation; use p2panda_rs::operation::{ EncodedOperation, OperationBuilder, OperationValue, PinnedRelationList, @@ -617,7 +617,7 @@ mod tests { 8, 1, Some(HASH.parse().unwrap()), - Some(Hash::new_from_bytes(&vec![2, 3, 4])), + Some(Hash::new_from_bytes(&[2, 3, 4])), Some(EncodedOperation::from_bytes(&OPERATION_ENCODED)), key_pair(PRIVATE_KEY) ).to_string(), @@ -704,7 +704,7 @@ mod tests { // public facing method so we should expect any junk data to arrive. let entry_encoded = entry_encoded.to_string(); - let encoded_operation = hex::encode(encoded_operation.to_owned()); + let encoded_operation = hex::encode(encoded_operation); let expected_error_message = expected_error_message.to_string(); test_runner(|mut node: TestNode| async move { @@ -757,7 +757,7 @@ mod tests { // Iterate over each key pair. 
for key_pair in &key_pairs { let mut document_id: Option = None; - let public_key = PublicKey::from(key_pair.public_key()); + let public_key = key_pair.public_key(); // Iterate of the number of entries we want to publish. for index in 0..num_of_entries { diff --git a/aquadoggo/src/graphql/objects/document_fields.rs b/aquadoggo/src/graphql/objects/document_fields.rs index 08d858bf1..9f657b9e8 100644 --- a/aquadoggo/src/graphql/objects/document_fields.rs +++ b/aquadoggo/src/graphql/objects/document_fields.rs @@ -12,7 +12,7 @@ use crate::graphql::utils::{fields_name, graphql_type, with_collection_arguments pub fn build_document_fields_object(schema: &Schema) -> Object { // Construct the document fields object which will be named `Fields` let schema_field_name = fields_name(schema.id()); - let mut document_schema_fields = Object::new(&schema_field_name); + let mut document_schema_fields = Object::new(schema_field_name); // For every field in the schema we create a type with a resolver for (name, field_type) in schema.fields().iter() { diff --git a/aquadoggo/src/graphql/queries/collection.rs b/aquadoggo/src/graphql/queries/collection.rs index 885513414..674ea9dc2 100644 --- a/aquadoggo/src/graphql/queries/collection.rs +++ b/aquadoggo/src/graphql/queries/collection.rs @@ -543,7 +543,7 @@ mod tests { ), ], ], - &key_pair, + key_pair, ) .await } @@ -1366,12 +1366,12 @@ mod tests { let total_count = data["query"]["totalCount"].clone().as_i64().unwrap(); let end_cursor = data["query"]["endCursor"].clone().to_string(); let has_next_page = data["query"]["hasNextPage"].clone().as_bool().unwrap(); - return ( + ( documents_len, total_count as usize, end_cursor, has_next_page, - ); + ) } // Publish some lyrics to the node. 
diff --git a/aquadoggo/src/graphql/queries/document.rs b/aquadoggo/src/graphql/queries/document.rs index 0e0603b97..ea5f89504 100644 --- a/aquadoggo/src/graphql/queries/document.rs +++ b/aquadoggo/src/graphql/queries/document.rs @@ -262,7 +262,7 @@ mod test { // Publish document on node. let view_id = add_document( &mut node, - &schema.id(), + schema.id(), vec![("bool", true.into())], &key_pair, ) diff --git a/aquadoggo/src/http/api.rs b/aquadoggo/src/http/api.rs index c29a47d6f..71c5a3336 100644 --- a/aquadoggo/src/http/api.rs +++ b/aquadoggo/src/http/api.rs @@ -193,7 +193,7 @@ mod tests { fn responds_with_blob_in_http_body(key_pair: KeyPair) { test_runner(|mut node: TestNode| async move { let blob_data = "Hello, World!".as_bytes(); - let blob_view_id = add_blob(&mut node, &blob_data, 6, "text/plain", &key_pair).await; + let blob_view_id = add_blob(&mut node, blob_data, 6, "text/plain", &key_pair).await; let document_id: DocumentId = blob_view_id.to_string().parse().unwrap(); // Make sure to materialize blob on file system @@ -231,7 +231,7 @@ mod tests { fn document_route_responds_with_latest_view(key_pair: KeyPair) { test_runner(|mut node: TestNode| async move { let blob_data = "Hello, World!".as_bytes(); - let blob_view_id = add_blob(&mut node, &blob_data, 6, "text/plain", &key_pair).await; + let blob_view_id = add_blob(&mut node, blob_data, 6, "text/plain", &key_pair).await; let document_id: DocumentId = blob_view_id.to_string().parse().unwrap(); // Make sure to materialize blob on file system @@ -245,7 +245,7 @@ mod tests { // Update the blob let blob_data = "Hello, Panda!".as_bytes(); let blob_view_id_2 = - update_blob(&mut node, &blob_data, 6, &blob_view_id, &key_pair).await; + update_blob(&mut node, blob_data, 6, &blob_view_id, &key_pair).await; blob_task( node.context.clone(), @@ -277,7 +277,7 @@ mod tests { .as_bytes(); let blob_view_id = add_blob( &mut node, - &blob_data, + blob_data, MAX_BLOB_PIECE_LENGTH, "image/svg+xml", &key_pair, @@ -313,7 +313,7 @@ 
mod tests { fn handles_etag_and_if_none_match_precondition(key_pair: KeyPair) { test_runner(|mut node: TestNode| async move { let blob_data = "Hello, World!".as_bytes(); - let blob_view_id = add_blob(&mut node, &blob_data, 6, "text/plain", &key_pair).await; + let blob_view_id = add_blob(&mut node, blob_data, 6, "text/plain", &key_pair).await; let document_id: DocumentId = blob_view_id.to_string().parse().unwrap(); // Make sure to materialize blob on file system @@ -350,7 +350,7 @@ mod tests { // 3. Update the blob let blob_data = "Hello, Panda!".as_bytes(); let blob_view_id_2 = - update_blob(&mut node, &blob_data, 6, &blob_view_id, &key_pair).await; + update_blob(&mut node, blob_data, 6, &blob_view_id, &key_pair).await; // Make sure to materialize blob on file system blob_task( diff --git a/aquadoggo/src/materializer/service.rs b/aquadoggo/src/materializer/service.rs index 95c0bf40a..70bfd78ca 100644 --- a/aquadoggo/src/materializer/service.rs +++ b/aquadoggo/src/materializer/service.rs @@ -227,7 +227,7 @@ mod tests { tokio::time::sleep(Duration::from_millis(500)).await; // Make sure the service did not crash and is still running - assert_eq!(handle.is_finished(), false); + assert!(!handle.is_finished()); // Check database for materialized documents let document = node @@ -300,7 +300,7 @@ mod tests { tokio::time::sleep(Duration::from_millis(200)).await; // Make sure the service did not crash and is still running - assert_eq!(handle.is_finished(), false); + assert!(!handle.is_finished()); // Check database for materialized documents let document = node @@ -386,7 +386,7 @@ mod tests { SCHEMA_ID.parse().unwrap(), "A test schema", ), - &key_pair, + key_pair, ) .await .expect("Publish entry"); @@ -401,7 +401,7 @@ mod tests { tokio::time::sleep(Duration::from_millis(500)).await; // Make sure the service did not crash and is still running - assert_eq!(handle.is_finished(), false); + assert!(!handle.is_finished()); // Check database for materialized documents let document 
= node @@ -473,7 +473,7 @@ mod tests { tokio::time::sleep(Duration::from_millis(200)).await; // Make sure the service did not crash and is still running - assert_eq!(handle.is_finished(), false); + assert!(!handle.is_finished()); // Check database for materialized documents let document = node diff --git a/aquadoggo/src/materializer/tasks/dependency.rs b/aquadoggo/src/materializer/tasks/dependency.rs index 0d75ef6da..4aa289fb9 100644 --- a/aquadoggo/src/materializer/tasks/dependency.rs +++ b/aquadoggo/src/materializer/tasks/dependency.rs @@ -461,7 +461,7 @@ mod tests { let child_document = node .context .store - .get_document(&document_id) + .get_document(document_id) .await .unwrap() .unwrap(); @@ -574,7 +574,7 @@ mod tests { let document_operations = node .context .store - .get_operations_by_document_id(&document_id) + .get_operations_by_document_id(document_id) .await .unwrap(); diff --git a/aquadoggo/src/materializer/tasks/garbage_collection.rs b/aquadoggo/src/materializer/tasks/garbage_collection.rs index 0e109652e..e4a3d153d 100644 --- a/aquadoggo/src/materializer/tasks/garbage_collection.rs +++ b/aquadoggo/src/materializer/tasks/garbage_collection.rs @@ -542,7 +542,7 @@ mod tests { test_runner(|mut node: TestNode| async move { // Create a blob document let blob_data = "Hello, World!".as_bytes(); - let blob_view_id = add_blob(&mut node, &blob_data, 6, "text/plain", &key_pair).await; + let blob_view_id = add_blob(&mut node, blob_data, 6, "text/plain", &key_pair).await; let blob_document_id: DocumentId = blob_view_id.to_string().parse().unwrap(); // Run a blob task which persists the blob to the filesystem. 
@@ -636,7 +636,7 @@ mod tests { test_runner(|mut node: TestNode| async move { // Create a blob document let blob_data = "Hello, World!".as_bytes(); - let blob_view_id = add_blob(&mut node, &blob_data, 6, "text/plain", &key_pair).await; + let blob_view_id = add_blob(&mut node, blob_data, 6, "text/plain", &key_pair).await; let blob_document_id: DocumentId = blob_view_id.to_string().parse().unwrap(); // Run a blob task which persists the blob to the filesystem. @@ -728,7 +728,7 @@ mod tests { test_runner(|mut node: TestNode| async move { // Create a blob document. let blob_data = "Hello, World!".as_bytes(); - let blob_view_id = add_blob(&mut node, &blob_data, 6, "text/plain", &key_pair).await; + let blob_view_id = add_blob(&mut node, blob_data, 6, "text/plain", &key_pair).await; let blob_document_id: DocumentId = blob_view_id.to_string().parse().unwrap(); // Run a blob task which persists the blob to the filesystem. @@ -823,7 +823,7 @@ mod tests { // Any type of relation can keep a blob alive, here we create one of each and run // garbage collection tasks for each blob. 
- let blob_view_id_1 = add_blob(&mut node, &blob_data, 6, "text/plain", &key_pair).await; + let blob_view_id_1 = add_blob(&mut node, blob_data, 6, "text/plain", &key_pair).await; let _ = add_schema_and_documents( &mut node, "img", @@ -836,7 +836,7 @@ mod tests { ) .await; - let blob_view_id_2 = add_blob(&mut node, &blob_data, 6, "text/plain", &key_pair).await; + let blob_view_id_2 = add_blob(&mut node, blob_data, 6, "text/plain", &key_pair).await; let _ = add_schema_and_documents( &mut node, "img", @@ -849,7 +849,7 @@ mod tests { ) .await; - let blob_view_id_3 = add_blob(&mut node, &blob_data, 6, "text/plain", &key_pair).await; + let blob_view_id_3 = add_blob(&mut node, blob_data, 6, "text/plain", &key_pair).await; let _ = add_schema_and_documents( &mut node, "img", @@ -866,7 +866,7 @@ mod tests { ) .await; - let blob_view_id_4 = add_blob(&mut node, &blob_data, 6, "text/plain", &key_pair).await; + let blob_view_id_4 = add_blob(&mut node, blob_data, 6, "text/plain", &key_pair).await; let _ = add_schema_and_documents( &mut node, "img", diff --git a/aquadoggo/src/materializer/tasks/reduce.rs b/aquadoggo/src/materializer/tasks/reduce.rs index 1a374e67a..5f424df8f 100644 --- a/aquadoggo/src/materializer/tasks/reduce.rs +++ b/aquadoggo/src/materializer/tasks/reduce.rs @@ -386,7 +386,7 @@ mod tests { schema.id().to_owned(), ), &schema, - &key_pair, + key_pair, ) .await .unwrap(); @@ -642,7 +642,7 @@ mod tests { schema.id().to_owned(), ), &schema, - &key_pair, + key_pair, ) .await .unwrap(); @@ -652,7 +652,7 @@ mod tests { let pre_materialization_operations = node .context .store - .get_operations_by_document_id(&document_id) + .get_operations_by_document_id(document_id) .await .unwrap(); @@ -666,7 +666,7 @@ mod tests { let post_materialization_operations = node .context .store - .get_operations_by_document_id(&document_id) + .get_operations_by_document_id(document_id) .await .unwrap(); diff --git a/aquadoggo/src/network/peers/behaviour.rs 
b/aquadoggo/src/network/peers/behaviour.rs index 0a608073e..6a631d404 100644 --- a/aquadoggo/src/network/peers/behaviour.rs +++ b/aquadoggo/src/network/peers/behaviour.rs @@ -307,7 +307,7 @@ mod tests { Peer::new(swarm_2_peer_id, ConnectionId::new_unchecked(1)), PeerMessage::SyncMessage(SyncMessage::new( 0, - Message::SyncRequest(0.into(), SchemaIdSet::new(&vec![])), + Message::SyncRequest(0.into(), SchemaIdSet::new(&[])), )), ); @@ -323,8 +323,8 @@ mod tests { #[rstest] #[case( - SchemaIdSet::new(&vec![SchemaId::SchemaFieldDefinition(0)]), - SchemaIdSet::new(&vec![SchemaId::SchemaDefinition(0)]), + SchemaIdSet::new(&[SchemaId::SchemaFieldDefinition(0)]), + SchemaIdSet::new(&[SchemaId::SchemaDefinition(0)]), )] #[case(random_schema_id_set(), random_schema_id_set())] #[tokio::test] diff --git a/aquadoggo/src/replication/service.rs b/aquadoggo/src/replication/service.rs index 06c652f28..8de90aa1e 100644 --- a/aquadoggo/src/replication/service.rs +++ b/aquadoggo/src/replication/service.rs @@ -592,7 +592,7 @@ mod tests { let announcement = Announcement::new(supported_schema_ids.clone()); manager .handle_service_message(ServiceMessage::ReceivedMessage( - remote_peer.clone(), + remote_peer, PeerMessage::Announce(AnnouncementMessage::new(announcement.clone())), )) .await; diff --git a/aquadoggo/src/replication/session.rs b/aquadoggo/src/replication/session.rs index e1852d00b..fb275f091 100644 --- a/aquadoggo/src/replication/session.rs +++ b/aquadoggo/src/replication/session.rs @@ -232,7 +232,7 @@ mod tests { let schema_provider = node_a.context.schema_provider.clone(); populate_and_materialize(&mut node_a, &config).await; - let target_set = SchemaIdSet::new(&vec![config.schema.id().to_owned()]); + let target_set = SchemaIdSet::new(&[config.schema.id().to_owned()]); let mut session = Session::new( &INITIAL_SESSION_ID, &target_set, @@ -253,7 +253,7 @@ mod tests { // 1x Have + 10x Entry + 1x SyncDone = 12 messages assert_eq!(response_messages.len(), 12); - let target_set = 
SchemaIdSet::new(&vec![config.schema.id().to_owned()]); + let target_set = SchemaIdSet::new(&[config.schema.id().to_owned()]); let mut session = Session::new( &INITIAL_SESSION_ID, &target_set, diff --git a/aquadoggo/src/replication/strategies/log_height.rs b/aquadoggo/src/replication/strategies/log_height.rs index 1e4c7d6ce..fec5a1de2 100644 --- a/aquadoggo/src/replication/strategies/log_height.rs +++ b/aquadoggo/src/replication/strategies/log_height.rs @@ -337,7 +337,7 @@ mod tests { expected_entries: &Vec<(DocumentId, SortedIndex)>, ) { // Retrieve the entries. - let entries = retrieve_entries(&node.context.store, &remote_needs).await; + let entries = retrieve_entries(&node.context.store, remote_needs).await; // Map the returned value into a more easily testable form (we assume the entries are // correct, here we are testing the entry retrieval logic mainly) @@ -459,7 +459,7 @@ mod tests { &node, &key_pair, &schema, - &document_a, + document_a, vec![("username", "よつば")], ) .await; @@ -469,7 +469,7 @@ mod tests { &node, &key_pair, &schema, - &document_a, + document_a, vec![("username", "ヂャンボ")], ) .await; @@ -508,7 +508,7 @@ mod tests { ) { test_runner_with_manager(move |manager: TestNodeManager| async move { let schema = config.schema.clone(); - let target_set = SchemaIdSet::new(&vec![schema.id().to_owned()]); + let target_set = SchemaIdSet::new(&[schema.id().to_owned()]); let mut node_a = manager.create().await; populate_and_materialize(&mut node_a, &config).await; @@ -553,7 +553,7 @@ mod tests { config: PopulateStoreConfig, ) { test_runner_with_manager(move |manager: TestNodeManager| async move { - let target_set = SchemaIdSet::new(&vec![config.schema.id().to_owned()]); + let target_set = SchemaIdSet::new(&[config.schema.id().to_owned()]); let mut node_a = manager.create().await; let documents = populate_and_materialize(&mut node_a, &config).await; let document_ids: Vec = @@ -614,7 +614,7 @@ mod tests { &mut node_a, &generate_random_bytes(10), 5, - 
"text/plain".into(), + "text/plain", &key_pair, ) .await; @@ -670,7 +670,7 @@ mod tests { &mut node_a, &generate_random_bytes(10), 2, - "text/plain".into(), + "text/plain", &key_pair, ) .await; diff --git a/aquadoggo/src/test_utils/node.rs b/aquadoggo/src/test_utils/node.rs index 02b038b9e..522fe090d 100644 --- a/aquadoggo/src/test_utils/node.rs +++ b/aquadoggo/src/test_utils/node.rs @@ -235,7 +235,7 @@ pub async fn add_schema_and_documents( documents: Vec)>>, key_pair: &KeyPair, ) -> (Schema, Vec) { - assert!(documents.len() > 0); + assert!(!documents.is_empty()); // Look at first document to automatically derive schema let schema_fields = documents[0] @@ -373,7 +373,7 @@ pub async fn add_blob_pieces( node, &SchemaId::BlobPiece(1), vec![("data", piece.into())], - &key_pair, + key_pair, ) .await; @@ -392,7 +392,7 @@ pub async fn add_blob( ) -> DocumentViewId { let blob_pieces_view_ids = add_blob_pieces(node, body, max_piece_length, key_pair).await; - let blob_view_id = add_document( + add_document( node, &SchemaId::Blob(1), vec![ @@ -400,11 +400,9 @@ pub async fn add_blob( ("mime_type", mime_type.into()), ("pieces", blob_pieces_view_ids.into()), ], - &key_pair, + key_pair, ) - .await; - - blob_view_id + .await } pub async fn update_blob( @@ -416,19 +414,17 @@ pub async fn update_blob( ) -> DocumentViewId { let blob_pieces_view_ids = add_blob_pieces(node, body, max_piece_length, key_pair).await; - let blob_view_id = update_document( + update_document( node, &SchemaId::Blob(1), vec![ ("length", { body.len() as i64 }.into()), ("pieces", blob_pieces_view_ids.into()), ], - &previous, - &key_pair, + previous, + key_pair, ) - .await; - - blob_view_id + .await } // Helper for asserting expected number of items yielded from a SQL query. @@ -519,7 +515,7 @@ pub async fn populate_store(store: &SqlStore, config: &PopulateStoreConfig) -> V .insert_log( entry.log_id(), entry.public_key(), - &config.schema.id(), + config.schema.id(), &document_id, ) .await