diff --git a/CHANGELOG.md b/CHANGELOG.md index 88581d72f..432887990 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Remove unused methods from `EntryStore` [#560](https://github.com/p2panda/aquadoggo/pull/560) - Updates for new hash serialization in p2panda-rs [#569](https://github.com/p2panda/aquadoggo/pull/569) - Use `libp2p` `0.5.3` [#570](https://github.com/p2panda/aquadoggo/pull/570) +- Optimize test data generation methods [#572](https://github.com/p2panda/aquadoggo/pull/572) ## Fixed diff --git a/aquadoggo/src/db/stores/blob.rs b/aquadoggo/src/db/stores/blob.rs index 76eab6bb2..ce6ccbbc3 100644 --- a/aquadoggo/src/db/stores/blob.rs +++ b/aquadoggo/src/db/stores/blob.rs @@ -377,13 +377,12 @@ mod tests { use p2panda_rs::schema::SchemaId; use p2panda_rs::test_utils::fixtures::{key_pair, random_document_view_id}; use p2panda_rs::test_utils::generate_random_bytes; - use p2panda_rs::test_utils::memory_store::helpers::PopulateStoreConfig; use rstest::rstest; use crate::db::errors::BlobStoreError; use crate::test_utils::{ add_blob, add_document, add_schema_and_documents, assert_query, populate_and_materialize, - populate_store_config, test_runner, update_document, TestNode, + populate_store_config, test_runner, update_document, PopulateStoreConfig, TestNode, }; use super::BlobStream; @@ -572,7 +571,7 @@ mod tests { #[rstest] fn purge_blob_only_purges_blobs( #[from(populate_store_config)] - #[with(1, 1, 1)] + #[with(1, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, key_pair: KeyPair, ) { diff --git a/aquadoggo/src/db/stores/document.rs b/aquadoggo/src/db/stores/document.rs index 981cbdd05..260596770 100644 --- a/aquadoggo/src/db/stores/document.rs +++ b/aquadoggo/src/db/stores/document.rs @@ -760,7 +760,6 @@ mod tests { use p2panda_rs::test_utils::fixtures::{ key_pair, operation, random_document_id, random_document_view_id, random_operation_id, }; - use 
p2panda_rs::test_utils::memory_store::helpers::{populate_store, PopulateStoreConfig}; use p2panda_rs::WithId; use rstest::rstest; @@ -768,26 +767,26 @@ mod tests { use crate::materializer::tasks::reduce_task; use crate::materializer::TaskInput; use crate::test_utils::{ - add_schema_and_documents, assert_query, build_document, populate_and_materialize, - populate_store_config, test_runner, TestNode, + add_schema_and_documents, assert_query, doggo_schema, populate_and_materialize, + populate_store, populate_store_config, test_runner, PopulateStoreConfig, TestNode, }; #[rstest] fn insert_and_get_one_document_view( #[from(populate_store_config)] - #[with(2, 1, 1)] + #[with(2, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. - let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids.get(0).expect("At least one document id"); + let documents = populate_store(&node.context.store, &config).await; + let document = documents.get(0).expect("At least one document"); // Get the operations and build the document. let operations = node .context .store - .get_operations_by_document_id(&document_id) + .get_operations_by_document_id(document.id()) .await .unwrap(); @@ -859,7 +858,7 @@ mod tests { let retrieved_document = node .context .store - .get_document(&document_id) + .get_document(document.id()) .await .unwrap() .unwrap(); @@ -919,21 +918,18 @@ mod tests { #[rstest] fn inserts_gets_document( #[from(populate_store_config)] - #[with(1, 1, 1)] + #[with(1, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any // resulting documents. 
- let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids.get(0).expect("At least one document id"); - - // Build the document. - let document = build_document(&node.context.store, &document_id).await; + let documents = populate_store(&node.context.store, &config).await; + let document = documents.get(0).expect("At least one document"); // The document is successfully inserted into the database, this relies on the // operations already being present and would fail if they were not. - let result = node.context.store.insert_document(&document).await; + let result = node.context.store.insert_document(document).await; assert!(result.is_ok()); // We can retrieve the most recent document view for this document by its id. @@ -980,17 +976,15 @@ mod tests { #[rstest] fn no_view_when_document_deleted( #[from(populate_store_config)] - #[with(10, 1, 1, true)] + #[with(10, 1, vec![KeyPair::new()], true)] config: PopulateStoreConfig, ) { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any // resulting documents. - let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids.get(0).expect("At least one document id"); + let documents = populate_store(&node.context.store, &config).await; + let document = documents.get(0).expect("At least one document"); - // Get the operations and build the document. - let document = build_document(&node.context.store, &document_id).await; // Get the view id. let view_id = document.view_id(); @@ -998,7 +992,7 @@ mod tests { assert!(document.view().is_none()); // Here we insert the document. This action also sets its most recent view. 
- let result = node.context.store.insert_document(&document).await; + let result = node.context.store.insert_document(document).await; assert!(result.is_ok()); // We retrieve the most recent view for this document by its document id, but as the @@ -1026,26 +1020,23 @@ mod tests { #[rstest] fn get_documents_by_schema_deleted_document( #[from(populate_store_config)] - #[with(10, 1, 1, true)] + #[with(10, 1, vec![KeyPair::new()], true)] config: PopulateStoreConfig, ) { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. - let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids.get(0).expect("At least one document id"); - - // Get the operations and build the document. - let document = build_document(&node.context.store, &document_id).await; + let documents = populate_store(&node.context.store, &config).await; + let document = documents.get(0).expect("At least one document"); // Insert the document, this is possible even though it has been deleted. - let result = node.context.store.insert_document(&document).await; + let result = node.context.store.insert_document(document).await; assert!(result.is_ok()); // When we try to retrieve it by schema id we should NOT get it back. let document_views = node .context .store - .get_documents_by_schema(constants::schema().id()) + .get_documents_by_schema(doggo_schema().id()) .await .unwrap(); assert!(document_views.is_empty()); @@ -1055,19 +1046,19 @@ mod tests { #[rstest] fn updates_a_document( #[from(populate_store_config)] - #[with(10, 1, 1)] + #[with(10, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. 
- let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids.get(0).expect("At least one document id"); + let documents = populate_store(&node.context.store, &config).await; + let document = documents.get(0).expect("At least one document"); // Get the operations for this document and sort them into linear order. let operations = node .context .store - .get_operations_by_document_id(&document_id) + .get_operations_by_document_id(document.id()) .await .unwrap(); let document_builder = DocumentBuilder::from(&operations); @@ -1134,7 +1125,7 @@ mod tests { #[rstest] fn gets_documents_by_schema( #[from(populate_store_config)] - #[with(2, 10, 1)] + #[with(2, 10, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|mut node: TestNode| async move { @@ -1157,17 +1148,22 @@ mod tests { #[rstest] fn prunes_document_view( #[from(populate_store_config)] - #[with(2, 1, 1)] + #[with(2, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|mut node: TestNode| async move { // Populate the store and materialize all documents. - let (_, document_ids) = populate_and_materialize(&mut node, &config).await; - let document_id = document_ids[0].clone(); - let first_document_view_id: DocumentViewId = document_id.as_str().parse().unwrap(); + let documents = populate_and_materialize(&mut node, &config).await; + let document = documents[0].clone(); + let first_document_view_id: DocumentViewId = document.id().as_str().parse().unwrap(); // Get the current document from the store. - let current_document = node.context.store.get_document(&document_id).await.unwrap(); + let current_document = node + .context + .store + .get_document(document.id()) + .await + .unwrap(); // Get the current view id. 
let current_document_view_id = current_document.unwrap().view_id().to_owned(); @@ -1221,15 +1217,15 @@ mod tests { #[rstest] fn does_not_prune_pinned_views( #[from(populate_store_config)] - #[with(2, 1, 1)] + #[with(2, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, key_pair: KeyPair, ) { test_runner(|mut node: TestNode| async move { // Populate the store and materialize all documents. - let (_, document_ids) = populate_and_materialize(&mut node, &config).await; - let document_id = document_ids[0].clone(); - let first_document_view_id: DocumentViewId = document_id.as_str().parse().unwrap(); + let documents = populate_and_materialize(&mut node, &config).await; + let document = documents[0].clone(); + let first_document_view_id: DocumentViewId = document.id().as_str().parse().unwrap(); // Reduce a historic view of an existing document. let _ = reduce_task( @@ -1275,20 +1271,19 @@ mod tests { #[rstest] fn does_not_prune_current_view( #[from(populate_store_config)] - #[with(1, 1, 1)] + #[with(1, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|mut node: TestNode| async move { // Populate the store and materialize all documents. - let (_, document_ids) = populate_and_materialize(&mut node, &config).await; - let document_id = document_ids[0].clone(); - let current_document_view_id: DocumentViewId = document_id.as_str().parse().unwrap(); + let documents = populate_and_materialize(&mut node, &config).await; + let document = documents[0].clone(); // Attempt to prune the current document view. let result = node .context .store - .prune_document_view(¤t_document_view_id) + .prune_document_view(document.view_id()) .await; assert!(result.is_ok()); // Returns `false` when pruning failed. 
@@ -1298,7 +1293,7 @@ mod tests { let document = node .context .store - .get_document_by_view_id(¤t_document_view_id) + .get_document_by_view_id(&document.view_id()) .await .unwrap(); assert!(document.is_some()); @@ -1308,13 +1303,13 @@ mod tests { #[rstest] fn purge_document( #[from(populate_store_config)] - #[with(2, 1, 1)] + #[with(2, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|mut node: TestNode| async move { // Populate the store and materialize all documents. - let (_, document_ids) = populate_and_materialize(&mut node, &config).await; - let document_id = document_ids[0].clone(); + let documents = populate_and_materialize(&mut node, &config).await; + let document_id = documents[0].id(); // There is one document in the database which contains an CREATE and UPDATE operation // which were both published by the same author. These are the number of rows we @@ -1328,7 +1323,7 @@ mod tests { assert_query(&node, "SELECT name FROM document_view_fields", 11).await; // Purge this document from the database, we now expect all tables to be empty. - let result = node.context.store.purge_document(&document_id).await; + let result = node.context.store.purge_document(document_id).await; assert!(result.is_ok(), "{:#?}", result); assert_query(&node, "SELECT entry_hash FROM entries", 0).await; assert_query(&node, "SELECT operation_id FROM operations_v1", 0).await; @@ -1343,13 +1338,13 @@ mod tests { #[rstest] fn purging_only_effects_target_document( #[from(populate_store_config)] - #[with(1, 2, 1)] + #[with(1, 2, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|mut node: TestNode| async move { // Populate the store and materialize all documents. 
- let (_, document_ids) = populate_and_materialize(&mut node, &config).await; - let document_id = document_ids[0].clone(); + let documents = populate_and_materialize(&mut node, &config).await; + let document_id = documents[0].id(); // There are two documents in the database which each contain a single CREATE operation // and they were published by the same author. These are the number of rows we expect @@ -1379,14 +1374,14 @@ mod tests { #[rstest] fn next_args_after_purge( #[from(populate_store_config)] - #[with(2, 1, 1)] + #[with(2, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|mut node: TestNode| async move { // Populate the store and materialize all documents. - let (key_pairs, document_ids) = populate_and_materialize(&mut node, &config).await; - let document_id = document_ids[0].clone(); - let public_key = key_pairs[0].public_key(); + let documents = populate_and_materialize(&mut node, &config).await; + let document_id = documents[0].id(); + let public_key = config.authors[0].public_key(); let _ = node.context.store.purge_document(&document_id).await; diff --git a/aquadoggo/src/db/stores/entry.rs b/aquadoggo/src/db/stores/entry.rs index 7a5b24402..7dfb65f95 100644 --- a/aquadoggo/src/db/stores/entry.rs +++ b/aquadoggo/src/db/stores/entry.rs @@ -299,10 +299,11 @@ mod tests { use p2panda_rs::operation::EncodedOperation; use p2panda_rs::storage_provider::traits::EntryStore; use p2panda_rs::test_utils::fixtures::{encoded_entry, encoded_operation, entry, random_hash}; - use p2panda_rs::test_utils::memory_store::helpers::{populate_store, PopulateStoreConfig}; use rstest::rstest; - use crate::test_utils::{populate_store_config, test_runner, TestNode}; + use crate::test_utils::{ + populate_store, populate_store_config, test_runner, PopulateStoreConfig, TestNode, + }; #[rstest] fn insert_entry( @@ -349,15 +350,15 @@ mod tests { #[rstest] fn try_insert_non_unique_entry( #[from(populate_store_config)] - #[with(2, 1, 1)] + #[with(2, 1, 
vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. - let (key_pairs, _) = populate_store(&node.context.store, &config).await; + let _ = populate_store(&node.context.store, &config).await; // The key pair of the author who published to the note. - let key_pair = key_pairs.get(0).expect("At least one key pair"); + let key_pair = config.authors.get(0).expect("At least one key pair"); // We get back the first entry. let first_entry = node @@ -392,15 +393,16 @@ mod tests { #[rstest] fn latest_entry( #[from(populate_store_config)] - #[with(2, 1, 1)] + #[with(2, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. - let (key_pairs, _) = populate_store(&node.context.store, &config).await; + let _ = populate_store(&node.context.store, &config).await; // The public key of the author who published to the node. - let public_key_in_db = key_pairs + let public_key_in_db = config + .authors .get(0) .expect("At least one key pair") .public_key(); @@ -442,14 +444,15 @@ mod tests { #[rstest] fn entry_by_seq_number( #[from(populate_store_config)] - #[with(10, 1, 1)] + #[with(10, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. - let (key_pairs, _) = populate_store(&node.context.store, &config).await; + let _ = populate_store(&node.context.store, &config).await; // The public key of the author who published to the node. 
- let public_key = key_pairs + let public_key = config + .authors .get(0) .expect("At least one key pair") .public_key(); @@ -516,15 +519,16 @@ mod tests { #[rstest] fn get_entry( #[from(populate_store_config)] - #[with(20, 1, 1)] + #[with(20, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. - let (key_pairs, _) = populate_store(&node.context.store, &config).await; + let _ = populate_store(&node.context.store, &config).await; // The public key of the author who published to the node. - let public_key = key_pairs + let public_key = config + .authors .get(0) .expect("At least one key pair") .public_key(); @@ -571,13 +575,13 @@ mod tests { #[rstest] fn get_entries_from( #[from(populate_store_config)] - #[with(20, 2, 1)] + #[with(20, 2, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. 
- let (key_pairs, _) = populate_store(&node.context.store, &config).await; - let public_key = key_pairs[0].public_key(); + let _ = populate_store(&node.context.store, &config).await; + let public_key = config.authors[0].public_key(); let entries = node .context .store diff --git a/aquadoggo/src/db/stores/operation.rs b/aquadoggo/src/db/stores/operation.rs index 73550759f..1bbf19362 100644 --- a/aquadoggo/src/db/stores/operation.rs +++ b/aquadoggo/src/db/stores/operation.rs @@ -411,8 +411,9 @@ impl From<&DocumentViewFieldRow> for OperationCursor { #[cfg(test)] mod tests { use p2panda_rs::document::materialization::build_graph; + use p2panda_rs::document::traits::AsDocument; use p2panda_rs::document::{DocumentBuilder, DocumentId}; - use p2panda_rs::identity::PublicKey; + use p2panda_rs::identity::{KeyPair, PublicKey}; use p2panda_rs::operation::traits::{AsOperation, WithPublicKey}; use p2panda_rs::operation::{Operation, OperationAction, OperationBuilder, OperationId}; use p2panda_rs::schema::SchemaId; @@ -422,12 +423,12 @@ mod tests { document_id, operation, operation_id, operation_with_schema, public_key, random_document_view_id, random_operation_id, random_previous_operations, schema_id, }; - use p2panda_rs::test_utils::memory_store::helpers::PopulateStoreConfig; use p2panda_rs::WithId; use rstest::rstest; use crate::test_utils::{ - doggo_fields, populate_and_materialize, populate_store_config, test_runner, TestNode, + doggo_fields, populate_and_materialize, populate_store_config, test_runner, + PopulateStoreConfig, TestNode, }; use super::OperationCursor; @@ -582,13 +583,13 @@ mod tests { #[rstest] fn get_operations_by_document_id( #[from(populate_store_config)] - #[with(10, 1, 1)] + #[with(10, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|mut node: TestNode| async move { // Populate the store with some entries and operations and materialize documents. 
- let (_, document_ids) = populate_and_materialize(&mut node, &config).await; - let document_id = document_ids.get(0).expect("At least one document id"); + let documents = populate_and_materialize(&mut node, &config).await; + let document_id = documents.get(0).expect("At least one document id").id(); let operations_by_document_id = node .context diff --git a/aquadoggo/src/db/stores/query.rs b/aquadoggo/src/db/stores/query.rs index c441e41e7..d9a32f721 100644 --- a/aquadoggo/src/db/stores/query.rs +++ b/aquadoggo/src/db/stores/query.rs @@ -1488,7 +1488,6 @@ mod tests { use p2panda_rs::schema::{FieldType, Schema, SchemaId}; use p2panda_rs::storage_provider::traits::DocumentStore; use p2panda_rs::test_utils::fixtures::{key_pair, schema_id}; - use p2panda_rs::test_utils::memory_store::helpers::PopulateStoreConfig; use rstest::rstest; use crate::db::models::{OptionalOwner, QueryRow}; @@ -1499,7 +1498,8 @@ mod tests { use crate::db::types::StorageDocument; use crate::test_utils::{ add_document, add_schema, add_schema_and_documents, doggo_fields, doggo_schema, - populate_and_materialize, populate_store_config, test_runner, TestNode, + populate_and_materialize, populate_store_config, test_runner, PopulateStoreConfig, + TestNode, }; use super::{convert_rows, PaginationCursor, Query}; @@ -2632,7 +2632,7 @@ mod tests { #[from(populate_store_config)] // This config will populate the store with 10 documents which each have their username // field updated from "bubu" (doggo_schema) to "me" - #[with(2, 10, 1, false, doggo_schema(), doggo_fields(), + #[with(2, 10, vec![KeyPair::new()], false, doggo_schema(), doggo_fields(), vec![("username", OperationValue::String("me".to_string()))] )] config: PopulateStoreConfig, diff --git a/aquadoggo/src/db/stores/schema.rs b/aquadoggo/src/db/stores/schema.rs index e00f99f67..4435f178a 100644 --- a/aquadoggo/src/db/stores/schema.rs +++ b/aquadoggo/src/db/stores/schema.rs @@ -129,12 +129,11 @@ mod tests { use p2panda_rs::identity::KeyPair; use 
p2panda_rs::schema::{FieldType, SchemaId, SchemaName}; use p2panda_rs::test_utils::fixtures::{key_pair, random_document_view_id}; - use p2panda_rs::test_utils::memory_store::helpers::PopulateStoreConfig; use rstest::rstest; use crate::test_utils::{ add_document, add_schema, populate_and_materialize, populate_store_config, test_runner, - TestNode, + PopulateStoreConfig, TestNode, }; #[rstest] @@ -257,7 +256,7 @@ mod tests { fn test_get_schema_for_missing_view( random_document_view_id: DocumentViewId, #[from(populate_store_config)] - #[with(2, 10, 1)] + #[with(2, 10, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|mut node: TestNode| async move { diff --git a/aquadoggo/src/graphql/mutations/publish.rs b/aquadoggo/src/graphql/mutations/publish.rs index 688693956..bc079eed3 100644 --- a/aquadoggo/src/graphql/mutations/publish.rs +++ b/aquadoggo/src/graphql/mutations/publish.rs @@ -118,7 +118,6 @@ mod tests { entry_signed_encoded_unvalidated, key_pair, operation_fields, random_hash, update_operation, }; - use p2panda_rs::test_utils::memory_store::helpers::PopulateStoreConfig; use rstest::{fixture, rstest}; use serde_json::json; use tokio::sync::broadcast; @@ -128,7 +127,7 @@ mod tests { use crate::http::HttpServiceContext; use crate::test_utils::{ add_schema, doggo_fields, doggo_schema, http_test_client, populate_and_materialize, - populate_store_config, test_runner, TestNode, + populate_store_config, test_runner, PopulateStoreConfig, TestNode, }; // Schema used in some of the tests in this module, it only has one field so it's easy to @@ -222,7 +221,7 @@ mod tests { #[rstest] fn publish_entry( #[from(populate_store_config)] - #[with(0, 0, 0, false, test_schema())] + #[with(0, 0, vec![], false, test_schema())] config: PopulateStoreConfig, publish_request: Request, ) { @@ -262,7 +261,7 @@ mod tests { #[rstest] fn publish_entry_with_empty_relation_list( #[from(populate_store_config)] - #[with(0, 0, 0, false, test_schema())] + #[with(0, 0, vec![], 
false, test_schema())] config: PopulateStoreConfig, key_pair: KeyPair, ) { @@ -320,7 +319,7 @@ mod tests { #[rstest] fn sends_message_on_communication_bus( #[from(populate_store_config)] - #[with(0, 0, 0, false, test_schema())] + #[with(0, 0, vec![], false, test_schema())] config: PopulateStoreConfig, publish_request: Request, ) { @@ -357,7 +356,7 @@ mod tests { #[rstest] fn post_gql_mutation( #[from(populate_store_config)] - #[with(0, 0, 0, false, test_schema())] + #[with(0, 0, vec![], false, test_schema())] config: PopulateStoreConfig, publish_request: Request, ) { @@ -568,7 +567,7 @@ mod tests { #[case] encoded_operation: &[u8], #[case] expected_error_message: &str, #[from(populate_store_config)] - #[with(0, 0, 0, false, test_schema())] + #[with(0, 0, vec![], false, test_schema())] config: PopulateStoreConfig, ) { // Test that encoded entries and operations are correctly validated when passed into @@ -697,7 +696,7 @@ mod tests { #[case] encoded_operation: &[u8], #[case] expected_error_message: &str, #[from(populate_store_config)] - #[with(10, 1, 1, false, test_schema(), vec![("message", OperationValue::String("Hello!".to_string()))], vec![("message", OperationValue::String("Hello!".to_string()))])] + #[with(10, 1, vec![key_pair(PRIVATE_KEY)], false, test_schema(), vec![("message", OperationValue::String("Hello!".to_string()))], vec![("message", OperationValue::String("Hello!".to_string()))])] config: PopulateStoreConfig, ) { // Test that entries and operations passed into the qraphql publish endpoint adhere to the @@ -740,7 +739,7 @@ mod tests { #[rstest] fn publish_many_entries( #[from(populate_store_config)] - #[with(0, 0, 0, false, doggo_schema())] + #[with(0, 0, vec![], false, doggo_schema())] config: PopulateStoreConfig, ) { test_runner(|mut node: TestNode| async move { @@ -832,13 +831,13 @@ mod tests { #[rstest] fn duplicate_publishing_of_entries( #[from(populate_store_config)] - #[with(1, 1, 1, false, doggo_schema())] + #[with(1, 1, vec![KeyPair::new()], 
false, doggo_schema())] config: PopulateStoreConfig, ) { test_runner(|mut node: TestNode| async move { // Populates the node with entries, operations and schemas. - let (key_pairs, _) = populate_and_materialize(&mut node, &config).await; - let public_key = key_pairs[0].public_key(); + let _ = populate_and_materialize(&mut node, &config).await; + let public_key = config.authors[0].public_key(); // Init the test client. let client = http_test_client(&node).await; diff --git a/aquadoggo/src/graphql/queries/next_args.rs b/aquadoggo/src/graphql/queries/next_args.rs index 88fc6971a..0dab9a980 100644 --- a/aquadoggo/src/graphql/queries/next_args.rs +++ b/aquadoggo/src/graphql/queries/next_args.rs @@ -92,12 +92,13 @@ fn parse_arguments( #[cfg(test)] mod tests { use async_graphql::{value, Response}; - use p2panda_rs::test_utils::memory_store::helpers::PopulateStoreConfig; + use p2panda_rs::{document::traits::AsDocument, identity::KeyPair, test_utils::constants}; use rstest::rstest; use serde_json::json; use crate::test_utils::{ - http_test_client, populate_and_materialize, populate_store_config, test_runner, TestNode, + http_test_client, populate_and_materialize, populate_store_config, test_runner, + PopulateStoreConfig, TestNode, }; #[rstest] @@ -142,19 +143,16 @@ mod tests { #[rstest] fn next_args_valid_query_with_document_id( #[from(populate_store_config)] - #[with(1, 1, 1)] + #[with(1, 1, vec![KeyPair::from_private_key_str(constants::PRIVATE_KEY).unwrap()])] config: PopulateStoreConfig, ) { test_runner(|mut node: TestNode| async move { // Populates the store and materialises documents and schema. 
- let (key_pairs, document_ids) = populate_and_materialize(&mut node, &config).await; + let documents = populate_and_materialize(&mut node, &config).await; let client = http_test_client(&node).await; - let document_id = document_ids.get(0).expect("There should be a document id"); - let public_key = key_pairs - .get(0) - .expect("There should be a key pair") - .public_key(); + let document_id = documents[0].id(); + let public_key = config.authors[0].public_key(); // Selected fields need to be alphabetically sorted because that's what the `json` // macro that is used in the assert below produces. diff --git a/aquadoggo/src/materializer/service.rs b/aquadoggo/src/materializer/service.rs index 7b1ebb45c..95c0bf40a 100644 --- a/aquadoggo/src/materializer/service.rs +++ b/aquadoggo/src/materializer/service.rs @@ -151,9 +151,7 @@ mod tests { use p2panda_rs::storage_provider::traits::DocumentStore; use p2panda_rs::test_utils::constants::SCHEMA_ID; use p2panda_rs::test_utils::fixtures::{key_pair, operation, operation_fields, schema}; - use p2panda_rs::test_utils::memory_store::helpers::{ - populate_store, send_to_store, PopulateStoreConfig, - }; + use p2panda_rs::test_utils::memory_store::helpers::send_to_store; use rstest::rstest; use tokio::sync::{broadcast, oneshot}; use tokio::task; @@ -162,7 +160,8 @@ mod tests { use crate::materializer::{Task, TaskInput}; use crate::schema::SchemaProvider; use crate::test_utils::{ - doggo_fields, doggo_schema, populate_store_config, test_runner, TestNode, + doggo_fields, doggo_schema, populate_store, populate_store_config, test_runner, + PopulateStoreConfig, TestNode, }; use crate::Configuration; @@ -171,14 +170,14 @@ mod tests { #[rstest] fn materialize_document_from_bus( #[from(populate_store_config)] - #[with(1, 1, 1, false, schema(vec![("name".to_string(), FieldType::String)], SCHEMA_ID.parse().unwrap(), "A test schema"), vec![("name", OperationValue::String("panda".into()))])] + #[with(1, 1, vec![KeyPair::new()], false, 
schema(vec![("name".to_string(), FieldType::String)], SCHEMA_ID.parse().unwrap(), "A test schema"), vec![("name", OperationValue::String("panda".into()))])] config: PopulateStoreConfig, ) { // Prepare database which inserts data for one document test_runner(move |node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. - let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids.get(0).expect("Should be one document id"); + let documents = populate_store(&node.context.store, &config).await; + let document_id = documents[0].id(); // We can infer the id of the first operation from the document id let first_operation_id: OperationId = document_id.to_string().parse().unwrap(); @@ -249,13 +248,13 @@ mod tests { #[rstest] fn materialize_document_from_last_runtime( #[from(populate_store_config)] - #[with(1, 1, 1, false, schema(vec![("name".to_string(), FieldType::String)], SCHEMA_ID.parse().unwrap(), "A test schema"), vec![("name", OperationValue::String("panda".into()))])] + #[with(1, 1, vec![KeyPair::new()], false, schema(vec![("name".to_string(), FieldType::String)], SCHEMA_ID.parse().unwrap(), "A test schema"), vec![("name", OperationValue::String("panda".into()))])] config: PopulateStoreConfig, ) { test_runner(move |node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. 
- let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids.get(0).expect("Should be one document id"); + let documents = populate_store(&node.context.store, &config).await; + let document_id = documents[0].id(); // Store a pending "reduce" task from last runtime in the database so it gets picked up by // the materializer service @@ -322,14 +321,14 @@ mod tests { #[rstest] fn materialize_update_document( #[from(populate_store_config)] - #[with(1, 1, 1, false, schema(vec![("name".to_string(), FieldType::String)], SCHEMA_ID.parse().unwrap(), "A test schema"), vec![("name", OperationValue::String("panda".into()))])] + #[with(1, 1, vec![KeyPair::new()], false, schema(vec![("name".to_string(), FieldType::String)], SCHEMA_ID.parse().unwrap(), "A test schema"), vec![("name", OperationValue::String("panda".into()))])] config: PopulateStoreConfig, ) { test_runner(move |node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. 
- let (key_pairs, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids.get(0).expect("Should be one document id"); - let key_pair = key_pairs.get(0).expect("Should be one key pair"); + let documents = populate_store(&node.context.store, &config).await; + let document_id = documents[0].id(); + let key_pair = &config.authors[0]; // We can infer the id of the first operation from the document id let first_operation_id: OperationId = document_id.to_string().parse().unwrap(); @@ -387,7 +386,7 @@ mod tests { SCHEMA_ID.parse().unwrap(), "A test schema", ), - key_pair, + &key_pair, ) .await .expect("Publish entry"); diff --git a/aquadoggo/src/materializer/tasks/dependency.rs b/aquadoggo/src/materializer/tasks/dependency.rs index e9d1de9c3..317541c59 100644 --- a/aquadoggo/src/materializer/tasks/dependency.rs +++ b/aquadoggo/src/materializer/tasks/dependency.rs @@ -253,18 +253,18 @@ mod tests { }; use p2panda_rs::schema::{FieldType, Schema, SchemaId}; use p2panda_rs::storage_provider::traits::{DocumentStore, EntryStore, OperationStore}; + use p2panda_rs::test_utils::constants; use p2panda_rs::test_utils::fixtures::{key_pair, random_document_id, random_document_view_id}; - use p2panda_rs::test_utils::memory_store::helpers::{ - populate_store, send_to_store, PopulateStoreConfig, - }; + use p2panda_rs::test_utils::memory_store::helpers::send_to_store; use p2panda_rs::WithId; use rstest::rstest; use crate::materializer::tasks::reduce_task; use crate::materializer::{Task, TaskInput}; use crate::test_utils::{ - add_document, add_schema, doggo_schema, populate_store_config, schema_from_fields, - test_runner, test_runner_with_manager, TestNode, TestNodeManager, + add_document, add_schema, doggo_schema, populate_store, populate_store_config, + schema_from_fields, test_runner, test_runner_with_manager, PopulateStoreConfig, TestNode, + TestNodeManager, }; use super::dependency_task; @@ -274,7 +274,7 @@ mod tests { populate_store_config( 
1, 1, - 1, + vec![KeyPair::new()], false, schema_from_fields(vec![("profile_picture", OperationValue::Relation(Relation::new(random_document_id())))]), vec![("profile_picture", OperationValue::Relation(Relation::new(random_document_id())))], @@ -286,7 +286,7 @@ mod tests { populate_store_config( 1, 1, - 1, + vec![KeyPair::new()], false, schema_from_fields(vec![ ("favorite_book_images", OperationValue::RelationList( @@ -306,7 +306,7 @@ mod tests { populate_store_config( 1, 1, - 1, + vec![KeyPair::new()], false, schema_from_fields(vec![ ("something_from_the_past", OperationValue::PinnedRelation( @@ -324,7 +324,7 @@ mod tests { populate_store_config( 1, 1, - 1, + vec![KeyPair::new()], false, schema_from_fields(vec![ ("many_previous_drafts", OperationValue::PinnedRelationList( @@ -344,7 +344,7 @@ mod tests { populate_store_config( 1, 1, - 1, + vec![KeyPair::new()], false, schema_from_fields(vec![ ("one_relation_field", OperationValue::PinnedRelationList( @@ -371,7 +371,7 @@ mod tests { populate_store_config( 4, 1, - 1, + vec![KeyPair::new()], false, schema_from_fields(vec![ ("one_relation_field", OperationValue::PinnedRelationList( @@ -405,21 +405,21 @@ mod tests { ) { test_runner(move |node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. 
- let (_, document_ids) = populate_store(&node.context.store, &config).await; + let documents = populate_store(&node.context.store, &config).await; - for document_id in &document_ids { - let input = TaskInput::DocumentId(document_id.clone()); + for document in &documents { + let input = TaskInput::DocumentId(document.id().clone()); reduce_task(node.context.clone(), input) .await .unwrap() .unwrap(); } - for document_id in &document_ids { + for document in &documents { let document = node .context .store - .get_document(document_id) + .get_document(document.id()) .await .unwrap() .unwrap(); @@ -442,15 +442,13 @@ mod tests { fn no_reduce_task_for_materialised_document_relations( key_pair: KeyPair, #[from(populate_store_config)] - #[with(2, 1, 1)] + #[with(2, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|mut node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. - let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids - .get(0) - .expect("Should be at least one document id"); + let documents = populate_store(&node.context.store, &config).await; + let document_id = documents[0].id(); let input = TaskInput::DocumentId(document_id.clone()); reduce_task(node.context.clone(), input) @@ -527,7 +525,7 @@ mod tests { populate_store_config( 2, 1, - 1, + vec![KeyPair::new()], true, schema_from_fields(vec![ ("profile_picture", OperationValue::Relation( @@ -544,7 +542,7 @@ mod tests { populate_store_config( 2, 1, - 1, + vec![KeyPair::new()], true, schema_from_fields(vec![ ("one_relation_field", OperationValue::PinnedRelationList( @@ -568,10 +566,8 @@ mod tests { fn fails_on_deleted_documents(#[case] config: PopulateStoreConfig) { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. 
- let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids - .get(0) - .expect("Should be at least one document id"); + let documents = populate_store(&node.context.store, &config).await; + let document_id = documents[0].id(); let input = TaskInput::DocumentId(document_id.clone()); reduce_task(node.context.clone(), input).await.unwrap(); @@ -599,7 +595,7 @@ mod tests { #[rstest] fn dispatches_schema_tasks_for_field_definitions( #[from(populate_store_config)] - #[with(1, 1, 1, false, Schema::get_system(SchemaId::SchemaFieldDefinition(1)).unwrap().to_owned(), vec![ + #[with(1, 1, vec![KeyPair::new()], false, Schema::get_system(SchemaId::SchemaFieldDefinition(1)).unwrap().to_owned(), vec![ ("name", OperationValue::String("field_name".to_string())), ("type", FieldType::String.into()), ])] @@ -608,10 +604,8 @@ mod tests { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any // resulting documents. - let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids - .get(0) - .expect("Should be at least one document id"); + let documents = populate_store(&node.context.store, &config).await; + let document_id = documents[0].id(); // Materialise the schema field definition. 
let input = TaskInput::DocumentId(document_id.to_owned()); @@ -681,7 +675,7 @@ mod tests { #[case] schema_create_operation: Operation, #[case] expected_schema_tasks: usize, #[from(populate_store_config)] - #[with(1, 1, 1, false, Schema::get_system(SchemaId::SchemaFieldDefinition(1)).unwrap().to_owned(), vec![ + #[with(1, 1, vec![KeyPair::from_private_key_str(constants::PRIVATE_KEY).unwrap()], false, Schema::get_system(SchemaId::SchemaFieldDefinition(1)).unwrap().to_owned(), vec![ ("name", OperationValue::String("field_name".to_string())), ("type", FieldType::String.into()), ])] @@ -690,10 +684,8 @@ mod tests { test_runner(move |node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any // resulting documents. - let (_, document_ids) = populate_store(&node.context.store, &config).await; - let schema_field_document_id = document_ids - .get(0) - .expect("Should be at least one document id"); + let documents = populate_store(&node.context.store, &config).await; + let schema_field_document_id = documents[0].id(); // Materialise the schema field definition. 
let input = TaskInput::DocumentId(schema_field_document_id.to_owned()); diff --git a/aquadoggo/src/materializer/tasks/reduce.rs b/aquadoggo/src/materializer/tasks/reduce.rs index ac12d2a51..e86e91fd9 100644 --- a/aquadoggo/src/materializer/tasks/reduce.rs +++ b/aquadoggo/src/materializer/tasks/reduce.rs @@ -289,14 +289,12 @@ async fn reduce_document + WithPublicKey>( #[cfg(test)] mod tests { - use std::convert::TryFrom; - use p2panda_rs::document::materialization::build_graph; use p2panda_rs::document::traits::AsDocument; use p2panda_rs::document::{ - Document, DocumentBuilder, DocumentId, DocumentViewFields, DocumentViewId, - DocumentViewValue, + DocumentBuilder, DocumentId, DocumentViewFields, DocumentViewId, DocumentViewValue, }; + use p2panda_rs::identity::KeyPair; use p2panda_rs::operation::traits::AsOperation; use p2panda_rs::operation::OperationValue; use p2panda_rs::schema::Schema; @@ -305,16 +303,15 @@ mod tests { use p2panda_rs::test_utils::fixtures::{ operation, operation_fields, random_document_id, random_document_view_id, schema, }; - use p2panda_rs::test_utils::memory_store::helpers::{ - populate_store, send_to_store, PopulateStoreConfig, - }; + use p2panda_rs::test_utils::memory_store::helpers::send_to_store; use p2panda_rs::WithId; use rstest::rstest; use crate::materializer::tasks::reduce_task; use crate::materializer::TaskInput; use crate::test_utils::{ - doggo_fields, doggo_schema, populate_store_config, test_runner, TestNode, + doggo_fields, doggo_schema, generate_key_pairs, populate_store, populate_store_config, + test_runner, PopulateStoreConfig, TestNode, }; #[rstest] @@ -323,7 +320,7 @@ mod tests { #[with( 2, 1, - 20, + generate_key_pairs(20), false, doggo_schema(), doggo_fields(), @@ -333,15 +330,20 @@ mod tests { ) { test_runner(move |node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. 
- let (_, document_ids) = populate_store(&node.context.store, &config).await; + let documents = populate_store(&node.context.store, &config).await; - for document_id in &document_ids { - let input = TaskInput::DocumentId(document_id.clone()); + for document in &documents { + let input = TaskInput::DocumentId(document.id().clone()); assert!(reduce_task(node.context.clone(), input).await.is_ok()); } - for document_id in &document_ids { - let document = node.context.store.get_document(document_id).await.unwrap(); + for document in &documents { + let document = node + .context + .store + .get_document(document.id()) + .await + .unwrap(); assert_eq!( document.unwrap().get("username").unwrap(), @@ -358,7 +360,7 @@ mod tests { #[with( 1, 1, - 1, + vec![KeyPair::new()], false, constants::schema(), constants::test_fields(), @@ -368,14 +370,9 @@ mod tests { ) { test_runner(move |node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. 
- let (key_pairs, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids - .get(0) - .expect("There should be at least one document id"); - - let key_pair = key_pairs - .get(0) - .expect("There should be at least one key_pair"); + let documents = populate_store(&node.context.store, &config).await; + let document_id = documents[0].id(); + let key_pair = &config.authors[0]; let input = TaskInput::DocumentId(document_id.clone()); @@ -397,7 +394,7 @@ mod tests { schema.id().to_owned(), ), &schema, - key_pair, + &key_pair, ) .await .unwrap(); @@ -421,7 +418,7 @@ mod tests { #[with( 2, 1, - 1, + vec![KeyPair::new()], false, doggo_schema(), doggo_fields(), @@ -432,10 +429,8 @@ mod tests { test_runner(move |node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any // resulting documents - let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids - .get(0) - .expect("There should be at least one document id"); + let documents = populate_store(&node.context.store, &config).await; + let document_id = documents[0].id(); // Get the operations let document_operations = node @@ -508,48 +503,45 @@ mod tests { #[rstest] fn deleted_documents_have_no_view( #[from(populate_store_config)] - #[with(3, 1, 2, true)] + #[with(3, 1, generate_key_pairs(2), true)] config: PopulateStoreConfig, ) { test_runner(move |node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. 
- let (_, document_ids) = populate_store(&node.context.store, &config).await; + let documents = populate_store(&node.context.store, &config).await; - for document_id in &document_ids { - let input = TaskInput::DocumentId(document_id.clone()); + for document in &documents { + let input = TaskInput::DocumentId(document.id().clone()); let tasks = reduce_task(node.context.clone(), input).await.unwrap(); assert_eq!(tasks.unwrap().len(), 1); } - for document_id in &document_ids { - let document = node.context.store.get_document(document_id).await.unwrap(); - assert!(document.is_none()) - } - - let document_operations = node - .context - .store - .get_operations_by_document_id(&document_ids[0]) - .await - .unwrap(); + for document in &documents { + let retrieved_document = node + .context + .store + .get_document(document.id()) + .await + .unwrap(); - let document = Document::try_from(&document_operations).unwrap(); + assert!(retrieved_document.is_none()); - let input = TaskInput::DocumentViewId(document.view_id().clone()); - let tasks = reduce_task(node.context.clone(), input).await.unwrap(); + let input = TaskInput::DocumentViewId(document.view_id().clone()); + let tasks = reduce_task(node.context.clone(), input).await.unwrap(); - assert!(tasks.is_none()); + assert!(tasks.is_none()); + } }); } #[rstest] #[case( - populate_store_config(3, 1, 1, false, doggo_schema(), doggo_fields(), doggo_fields()), + populate_store_config(3, 1, vec![KeyPair::new()], false, doggo_schema(), doggo_fields(), doggo_fields()), vec!["garbage_collection".to_string(), "dependency".to_string()] )] // This document is deleted, it shouldn't spawn a dependency task. 
#[case( - populate_store_config(3, 1, 1, true, doggo_schema(), doggo_fields(), doggo_fields()), + populate_store_config(3, 1, vec![KeyPair::new()], true, doggo_schema(), doggo_fields(), doggo_fields()), vec!["garbage_collection".to_string()] )] fn returns_correct_dependency_and_prune_tasks( @@ -559,10 +551,8 @@ mod tests { test_runner(move |node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any // resulting documents. - let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids - .get(0) - .expect("There should be at least one document id"); + let documents = populate_store(&node.context.store, &config).await; + let document_id = documents[0].id(); let input = TaskInput::DocumentId(document_id.clone()); let next_tasks = reduce_task(node.context.clone(), input) @@ -598,14 +588,14 @@ mod tests { #[rstest] fn duplicate_document_view_insertions( #[from(populate_store_config)] - #[with(2, 1, 1)] + #[with(2, 1, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner(|node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any // resulting documents - let (_, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids.get(0).expect("At least one document id"); + let documents = populate_store(&node.context.store, &config).await; + let document_id = documents[0].id(); // Get the operations and build the document let operations = node @@ -637,7 +627,7 @@ mod tests { #[with( 3, 1, - 1, + vec![KeyPair::new()], false, constants::schema(), constants::test_fields(), @@ -647,14 +637,9 @@ mod tests { ) { test_runner(move |node: TestNode| async move { // Populate the store with some entries and operations but DON'T materialise any resulting documents. 
- let (key_pairs, document_ids) = populate_store(&node.context.store, &config).await; - let document_id = document_ids - .get(0) - .expect("There should be at least one document id"); - - let key_pair = key_pairs - .get(0) - .expect("There should be at least one key_pair"); + let documents = populate_store(&node.context.store, &config).await; + let document_id = documents[0].id(); + let key_pair = &config.authors[0]; // Now we create and insert an UPDATE operation for this document which is pointing at // the root CREATE operation. @@ -669,7 +654,7 @@ mod tests { schema.id().to_owned(), ), &schema, - key_pair, + &key_pair, ) .await .unwrap(); diff --git a/aquadoggo/src/replication/manager.rs b/aquadoggo/src/replication/manager.rs index 0665a2273..d6756ff81 100644 --- a/aquadoggo/src/replication/manager.rs +++ b/aquadoggo/src/replication/manager.rs @@ -528,7 +528,6 @@ where #[cfg(test)] mod tests { - use p2panda_rs::test_utils::memory_store::helpers::PopulateStoreConfig; use p2panda_rs::Human; use rstest::rstest; use tokio::sync::broadcast; @@ -541,8 +540,8 @@ mod tests { use crate::schema::SchemaProvider; use crate::test_utils::helpers::random_schema_id_set; use crate::test_utils::{ - populate_and_materialize, populate_store_config, test_runner, test_runner_with_manager, - TestNode, TestNodeManager, + generate_key_pairs, populate_and_materialize, populate_store_config, test_runner, + test_runner_with_manager, PopulateStoreConfig, TestNode, TestNodeManager, }; use super::{SyncManager, INITIAL_SESSION_ID}; @@ -1069,7 +1068,7 @@ mod tests { #[rstest] fn sync_lifetime( #[from(populate_store_config)] - #[with(2, 1, 3)] + #[with(2, 1, generate_key_pairs(3))] config_a: PopulateStoreConfig, #[from(populate_store_config)] config_b: PopulateStoreConfig, ) { diff --git a/aquadoggo/src/replication/session.rs b/aquadoggo/src/replication/session.rs index 560e79bcc..e1852d00b 100644 --- a/aquadoggo/src/replication/session.rs +++ b/aquadoggo/src/replication/session.rs @@ -179,15 
+179,15 @@ impl Session { #[cfg(test)] mod tests { - use p2panda_rs::test_utils::memory_store::helpers::{populate_store, PopulateStoreConfig}; + use p2panda_rs::identity::KeyPair; use rstest::rstest; use crate::replication::manager::INITIAL_SESSION_ID; use crate::replication::{Message, Mode, SchemaIdSet, SessionState}; use crate::test_utils::helpers::random_schema_id_set; use crate::test_utils::{ - populate_and_materialize, populate_store_config, test_runner, test_runner_with_manager, - TestNode, TestNodeManager, + populate_and_materialize, populate_store, populate_store_config, test_runner, + test_runner_with_manager, PopulateStoreConfig, TestNode, TestNodeManager, }; use super::Session; @@ -224,7 +224,7 @@ mod tests { #[rstest] fn correct_strategy_messages( #[from(populate_store_config)] - #[with(5, 2, 1)] + #[with(5, 2, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner_with_manager(move |manager: TestNodeManager| async move { diff --git a/aquadoggo/src/replication/strategies/log_height.rs b/aquadoggo/src/replication/strategies/log_height.rs index 02eee5cfc..1e4c7d6ce 100644 --- a/aquadoggo/src/replication/strategies/log_height.rs +++ b/aquadoggo/src/replication/strategies/log_height.rs @@ -305,6 +305,7 @@ impl Strategy for LogHeightStrategy { #[cfg(test)] mod tests { + use p2panda_rs::document::traits::AsDocument; use p2panda_rs::document::DocumentId; use p2panda_rs::entry::{EncodedEntry, LogId, SeqNum}; use p2panda_rs::identity::KeyPair; @@ -314,7 +315,7 @@ mod tests { use p2panda_rs::schema::{Schema, SchemaId}; use p2panda_rs::test_utils::fixtures::key_pair; use p2panda_rs::test_utils::generate_random_bytes; - use p2panda_rs::test_utils::memory_store::helpers::{send_to_store, PopulateStoreConfig}; + use p2panda_rs::test_utils::memory_store::helpers::send_to_store; use rstest::rstest; use tokio::sync::broadcast; @@ -324,8 +325,9 @@ mod tests { use crate::replication::strategies::log_height::{retrieve_entries, SortedIndex}; use 
crate::replication::{LogHeightStrategy, LogHeights, Message, SchemaIdSet}; use crate::test_utils::{ - add_blob, add_schema_and_documents, populate_and_materialize, populate_store_config, - test_runner_with_manager, TestNode, TestNodeManager, + add_blob, add_schema_and_documents, generate_key_pairs, populate_and_materialize, + populate_store_config, test_runner_with_manager, PopulateStoreConfig, TestNode, + TestNodeManager, }; // Helper for retrieving operations ordered as expected for replication and testing the result. @@ -380,21 +382,21 @@ mod tests { #[rstest] fn retrieves_and_sorts_entries( #[from(populate_store_config)] - #[with(3, 1, 2)] + #[with(3, 1, generate_key_pairs(2))] config: PopulateStoreConfig, ) { test_runner_with_manager(move |manager: TestNodeManager| async move { // Create one node and materialize some documents on it. let mut node = manager.create().await; - let (key_pairs, document_ids) = populate_and_materialize(&mut node, &config).await; + let documents = populate_and_materialize(&mut node, &config).await; let schema = config.schema.clone(); // Collect the values for the two authors and documents. - let key_pair_a = key_pairs.get(0).unwrap(); - let key_pair_b = key_pairs.get(1).unwrap(); + let key_pair_a = config.authors.get(0).unwrap(); + let key_pair_b = config.authors.get(1).unwrap(); - let document_a = document_ids.get(0).unwrap(); - let document_b = document_ids.get(1).unwrap(); + let document_a = documents.get(0).unwrap().id(); + let document_b = documents.get(1).unwrap().id(); // Compose the list of logs the a remote might need. 
let mut remote_needs_all = vec![ @@ -501,7 +503,7 @@ mod tests { #[rstest] fn entry_responses_can_be_ingested( #[from(populate_store_config)] - #[with(5, 2, 1)] + #[with(5, 2, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner_with_manager(move |manager: TestNodeManager| async move { @@ -547,14 +549,15 @@ mod tests { #[rstest] fn calculates_log_heights( #[from(populate_store_config)] - #[with(5, 2, 1)] + #[with(5, 2, vec![KeyPair::new()])] config: PopulateStoreConfig, ) { test_runner_with_manager(move |manager: TestNodeManager| async move { let target_set = SchemaIdSet::new(&vec![config.schema.id().to_owned()]); - let mut node_a = manager.create().await; - let (key_pairs, document_ids) = populate_and_materialize(&mut node_a, &config).await; + let documents = populate_and_materialize(&mut node_a, &config).await; + let document_ids: Vec = + documents.iter().map(AsDocument::id).cloned().collect(); let strategy_a = LogHeightStrategy::new(&target_set, node_a.context.schema_provider.clone()); @@ -562,7 +565,8 @@ mod tests { .local_log_heights(&node_a.context.store, &document_ids) .await; - let expected_log_heights = key_pairs + let expected_log_heights = config + .authors .into_iter() .map(|key_pair| { ( diff --git a/aquadoggo/src/test_utils/helpers.rs b/aquadoggo/src/test_utils/helpers.rs index 64a3603d6..bf57d30db 100644 --- a/aquadoggo/src/test_utils/helpers.rs +++ b/aquadoggo/src/test_utils/helpers.rs @@ -1,14 +1,11 @@ // SPDX-License-Identifier: AGPL-3.0-or-later -use std::convert::TryFrom; - -use p2panda_rs::document::{Document, DocumentId}; use p2panda_rs::hash::Hash; +use p2panda_rs::identity::KeyPair; use p2panda_rs::operation::{ OperationValue, PinnedRelation, PinnedRelationList, Relation, RelationList, }; use p2panda_rs::schema::{Schema, SchemaId, SchemaName}; -use p2panda_rs::storage_provider::traits::OperationStore; use p2panda_rs::test_utils::constants; use p2panda_rs::test_utils::fixtures::{random_document_view_id, schema, schema_fields}; 
use rstest::fixture; @@ -96,18 +93,6 @@ pub fn doggo_fields() -> Vec<(&'static str, OperationValue)> { ] } -/// Build a document from it's stored operations specified by it's document id. -pub async fn build_document(store: &S, document_id: &DocumentId) -> Document { - // We retrieve the operations. - let document_operations = store - .get_operations_by_document_id(document_id) - .await - .expect("Get operations"); - - // Then we construct the document. - Document::try_from(&document_operations).expect("Build the document") -} - /// Helper for constructing a schema from a vec of field values. pub fn schema_from_fields(fields: Vec<(&str, OperationValue)>) -> Schema { schema( @@ -128,3 +113,7 @@ pub fn random_schema_id_set() -> SchemaIdSet { SchemaId::new_application(&SchemaName::new("events").unwrap(), &document_view_id_2); SchemaIdSet::new(&[system_schema_id, schema_id_1, schema_id_2]) } + +pub fn generate_key_pairs(num: u64) -> Vec { + (0..num).map(|_| KeyPair::new()).collect() +} diff --git a/aquadoggo/src/test_utils/mod.rs b/aquadoggo/src/test_utils/mod.rs index 809354444..6ccd10816 100644 --- a/aquadoggo/src/test_utils/mod.rs +++ b/aquadoggo/src/test_utils/mod.rs @@ -10,9 +10,10 @@ mod runner; pub use client::{http_test_client, TestClient}; pub use config::TestConfiguration; pub use db::{drop_database, initialize_db, initialize_sqlite_db}; -pub use helpers::{build_document, doggo_fields, doggo_schema, schema_from_fields}; +pub use helpers::{doggo_fields, doggo_schema, generate_key_pairs, schema_from_fields}; pub use node::{ add_blob, add_document, add_schema, add_schema_and_documents, assert_query, - populate_and_materialize, populate_store_config, update_blob, update_document, TestNode, + populate_and_materialize, populate_store, populate_store_config, update_blob, update_document, + PopulateStoreConfig, TestNode, }; pub use runner::{test_runner, test_runner_with_manager, TestNodeManager}; diff --git a/aquadoggo/src/test_utils/node.rs 
b/aquadoggo/src/test_utils/node.rs index b1668c9ea..535a2137b 100644 --- a/aquadoggo/src/test_utils/node.rs +++ b/aquadoggo/src/test_utils/node.rs @@ -1,15 +1,20 @@ // SPDX-License-Identifier: AGPL-3.0-or-later use log::{debug, info}; -use p2panda_rs::document::{DocumentId, DocumentViewId}; -use p2panda_rs::entry::traits::AsEncodedEntry; +use p2panda_rs::api::helpers::get_skiplink_for_entry; +use p2panda_rs::document::traits::AsDocument; +use p2panda_rs::document::{Document, DocumentBuilder, DocumentId, DocumentViewId}; +use p2panda_rs::entry::encode::{encode_entry, sign_entry}; +use p2panda_rs::entry::traits::{AsEncodedEntry, AsEntry}; +use p2panda_rs::entry::{LogId, SeqNum}; +use p2panda_rs::hash::Hash; use p2panda_rs::identity::KeyPair; +use p2panda_rs::operation::encode::encode_operation; use p2panda_rs::operation::{OperationAction, OperationBuilder, OperationId, OperationValue}; use p2panda_rs::schema::{FieldType, Schema, SchemaId, SchemaName}; -use p2panda_rs::storage_provider::traits::OperationStore; -use p2panda_rs::test_utils::memory_store::helpers::{ - populate_store, send_to_store, PopulateStoreConfig, -}; +use p2panda_rs::storage_provider::traits::{EntryStore, LogStore, OperationStore}; +use p2panda_rs::test_utils::memory_store::helpers::send_to_store; +use p2panda_rs::test_utils::memory_store::PublishedOperation; use rstest::fixture; use sqlx::query_scalar; @@ -24,6 +29,45 @@ pub struct TestNode { pub context: Context, } +/// Configuration used when populating the store for testing. +#[derive(Debug)] +pub struct PopulateStoreConfig { + /// Number of entries per log/document. + pub no_of_entries: usize, + + /// Number of logs for each public key. + pub no_of_logs: usize, + + /// Key pairs of the authors, each with logs populated as defined above. + pub authors: Vec<KeyPair>, + + /// A boolean flag for whether all logs should contain a delete operation. + pub with_delete: bool, + + /// The schema used for all operations in the db.
+ pub schema: Schema, + + /// The fields used for every CREATE operation. + pub create_operation_fields: Vec<(&'static str, OperationValue)>, + + /// The fields used for every UPDATE operation. + pub update_operation_fields: Vec<(&'static str, OperationValue)>, +} + +impl Default for PopulateStoreConfig { + fn default() -> Self { + Self { + no_of_entries: 0, + no_of_logs: 0, + authors: vec![], + with_delete: false, + schema: doggo_schema(), + create_operation_fields: doggo_fields(), + update_operation_fields: doggo_fields(), + } + } +} + /// Fixture for constructing a `PopulateStoreConfig` with default values for aquadoggo tests. /// /// Is passed to `p2panda_rs::test_utils::populate_store` or @@ -44,8 +88,8 @@ pub fn populate_store_config( // Number of logs for each public key #[default(0)] no_of_logs: usize, - // Number of authors, each with logs populated as defined above - #[default(0)] no_of_public_keys: usize, + // Key pairs used in data generation + #[default(vec![])] authors: Vec, // A boolean flag for wether all logs should contain a delete operation #[default(false)] with_delete: bool, @@ -62,7 +106,7 @@ pub fn populate_store_config( PopulateStoreConfig { no_of_entries, no_of_logs, - no_of_public_keys, + authors, with_delete, schema, create_operation_fields, @@ -70,56 +114,6 @@ pub fn populate_store_config( } } -/// Populate the store of a `TestNode` with entries and operations according to the passed config -/// and materialise the resulting documents. Additionally adds the relevant schema to the schema -/// provider. -/// -/// Returns the key pairs of authors who published to the node and id's for all documents that were -/// materialised. -pub async fn populate_and_materialize( - node: &mut TestNode, - config: &PopulateStoreConfig, -) -> (Vec, Vec) { - // Populate the store based with entries and operations based on the passed config. 
- let (key_pairs, document_ids) = populate_store(&node.context.store, config).await; - - // Add the passed schema to the schema store. - // - // Note: The entries and operations which would normally exist for this schema will NOT be - // present in the store, however the node will behave as expect as we directly inserted it into - // the schema provider. - let _ = node - .context - .schema_provider - .update(config.schema.clone()) - .await; - - // Iterate over document id's and materialize into the store. - for document_id in document_ids.clone() { - // Create reduce task input. - let input = TaskInput::DocumentId(document_id); - // Run reduce task and collect returned dependency tasks. - let next_tasks = reduce_task(node.context.clone(), input.clone()) - .await - .expect("Reduce document"); - - // Run dependency tasks. - if let Some(tasks) = next_tasks { - // We only want to issue dependency tasks. - let dependency_tasks = tasks - .iter() - .filter(|task| task.worker_name() == "depenedency"); - - for task in dependency_tasks { - dependency_task(node.context.clone(), task.input().to_owned()) - .await - .expect("Run dependency task"); - } - } - } - (key_pairs, document_ids) -} - /// Publish a document and materialise it in a given `TestNode`. /// /// Also runs dependency task for document. @@ -416,3 +410,182 @@ pub async fn assert_query(node: &TestNode, sql: &str, expected_len: usize) { assert!(result.is_ok(), "{:#?}", result); assert_eq!(result.unwrap().len(), expected_len, "{:?}", sql); } + +/// Helper method for populating the store with test data. +/// +/// Passed parameters define what the store should contain. The first entry in each log contains a +/// valid CREATE operation; following entries contain UPDATE operations. If the with_delete flag is set +/// to true the last entry in all logs will contain a DELETE operation.
+pub async fn populate_store(store: &SqlStore, config: &PopulateStoreConfig) -> Vec { + let mut documents: Vec = Vec::new(); + for key_pair in &config.authors { + for log_id in 0..config.no_of_logs { + let log_id = LogId::new(log_id as u64); + let mut backlink: Option = None; + let mut previous: Option = None; + let mut current_document = None::; + + for seq_num in 1..config.no_of_entries + 1 { + // Create an operation based on the current seq_num and whether this document should + // contain a DELETE operation + let operation = match seq_num { + // First operation is CREATE + 1 => OperationBuilder::new(config.schema.id()) + .fields(&config.create_operation_fields) + .build() + .expect("Error building operation"), + // Last operation is DELETE if the with_delete flag is set + seq if seq == (config.no_of_entries) && config.with_delete => { + OperationBuilder::new(config.schema.id()) + .action(OperationAction::Delete) + .previous(&previous.expect("Previous should be set")) + .build() + .expect("Error building operation") + } + // All other operations are UPDATE + _ => OperationBuilder::new(config.schema.id()) + .action(OperationAction::Update) + .fields(&config.update_operation_fields) + .previous(&previous.expect("Previous should be set")) + .build() + .expect("Error building operation"), + }; + + // Encode the operation. + let encoded_operation = + encode_operation(&operation).expect("Failed encoding operation"); + + // We need to calculate the skiplink. + let seq_num = SeqNum::new(seq_num as u64).unwrap(); + let skiplink = + get_skiplink_for_entry(store, &seq_num, &log_id, &key_pair.public_key()) + .await + .expect("Failed to get skiplink entry"); + + // Construct and sign the entry. + let entry = sign_entry( + &log_id, + &seq_num, + skiplink.as_ref(), + backlink.as_ref(), + &encoded_operation, + key_pair, + ) + .expect("Failed signing entry"); + + // Encode the entry. 
+ let encoded_entry = encode_entry(&entry).expect("Failed encoding entry"); + + // Retrieve or derive the current document id. + let document_id = match current_document.as_ref() { + Some(document) => document.id().to_owned(), + None => encoded_entry.hash().into(), + }; + + // Now we insert values into the database. + + // If the entries' seq num is 1 we insert a new log here. + if entry.seq_num().is_first() { + store + .insert_log( + entry.log_id(), + entry.public_key(), + &config.schema.id(), + &document_id, + ) + .await + .expect("Failed inserting log into store"); + } + + // Insert the entry into the store. + store + .insert_entry(&entry, &encoded_entry, Some(&encoded_operation)) + .await + .expect("Failed inserting entry into store"); + + // Insert the operation into the store. + store + .insert_operation( + &encoded_entry.hash().into(), + entry.public_key(), + &operation, + &document_id, + ) + .await + .expect("Failed inserting operation into store"); + + // Update the operations sorted index. + store + .update_operation_index( + &encoded_entry.hash().into(), + seq_num.as_u64() as i32 - 1, + ) + .await + .expect("Failed updating operation index"); + + // Now we commit any changes to the document we are creating. + let published_operation = PublishedOperation( + encoded_entry.hash().into(), + operation, + key_pair.public_key(), + document_id, + ); + + // Conditionally create or update the document. + if let Some(mut document) = current_document { + document + .commit(&published_operation) + .expect("Failed updating document"); + current_document = Some(document); + } else { + current_document = Some( + DocumentBuilder::from(&vec![published_operation]) + .build() + .expect("Failed to build document"), + ); + } + + // Set values used in the next iteration. + backlink = Some(encoded_entry.hash()); + previous = Some(DocumentViewId::from(encoded_entry.hash())); + } + // Push the final document to the documents vec. 
+ documents.push(current_document.unwrap()); + } + } + documents +} + +/// Populate the store of a `TestNode` with entries and operations according to the passed config +/// and materialise the resulting documents. Additionally adds the relevant schema to the schema +/// provider. +/// +/// Returns the documents that were materialised. +pub async fn populate_and_materialize( + node: &mut TestNode, + config: &PopulateStoreConfig, +) -> Vec<Document> { + // Populate the store with entries and operations based on the passed config. + let documents = populate_store(&node.context.store, config).await; + + // Add the passed schema to the schema store. + // + // Note: The entries and operations which would normally exist for this schema will NOT be + // present in the store, however the node will behave as expected as we directly inserted it into + // the schema provider. + let _ = node + .context + .schema_provider + .update(config.schema.clone()) + .await; + + // Iterate over documents and insert them into the store. + for document in documents.iter() { + node.context + .store + .insert_document(document) + .await + .expect("Failed inserting document"); + } + documents +}