diff --git a/src/lib.rs b/src/lib.rs index 1717dd2..b66fea9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,8 +14,11 @@ //! Effective utilization of Loreweaver necessitates the implementation of the [`Config`] trait, //! ensuring the provision of mandatory types not preset by default. //! -//! For immediate use of this crate, a functioning Redis instance is necessary, with the following -//! environment variables set: +//! For immediate use of this library you must: +//! - open an account on [openai](https://platform.openai.com/) with an API key +//! - run a Redis instance +//! +//! You must set the following environment variables: //! //! - `OPENAI_API_KEY` //! - `REDIS_PROTOCOL` @@ -237,24 +240,24 @@ pub struct ContextMessage { pub timestamp: String, } -/// Represents a single part of a story containing a list of messages along with other metadata. +/// Represents a single part of a conversation containing a list of messages along with other +/// metadata. /// /// ChatGPT can only hold a limited amount of tokens in a the entire message history/context. -/// Therefore, at every [`Loom::prompt`] execution, we must keep track of the number of -/// `context_tokens` in the current story part and if it exceeds the maximum number of tokens -/// allowed for the current GPT [`Models`], then we must generate a summary of the current story -/// part and use that as the starting point for the next story part. This is one of the biggest -/// challenges for Loreweaver to keep a consistent narrative throughout the many story parts. +/// Therefore, at every [`Loom::prompt`] execution, the total number of `context_tokens` is tracked +/// and if it exceeds the maximum number of tokens allowed for the current GPT [`Models`], then we +/// must generate a summary of the `context_messages` and use that as the starting point for the +/// next `TapestryFragment`. 
#[derive(Debug, Serialize, Deserialize, Default, Clone)] pub struct TapestryFragment { - /// Total number of _GPT tokens_ in the story part. + /// Total number of _GPT tokens_ in the `context_messages`. pub context_tokens: Tokens, - /// List of [`ContextMessage`]s in the story part. + /// List of [`ContextMessage`]s that represents the message history. pub context_messages: Vec, } impl TapestryFragment { - /// Add a [`ContextMessage`] to the story part. + /// Add a [`ContextMessage`] to the `context_messages` list. /// /// Also increments the `context_tokens` by the number of tokens in the message. fn add_message(&mut self, msg: Vec) { @@ -325,11 +328,11 @@ pub trait Loom { /// Generates the summary of the current [`TapestryFragment`] instance. /// /// Returns a tuple of: - /// - The new story part to persist. + /// - The new [`TapestryFragment`] instance to save to storage. /// - The new request messages to prompt ChatGPT with. async fn generate_summary( max_tokens_with_summary: Tokens, - story_part: TapestryFragment, + tapestry_fragment: TapestryFragment, system_req_msg: Self::RequestMessages, ) -> Result<(TapestryFragment, Self::RequestMessages)>; } @@ -459,7 +462,7 @@ impl Loom for Loreweaver { }])?; // get latest tapestry fragment instance from storage - let story_part = T::TapestryChest::get_tapestry_fragment(tapestry_id.clone(), None) + let tapestry_fragment = T::TapestryChest::get_tapestry_fragment(tapestry_id.clone(), None) .await? 
.unwrap_or_default(); @@ -475,7 +478,7 @@ impl Loom for Loreweaver { let request_messages = system_req_msg .clone() .into_iter() - .chain(story_part.context_messages.clone().into_iter().flat_map( + .chain(tapestry_fragment.context_messages.clone().into_iter().flat_map( |msg: ContextMessage| { // Assuming build_messages returns a // Result> @@ -493,20 +496,21 @@ impl Loom for Loreweaver { // generate summary and start new tapestry instance if context tokens are exceed maximum + // the new message token count exceed the amount of allowed tokens let generate_summary = - max_tokens_with_summary <= story_part.context_tokens + msg.count_tokens(); - let (mut story_part_to_persist, mut request_messages) = match generate_summary { + max_tokens_with_summary <= tapestry_fragment.context_tokens + msg.count_tokens(); + let (mut tapestry_fragment_to_persist, mut request_messages) = match generate_summary { true => as Loom>::generate_summary( max_tokens_with_summary, - story_part, + tapestry_fragment, system_req_msg, ) .await?, - false => (story_part, request_messages), + false => (tapestry_fragment, request_messages), }; - let max_tokens = - max_tokens_with_summary - story_part_to_persist.context_tokens - msg.count_tokens(); + let max_tokens = max_tokens_with_summary - + tapestry_fragment_to_persist.context_tokens - + msg.count_tokens(); // add new user message to request_messages which will be used to prompt with // also include the system message to indicate how many words the response should be @@ -539,8 +543,8 @@ impl Loom for Loreweaver { e })?; - // persist new user message and response to the story_part - story_part_to_persist.add_message(vec![ + // persist new user message and response to the tapestry_fragment + tapestry_fragment_to_persist.add_message(vec![ build_context_message(USER_ROLE.to_string(), msg.clone(), account_id.clone()), build_context_message( ASSISTANT_ROLE.to_string(), ), ]); - debug!("Saving story part: {:?}", 
story_part_to_persist); + debug!("Saving tapestry fragment: {:?}", tapestry_fragment_to_persist); // save tapestry fragment to storage - // when summarized, the story_part will be saved to a new instance of the tapestry fragment + // when summarized, the tapestry_fragment will be saved under a new instance T::TapestryChest::save_tapestry_fragment( tapestry_id, - story_part_to_persist, + tapestry_fragment_to_persist, generate_summary, ) .await .map_err(|e| { - error!("Failed to save story part: {}", e); + error!("Failed to save tapestry fragment: {}", e); e })?; @@ -617,10 +621,10 @@ impl Loom for Loreweaver { async fn generate_summary( max_tokens_with_summary: Tokens, - story_part: TapestryFragment, + tapestry_fragment: TapestryFragment, system_req_msg: LoomRequestMessages, ) -> Result<(TapestryFragment, LoomRequestMessages)> { - let tokens_left = max_tokens_with_summary.saturating_sub(story_part.context_tokens); + let tokens_left = max_tokens_with_summary.saturating_sub(tapestry_fragment.context_tokens); if tokens_left == 0 { return Err(Box::new(WeaveError::BadOpenAIRole(format!( "Tokens left cannot be 0: {}", diff --git a/src/storage.rs b/src/storage.rs index 1dfe03d..a2440b1 100644 --- a/src/storage.rs +++ b/src/storage.rs @@ -101,7 +101,7 @@ impl TapestryChestHandler for TapestryChest { Some(instance) => instance, None => { con.zadd(base_key, 0, 0).await.map_err(|e| { - error!("Failed to save story part to Redis: {}", e); + error!("Failed to save tapestry fragment to Redis: {}", e); StorageError::Redis(e) })?; @@ -113,7 +113,7 @@ impl TapestryChestHandler for TapestryChest { // this is to ensure that a we save and access new "instances" of TapestryFragments if increment { con.zincr(base_key, 0, 1).await.map_err(|e| { - error!("Failed to save story part to Redis: {}", e); + error!("Failed to save tapestry fragment to Redis: {}", e); StorageError::Redis(e) })?; @@ -135,7 +135,7 @@ impl TapestryChestHandler for TapestryChest { &key, "context_messages", 
serde_json::to_vec(&tapestry_fragment.context_messages).map_err(|e| { - error!("Failed to serialize story part context_messages: {}", e); + error!("Failed to serialize tapestry fragment context_messages: {}", e); StorageError::Parsing })?, ) @@ -164,7 +164,7 @@ impl TapestryChestHandler for TapestryChest { Some(instance) => instance, None => { con.zadd(base_key, 0, 0).await.map_err(|e| { - error!("Failed to save story part to Redis: {}", e); + error!("Failed to save tapestry fragment to Redis: {}", e); StorageError::Redis(e) })?; @@ -225,7 +225,7 @@ impl TapestryChestHandler for TapestryChest { serde_json::from_slice::>(&context_messages_raw).map_err( |e| { - error!("Failed to parse story part context_messages: {}", e); + error!("Failed to parse tapestry fragment context_messages: {}", e); StorageError::Parsing }, )? @@ -261,7 +261,7 @@ impl TapestryChestHandler for TapestryChest { })?; let tapestry_metadata = serde_json::from_slice::(&metadata_raw).map_err(|e| { - error!("Failed to parse story part context_messages: {}", e); + error!("Failed to parse tapestry fragment context_messages: {}", e); StorageError::Parsing })?; @@ -304,7 +304,7 @@ async fn get_score_from_last_zset_member( ) -> Result, StorageError> { debug!("Executing ZRANGE_WITHSCORES {}...", base_key); let member_score: Vec = con.zrange_withscores(base_key, -1, -1).await.map_err(|e| { - error!("Failed to save story part to Redis: {}", e); + error!("Failed to save tapestry fragment to Redis: {}", e); StorageError::Redis(e) })?; debug!("Result ZRANGE_WITHSCORES: {:?}", member_score);