diff --git a/templates/rag-conversation/README.md b/templates/rag-conversation/README.md
index 29e85483c4878..43f4a12e3b520 100644
--- a/templates/rag-conversation/README.md
+++ b/templates/rag-conversation/README.md
@@ -1,4 +1,4 @@
-# Conversational RAG 
+# Conversational RAG
 
 This template performs [conversational](https://python.langchain.com/docs/expression_language/cookbook/retrieval#conversational-retrieval-chain) [retrieval](https://python.langchain.com/docs/use_cases/question_answering/), which is one of the most popular LLM use-cases.
 
@@ -10,4 +10,4 @@ Be sure that `OPENAI_API_KEY` is set in order to use the OpenAI models.
 
 ## Pinecone
 
-Be sure that `PINECONE_API_KEY` is set in order to use Pinecone.
+This template uses Pinecone as a vectorstore and requires that `PINECONE_API_KEY`, `PINECONE_ENVIRONMENT`, and `PINECONE_INDEX` are set.
diff --git a/templates/rag-conversation/rag_conversation/chain.py b/templates/rag-conversation/rag_conversation/chain.py
index f6ad06164d2f6..a0f0be1ba4c9c 100644
--- a/templates/rag-conversation/rag_conversation/chain.py
+++ b/templates/rag-conversation/rag_conversation/chain.py
@@ -1,3 +1,4 @@
+import os
 from typing import Tuple, List
 from pydantic import BaseModel
 from operator import itemgetter
@@ -10,6 +11,14 @@
 from langchain.schema.output_parser import StrOutputParser
 from langchain.schema.runnable import RunnablePassthrough, RunnableBranch, RunnableLambda, RunnableMap
 
+if os.environ.get("PINECONE_API_KEY", None) is None:
+    raise Exception("Missing `PINECONE_API_KEY` environment variable.")
+
+if os.environ.get("PINECONE_ENVIRONMENT", None) is None:
+    raise Exception("Missing `PINECONE_ENVIRONMENT` environment variable.")
+
+PINECONE_INDEX_NAME = os.environ.get("PINECONE_INDEX", "langchain-test")
+
 ### Ingest code - you may need to run this the first time
 # Load
 # from langchain.document_loaders import WebBaseLoader
@@ -20,14 +29,14 @@
 # from langchain.text_splitter import RecursiveCharacterTextSplitter
 # text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
 # all_splits = text_splitter.split_documents(data)
-#
+
 # # Add to vectorDB
 # vectorstore = Pinecone.from_documents(
-#     documents=all_splits, embedding=OpenAIEmbeddings(), index_name='langchain-test'
+#     documents=all_splits, embedding=OpenAIEmbeddings(), index_name=PINECONE_INDEX_NAME
 # )
 # retriever = vectorstore.as_retriever()
 
-vectorstore = Pinecone.from_existing_index("langchain-test", OpenAIEmbeddings())
+vectorstore = Pinecone.from_existing_index(PINECONE_INDEX_NAME, OpenAIEmbeddings())
 retriever = vectorstore.as_retriever()
 
 # Condense a chat history and follow-up question into a standalone question
@@ -62,9 +71,9 @@ def _format_chat_history(chat_history: List[Tuple[str, str]]) -> List:
         buffer.append(AIMessage(content=ai))
     return buffer
 
-# User input 
+# User input
 class ChatHistory(BaseModel):
-    chat_history: List[Tuple[str, str]] 
+    chat_history: List[Tuple[str, str]]
     question: str
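
For reference, a minimal usage sketch of the template after this change. The environment variables must be set before `chain.py` is imported, since the new guards raise at import time; the `chain` export and the exact invocation shape are assumptions based on the `ChatHistory` model shown above (LangChain templates conventionally expose the composed runnable as `chain`), not part of this patch:

```python
import os

# Hypothetical placeholder values -- substitute real credentials.
os.environ["PINECONE_API_KEY"] = "..."       # required, validated at import time
os.environ["PINECONE_ENVIRONMENT"] = "..."   # required, validated at import time
os.environ["PINECONE_INDEX"] = "my-index"    # optional; defaults to "langchain-test"

# Assumed export: the composed runnable is conventionally named `chain`,
# though its definition is not shown in this diff.
from rag_conversation.chain import chain

# Input shape follows the ChatHistory model: prior (human, ai) turns plus
# the follow-up question to be condensed and answered over the retriever.
result = chain.invoke(
    {
        "chat_history": [
            ("What does this template do?", "It performs conversational RAG over Pinecone."),
        ],
        "question": "Which environment variables does it need?",
    }
)
print(result)
```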