Skip to content

Commit

Permalink
Merge pull request #30 from startmunich/27-add-openai-api-key-and-clients-in-code
Browse files Browse the repository at this point in the history

first try of openAI
  • Loading branch information
RobinFrasch authored Jun 5, 2024
2 parents d4d2762 + fdebde2 commit ab14f07
Show file tree
Hide file tree
Showing 3 changed files with 19 additions and 49 deletions.
64 changes: 17 additions & 47 deletions apps/slackbot/ai_fcts.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,11 @@
import os
from dotenv import load_dotenv
from langchain.prompts import PromptTemplate
from langchain_community.llms import Replicate
from openai import OpenAI
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chains import RetrievalQA
from langchain_community.vectorstores.qdrant import Qdrant
from qdrant_client import QdrantClient
from langchain_community.embeddings import InfinityEmbeddings
from langchain import hub
from datetime import date



Expand All @@ -18,8 +15,6 @@
# load_dotenv(dotenv_path="apps/slackbot/.env.local")

# trigger new build

# REPLICATE_API_TOKEN = os.environ["REPLICATE_API_KEY"]
qdrant_uri = os.environ["QDRANT_URL"]
qdrant_collection_name = os.environ.get("QDRANT_COLLECTION_NAME")
infinity_api_url = os.environ.get("INFINITY_URL")
Expand All @@ -40,45 +35,11 @@
retriever = qdrant_db.as_retriever(search_kwargs={"k": 3})

# Create the language model
llm = Replicate(
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
model=llm_model,
model_kwargs={"temperature": 0.1, "max_length": 1500, "top_p": 0.9, "top_k": 50, "max_new_tokens": 400,
"min_new_tokens": 20, "repetition_penalty": 0.1},
verbose = False
)



# Create the prompt template
prompt_template = """ [INST]
You are StartGPT, an assistant for question-answering tasks.
The context you get will be from our Notion, Website and Slack. Use this context to answer the question.
If you utilize context from Notion to answer the question, please provide the source link in your answer.
If you utilize context from slack or the website, do not provide a link.
<Beginning of context>
{context}
<End of context>
client = OpenAI(
api_key=os.environ.get("OPENAI_KEY"),
)

<Beginning of question>
{question}
<End of question>
[INST]
"""

# Initialize prompt
prompt_template = PromptTemplate(input_variables=["question", "context"], template=prompt_template)

# prompt = hub.pull("rlm/rag-prompt")

# qa_chain = RetrievalQA.from_chain_type(
# llm=llm,
# retriever=retriever,
# chain_type_kwargs={"prompt": prompt_template},
# verbose=False
# )
def format_docs(docs):
context = ""
for index, doc in enumerate(docs):
Expand All @@ -98,10 +59,19 @@ def get_answer(query: str) -> str:
context = format_docs(docs)

# enter context into the prompt

prompt = prompt_template.format(context=context, question=query)

response = llm.invoke(prompt)
completion = client.chat.completions.create(
model="gpt-4o",
messages=[
{"role": "system", "content": """You are StartGPT, an assistant for question-answering tasks.
The context you get will be from our Notion, Website and Slack. Use this context to answer the question.
If you utilize context from Notion to answer the question, please provide the source link in your answer.
If you utilize context from slack or the website, do not provide a link."""}, # <-- This is the system message that provides context to the model
{"role": "user", "content": f"""<Beginning of context> {context} <End of context> \n
<Beginning of question> {query} <End of question>"""} # <-- This is the user message for which the model will generate a response
]
)

response = completion.choices[0].message.content
# print(f"Retrieved docs: {docs}\n Retrieved context: {context}\n Generated response: {response}")

return f"{response}"
Expand Down
2 changes: 1 addition & 1 deletion apps/slackbot/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,5 +4,5 @@ python-dotenv~=1.0.1
langchain == 0.1.12
langchain_community
langchainhub
replicate == 0.24.0
openai == 1.3.7
qdrant-client == 1.6.4
2 changes: 1 addition & 1 deletion infrastructure/docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,7 @@ services:
INFINITY_MODEL: BAAI/bge-large-en-v1.5
SLACK_APP_TOKEN: $SLACK_APP_TOKEN
SLACK_BOT_TOKEN: $SLACK_BOT_TOKEN
REPLICATE_API_TOKEN: $REPLICATE_API_TOKEN
OPENAI_KEY: $OPENAI_KEY
LLM_MODEL: mistralai/mixtral-8x7b-instruct-v0.1:2b56576fcfbe32fa0526897d8385dd3fb3d36ba6fd0dbe033c72886b81ade93e

webcrawler:
Expand Down

0 comments on commit ab14f07

Please sign in to comment.