Merge pull request #832 from llmware-ai/update-reqs-configs
updating requirements configuration
doberst authored Jun 4, 2024
2 parents fd318c6 + fb7a9e8 commit 9ba7a92
Showing 7 changed files with 44 additions and 43 deletions.
3 changes: 2 additions & 1 deletion llmware/configs.py
@@ -97,7 +97,8 @@ class LLMWareConfig:
"shared_lib_path": os.path.join(os.path.dirname(os.path.realpath(__file__)), "lib"),
"logging_level": logging.WARNING,
"logging_format": COLOR_WHITE + '%(levelname)-4s: %(message)s' + COLOR_RESET,
"logging_level_by_module": {"llmware.embeddings": 20, "llmware.models": 30, "llmware.agents":20},
"logging_level_by_module": {"llmware.embeddings": 20, "llmware.models": 30, "llmware.agents":20,
"llmware.prompts": 20},
"agent_writer_mode": "screen",
"agent_log_file": "agent_log.txt",
"model_register": {"module": "llmware.models", "class": "register"},
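The new "llmware.prompts" entry gives the prompts module its own level in the per-module logging map (20 and 30 are the standard logging.INFO and logging.WARNING values). A minimal sketch of how a module can resolve its level from this map, assuming llmware is installed and using the get_logging_level_by_module accessor that appears in llmware/prompts.py below:

import logging

from llmware.configs import LLMWareConfig

# a root handler so messages are actually emitted; the per-module level does the filtering
logging.basicConfig(level=logging.DEBUG)

# module-level logger for llmware.prompts
logger = logging.getLogger("llmware.prompts")

# resolve the level from LLMWareConfig's "logging_level_by_module" map
# (same accessor used in llmware/prompts.py below); 20 == logging.INFO
logger.setLevel(LLMWareConfig().get_logging_level_by_module("llmware.prompts"))

logger.info("visible at INFO (20)")
logger.debug("suppressed unless the configured level is lowered to DEBUG (10)")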
27 changes: 11 additions & 16 deletions llmware/prompts.py
@@ -40,6 +40,7 @@
from llmware.configs import LLMWareConfig

logger = logging.getLogger(__name__)
logger.setLevel(level=LLMWareConfig().get_logging_level_by_module(__name__))


class Prompt:
@@ -167,7 +168,7 @@ def __init__(self, llm_name=None, tokenizer=None, model_card=None, library=None,
new_prompt_id = PromptState(self).issue_new_prompt_id()
self.prompt_id = PromptState(self).initiate_new_state_session(new_prompt_id)

logger.info(f"update: creating new prompt id - {new_prompt_id}")
logger.debug(f"update: Prompt - creating new prompt id - {new_prompt_id}")

self.save_prompt_state = save_state

@@ -322,7 +323,7 @@ def register_llm_inference (self, ai_dict, prompt_id=None, trx_dict=None):
ai_dict.update({key:value})

# captures new interaction into the interaction history
logger.info(f"update: ai_dict getting registered - {ai_dict['event_type']}")
logger.debug(f"update: ai_dict getting registered - {ai_dict['event_type']}")

PromptState(self).register_interaction(ai_dict)
new_dialog = {"user": ai_dict["prompt"], "bot": ai_dict["llm_response"]}
@@ -435,7 +436,7 @@ def add_source_wikipedia(self, topic, article_count=3, query=None):
output = Utilities().fast_search_dicts(query, output, remove_stop_words=True)

for i, entries in enumerate(output):
logger.info(f"update: source entries - {i} - {entries}")
logger.debug(f"update: source entries - {i} - {entries}")

# step 2 - package wiki article results as source, loaded to prompt, and packaged as 'llm context'
sources = Sources(self).package_source(output,aggregate_source=True)
@@ -455,7 +456,7 @@ def add_source_yahoo_finance(self, ticker=None, key_list=None):

fin_info = YFinance().ticker(ticker).info

logger.info(f"update: fin_info - {fin_info}")
logger.debug(f"update: fin_info - {fin_info}")

output = ""
if key_list:
@@ -468,7 +469,7 @@ def add_source_yahoo_finance(self, ticker=None, key_list=None):

results = {"file_source": "yfinance-" + str(ticker), "page_num": "na", "text": output}

logger.info(f"update: yfinance results - {results}")
logger.debug(f"update: yfinance results - {results}")

# step 2 - package as source
sources = Sources(self).package_source([results], aggregate_source=True)
@@ -636,7 +637,7 @@ def prompt_with_source(self, prompt, prompt_name=None, source_id_list=None, firs

if temperature:
self.temperature = temperature

# this method assumes a 'closed context' with set of preloaded sources into the prompt
# if len(self.source_materials) == 0:
if not self.verify_source_materials_attached():
@@ -701,20 +702,13 @@ def prompt_with_source(self, prompt, prompt_name=None, source_id_list=None, firs
response_list.append(response_dict)

# log progress of iterations at info level
if not verbose:

logger.info(f"update: prompt_with_sources - iterating through batch - {i} of total "
f"{len(self.source_materials)} - {response_dict}")

logger.info(f"update: usage stats - {response_dict['usage']}")

else:
logger.info(f"update: iterating through source batches - {i} - {response_dict['llm_response']}")
if verbose:
logger.info(f"update: prompt_with_sources - iterating through source batches - {i} - {response_dict['llm_response']}")

# register inferences in state history, linked to prompt_id
for l, llm_inference in enumerate(response_list):

logger.info (f"update: llm inference - {l} - {len(response_list)} - {llm_inference}")
logger.debug (f"update: llm inference - {l} - {len(response_list)} - {llm_inference}")

self.register_llm_inference(llm_inference)
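Most of the edits in prompt_with_source downgrade per-batch progress messages from info to debug and leave a single verbose-gated info line. For orientation, a minimal usage sketch of the method; the model name and topic are illustrative, and the call pattern is assumed from llmware's typical examples rather than stated in this diff:

from llmware.prompts import Prompt

# illustrative sketch: attach a Wikipedia source and run an inference over it
prompter = Prompt().load_model("bling-phi-3-gguf")   # model name is illustrative
prompter.add_source_wikipedia("semiconductor supply chain", article_count=3)

# each batched source yields one response dict, including an "llm_response" field
responses = prompter.prompt_with_source("Summarize the key points.")
for r in responses:
    print(r["llm_response"])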

@@ -1074,6 +1068,7 @@ def summarize_document_fc(self, fp, fn, topic="key points", query=None, text_onl
self.source_materials = self.source_materials[0:max_batch_cap]

if real_time_update:

logger.info(f"update: Prompt - summarize_document_fc - number of source batches - "
f"{len(self.source_materials)}")

11 changes: 3 additions & 8 deletions llmware/requirements.txt
@@ -1,18 +1,13 @@
boto3>=1.24.53
# core requirements for main llmware operations and use cases
# please see also requirements_extras.txt
boto3>=1.24.53
numpy>=1.23.2
openai>=1.0
pymongo>=4.7.0
torch>=1.13.1
transformers>=4.36.0
Wikipedia-API==0.6.0
psycopg-binary==3.1.17
psycopg==3.1.17
pgvector==0.2.4
colorama==0.4.6
einops==0.7.0
librosa>=0.10.0

tokenizers>=0.15.0
huggingface-hub>=0.19.4
requests>=2.31.0

14 changes: 14 additions & 0 deletions llmware/requirements_extras.txt
@@ -0,0 +1,14 @@
# requirements_extras - optional libraries most often used in conjunction with llmware
# these libraries will be installed with the welcome_to_llmware.sh script and are used in many examples

torch>=1.13.1
transformers>=4.36.0
einops==0.7.0
Wikipedia-API>=0.6.0
openai>=1.0
datasets>=2.15.0
yfinance>=0.2.28
pymilvus>=2.3.0
chromadb>=0.4.22
streamlit
Flask
12 changes: 5 additions & 7 deletions setup.py
@@ -56,27 +56,25 @@ def glob_fix(package_name, glob):
'boto3>=1.24.53',
'huggingface-hub>=0.19.4',
'numpy>=1.23.2',
'openai>=1.0.0',
'pymongo>=4.7.0',
'tokenizers>=0.15.0',
'torch>=1.13.1',
'transformers>=4.36.0',
'Wikipedia-API==0.6.0',
'psycopg-binary==3.1.17',
'psycopg==3.1.17',
'pgvector==0.2.4',
'colorama==0.4.6',
'einops==0.7.0',
'librosa>=0.10.0'
],

extras_require={
'milvus': ['pymilvus>=2.3.0'],
'chromadb': ['chromadb>=0.4.22'],
'pinecone': ['pinecone-client==3.0.0'],
'lancedb' :['lancedb==0.5.0'],
'lancedb': ['lancedb==0.5.0'],
'qdrant': ['qdrant-client==1.7.0'],
'redis': ['redis==5.0.1'],
'neo4j': ['neo4j==5.16.0']
'neo4j': ['neo4j==5.16.0'],
'full': ['torch>=1.13.1', 'transformers>=4.36.0', 'einops==0.7.0', 'Wikipedia-API>=0.6.0',
'openai>=1.0', 'datasets>=2.15.0', 'yfinance>=0.2.28', 'pymilvus>=2.3.0',
'chromadb>=0.4.22', 'streamlit', 'Flask']
},
)
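For reference, the dependency list in the new 'full' extra matches llmware/requirements_extras.txt above, so the same optional stack can be pulled in either through pip extras or through the welcome scripts below.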
11 changes: 5 additions & 6 deletions welcome_to_llmware.sh
@@ -3,12 +3,11 @@
# Welcome to LLMWare script - handles some basic setup for first-time cloning of the repo
# Mac / Linux version

# Install dependencies, including several useful libraries used in examples
# Install core dependencies
pip3 install -r ./llmware/requirements.txt
pip3 install chromadb
pip3 install datasets
pip3 install yfinance
pip3 install streamlit

# Note: this step is optional but adds many commonly-used optional dependencies (used in several of the examples)
pip3 install -r ./llmware/requirements_extras.txt

# Move selected examples into root path for easy execution from command line
scp ./examples/Getting_Started/welcome_example.py .
@@ -62,4 +61,4 @@ echo ""

# run welcome_example.py which serves as a test that the installation is successful
echo "Running welcome_example.py"
python3 welcome_example.py
python3 welcome_example.py
9 changes: 4 additions & 5 deletions welcome_to_llmware_windows.sh
@@ -3,12 +3,11 @@
# Welcome to LLMWare script - handles some basic setup for first-time cloning of the repo
# Windows version

# Install dependencies, including several useful libraries used in examples
# Install core dependencies
pip3 install -r ./llmware/requirements.txt
pip3 install chromadb
pip3 install datasets
pip3 install yfinance
pip3 install streamlit

# Note: this step is optional but adds many commonly-used optional dependencies (used in several of the examples)
pip3 install -r ./llmware/requirements_extras.txt

# Move selected examples into root path for easy execution from command line
scp ./examples/Getting_Started/welcome_example.py .
