generated from kyegomez/Python-Package-Template
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Your Name committed on Sep 12, 2024
1 parent 277bbff, commit 1d6a0ca
Showing 41 changed files with 564 additions and 14 deletions.
@@ -0,0 +1,9 @@
import os

from swarm_models.models import Anthropic

model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))

task = "What is quantum field theory? What are 3 books on the field?"

print(model(task))

@@ -0,0 +1,10 @@
from swarm_models.models import AzureOpenAI

# Initialize Azure OpenAI
model = AzureOpenAI()

# Run the model
model(
    "Create a youtube script for a video on how to use the swarms"
    " framework"
)

@@ -0,0 +1,25 @@
import os
from dotenv import load_dotenv
from swarm_models import AzureOpenAI

# Load the environment variables
load_dotenv()

# Create an instance of the AzureOpenAI class
model = AzureOpenAI(
    azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
    deployment_name=os.getenv("AZURE_OPENAI_DEPLOYMENT"),
    openai_api_version=os.getenv("OPENAI_API_VERSION"),
    openai_api_key=os.getenv("AZURE_OPENAI_API_KEY"),
    azure_ad_token=os.getenv("AZURE_OPENAI_AD_TOKEN"),
)

# Define the prompt
prompt = (
    "Analyze this load document and assess it for any risks and"
    " create a table in markdown format."
)

# Generate a response
response = model(prompt)
print(response)

@@ -0,0 +1,5 @@
from swarm_models.models import Cohere

cohere = Cohere(model="command-light", cohere_api_key="")

out = cohere("Hello, how are you?")

@@ -0,0 +1,23 @@
"""
User task ->> GPT4 for prompt enrichment ->> Dalle3 for image generation
->> GPT4Vision for image captioning ->> Dalle3 better image
"""

import os

from swarm_models.dalle3 import Dalle3

api_key = os.environ["OPENAI_API_KEY"]

dalle3 = Dalle3(openai_api_key=api_key, n=1)

# task = "Swarm of robots working super industrial ambience concept art"

# image_url = dalle3(task)

tasks = ["A painting of a dog", "A painting of a cat"]
results = dalle3.process_batch_concurrently(tasks)

# print(results)

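The docstring in this example describes a full loop (task enrichment, image generation, captioning, regeneration) that the file itself only partially exercises. Below is a minimal sketch of that loop, assuming the OpenAIChat, Dalle3, and GPT4VisionAPI wrappers behave as they do in the other examples in this commit and that GPT4VisionAPI.run accepts an image URL; the prompts and variable names are illustrative, not part of the commit.

import os

from swarm_models import GPT4VisionAPI, OpenAIChat
from swarm_models.dalle3 import Dalle3

api_key = os.getenv("OPENAI_API_KEY")

# Wrappers used as in the other examples in this commit (assumed behavior)
llm = OpenAIChat(api_key=api_key, model_name="gpt-4o-mini")
dalle3 = Dalle3(openai_api_key=api_key, n=1)
vision = GPT4VisionAPI(openai_api_key=api_key, max_tokens=1000)

user_task = "Swarm of robots working, industrial ambience concept art"

# 1. Enrich the raw task into a detailed image prompt
enriched_prompt = llm(
    f"Rewrite this as a detailed image-generation prompt: {user_task}"
)

# 2. Generate a first image from the enriched prompt
image_url = dalle3(enriched_prompt)

# 3. Caption the generated image
caption = vision.run("Describe this image in detail.", image_url)

# 4. Regenerate with the caption folded back into the prompt
better_image_url = dalle3(f"{enriched_prompt}. Refine using: {caption}")
print(better_image_url)
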
@@ -0,0 +1,6 @@
from swarm_models.dalle3 import Dalle3

model = Dalle3()

task = "A painting of a dog"
img = model(task)

@@ -0,0 +1,17 @@
from swarm_models import GPT4VisionAPI

# Initialize with default API key and custom max_tokens
api = GPT4VisionAPI(max_tokens=1000)

# Define the task and image path
task = "Describe the scene in the image."
img = (
    "/home/kye/.swarms/swarms/examples/Screenshot from 2024-02-20"
    " 05-55-34.png"
)

# Run the GPT-4 Vision model
response = api.run(task, img)

# Print the model's response
print(response)

@@ -0,0 +1,33 @@
# Import the Idefics model from the swarm_models.models module
from swarm_models.models import Idefics

# Create an instance of the Idefics model
model = Idefics()

# Define user input with an image URL and chat with the model
user_input = (
    "User: What is in this image?"
    " https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
)
response = model.chat(user_input)
print(response)

# Define another user input with an image URL and chat with the model
user_input = (
    "User: And who is that?"
    " https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
)
response = model.chat(user_input)
print(response)

# Set the checkpoint of the model to "new_checkpoint"
model.set_checkpoint("new_checkpoint")

# Set the device of the model to "cpu"
model.set_device("cpu")

# Set the maximum length of the chat to 200
model.set_max_length(200)

# Clear the chat history of the model
model.clear_chat_history()

@@ -0,0 +1,10 @@
from swarm_models import Kosmos

# Initialize the model
model = Kosmos()

# Generate
out = model.run("Analyze the receipts in this image", "docs.jpg")

# Print the output
print(out)

@@ -0,0 +1,16 @@
from swarm_models import QwenVLMultiModal

# Instantiate the QwenVLMultiModal model
model = QwenVLMultiModal(
    model_name="Qwen/Qwen-VL-Chat",
    device="cuda",
    quantize=True,
)

# Run the model
response = model(
    "Hello, how are you?", "https://example.com/image.jpg"
)

# Print the response
print(response)

@@ -0,0 +1,13 @@
from swarm_models.popular_llms import Fireworks
import os

# Initialize the model
llm = Fireworks(
    temperature=0.2,
    max_tokens=3500,
    openai_api_key=os.getenv("FIREWORKS_API_KEY"),
)

# Run the model
response = llm("What is the meaning of life?")
print(response)

@@ -0,0 +1,7 @@
from swarm_models.fuyu import Fuyu

fuyu = Fuyu()

# This is the default image; you can change it to any image you want
out = fuyu("What is this image?", "images/swarms.jpeg")
print(out)

@@ -0,0 +1,20 @@
import os

from dotenv import load_dotenv

from swarm_models.gemini import Gemini

load_dotenv()

api_key = os.environ["GEMINI_API_KEY"]

# Initialize the model
model = Gemini(gemini_api_key=api_key)

# Establish the prompt and image
task = "What is your name?"
img = "images/github-banner-swarms.png"

# Run the model
out = model.run(task, img=img)
print(out)

@@ -0,0 +1,35 @@
import os  # Import the os module for working with the operating system

from dotenv import (
    load_dotenv,  # Import the load_dotenv function from the dotenv module
)

from swarm_models import (
    GPT4VisionAPI,  # Import the GPT4VisionAPI class from the swarm_models module
)

# Load the environment variables
load_dotenv()

# Get the API key from the environment variables
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the GPT4VisionAPI class with the API key and model name
gpt4vision = GPT4VisionAPI(
    openai_api_key=api_key,
    model_name="gpt-4o",
    max_tokens=1000,
    openai_proxy="https://api.openai.com/v1/chat/completions",
)

# Define the image to analyze
img = "ear.png"

# Define the task to perform on the image
task = "What is this image"

# Run the GPT4VisionAPI on the image with the specified task
answer = gpt4vision.run(task, img, return_json=True)

# Print the answer
print(answer)

@@ -0,0 +1,16 @@
from swarm_models import OpenAIChat
import os

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(api_key=api_key, model_name="gpt-4o-mini")

# Query the model with a question
out = model(
    "What is the best state to register a business in the US for the least amount of taxes?"
)

# Print the model's response
print(out)

@@ -0,0 +1,18 @@
import os
from swarm_models import OpenAIChat

# Example usage:
api_key = os.getenv("GROQ_API_KEY")

# Model
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)

out = model("What is the essence of quantum field theory?")
print(out)

@@ -0,0 +1,71 @@
from swarm_models import Agent
from swarm_models.prompts.finance_agent_sys_prompt import (
    FINANCIAL_AGENT_SYS_PROMPT,
)
import torch
from swarm_models import BaseLLM
from transformers import AutoTokenizer, LlamaForCausalLM


class NvidiaLlama31B(BaseLLM):
    # Load the tokenizer and model
    def __init__(self, max_tokens: int = 2048):
        self.max_tokens = max_tokens
        model_path = "nvidia/Llama-3.1-Minitron-4B-Width-Base"
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)

        device = "cuda"
        dtype = torch.bfloat16
        self.model = LlamaForCausalLM.from_pretrained(
            model_path, torch_dtype=dtype, device_map=device
        )

    def run(self, task: str):
        # Prepare the input text
        inputs = self.tokenizer.encode(task, return_tensors="pt").to(
            self.model.device
        )

        # Generate the output
        outputs = self.model.generate(
            inputs, max_length=self.max_tokens
        )

        # Decode and print the output
        output_text = self.tokenizer.decode(outputs[0])
        print(output_text)

        return output_text


# # Example usage:
# model = NvidiaLlama31B()
# out = model.run("What is the essence of quantum field theory?")
# print(out)

model = NvidiaLlama31B()

# Initialize the agent
agent = Agent(
    agent_name="Financial-Analysis-Agent_sas_chicken_eej",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=2,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=True,
    disable_print_every_step=True,
    output_type="json",
)

out = agent.run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
)
print(out)

@@ -0,0 +1,8 @@
from swarm_models.models import HuggingfaceLLM

model_id = "NousResearch/Yarn-Mistral-7b-128k"
inference = HuggingfaceLLM(model_id=model_id)

task = "Once upon a time"
generated_text = inference(task)
print(generated_text)

@@ -0,0 +1,33 @@
# Import the Idefics model from the swarm_models.models module
from swarm_models.models import Idefics

# Create an instance of the Idefics model
model = Idefics()

# Define user input with an image URL and chat with the model
user_input = (
    "User: What is in this image?"
    " https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
)
response = model.chat(user_input)
print(response)

# Define another user input with an image URL and chat with the model
user_input = (
    "User: And who is that?"
    " https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
)
response = model.chat(user_input)
print(response)

# Set the checkpoint of the model to "new_checkpoint"
model.set_checkpoint("new_checkpoint")

# Set the device of the model to "cpu"
model.set_device("cpu")

# Set the maximum length of the chat to 200
model.set_max_length(200)

# Clear the chat history of the model
model.clear_chat_history()