Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feat/agent frame mul llm #112

Merged
merged 11 commits into from
Jan 7, 2025
Merged
Show file tree
Hide file tree
Changes from 10 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions auto-agents-framework/.env.sample
Original file line number Diff line number Diff line change
Expand Up @@ -15,9 +15,10 @@ RESPONSE_INTERVAL_MINUTES=26
POST_INTERVAL_MINUTES=30

# LLM Configuration
LARGE_LLM_MODEL=<large_llm_model>
SMALL_LLM_MODEL=<small_llm_model>
OPENAI_API_KEY=<openai_api_key>
ANTHROPIC_API_KEY=<anthropic_api_key>
LLAMA_API_URL=<llama_api_url>
# Configure the models and their sizes in src/config/llm.ts

# AutoDrive Configuration
AUTO_DRIVE_API_KEY=<auto_drive_api_key>
Expand Down
4 changes: 3 additions & 1 deletion auto-agents-framework/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,11 @@
"dependencies": {
"@autonomys/auto-dag-data": "1.2.1",
"@autonomys/auto-drive": "1.2.1",
"@langchain/anthropic": "^0.3.11",
"@langchain/community": "0.3.20",
"@langchain/core": "0.3.19",
"@langchain/core": "^0.3.27",
"@langchain/langgraph": "0.2.36",
"@langchain/ollama": "^0.1.4",
"@langchain/openai": "0.3.16",
"agent-twitter-client": "0.0.18",
"dotenv": "^16.3.1",
Expand Down
10 changes: 6 additions & 4 deletions auto-agents-framework/src/agents/workflows/kol/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@ import { z } from 'zod';
import { Tweet, TwitterApi } from '../../../services/twitter/types.js';
import { ToolNode } from '@langchain/langgraph/prebuilt';
import { ChatOpenAI } from '@langchain/openai';
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatOllama } from '@langchain/ollama';
import { Runnable } from '@langchain/core/runnables';
import { engagementSchema, responseSchema, skippedEngagementSchema, dsnTweet } from './schemas.js';
import { ChatPromptTemplate } from '@langchain/core/prompts';
Expand All @@ -17,10 +19,10 @@ export type WorkflowConfig = Readonly<{
twitterApi: TwitterApi;
toolNode: ToolNode;
llms: Readonly<{
decision: ChatOpenAI;
analyze: ChatOpenAI;
generation: Runnable<BaseLanguageModelInput, AIMessageChunk, ChatOpenAI>;
response: Runnable<BaseLanguageModelInput, AIMessageChunk, ChatOpenAI>;
decision: ChatOpenAI | ChatAnthropic | ChatOllama;
analyze: ChatOpenAI | ChatAnthropic | ChatOllama;
generation: ChatOpenAI | ChatAnthropic | ChatOllama;
response: ChatOpenAI | ChatAnthropic | ChatOllama;
}>;
prompts: Readonly<{
engagementPrompt: ChatPromptTemplate;
Expand Down
23 changes: 6 additions & 17 deletions auto-agents-framework/src/agents/workflows/kol/workflow.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ import { Tweet } from '../../../services/twitter/types.js';
import { trendSchema, summarySchema } from './schemas.js';
import { z } from 'zod';
import { createPrompts } from './prompts.js';
import { LLMFactory } from '../../../services/llm/factory.js';

export const logger = createLogger('agent-workflow');

Expand Down Expand Up @@ -62,7 +63,7 @@ export const State = Annotation.Root({

const createWorkflowConfig = async (characterFile: string): Promise<WorkflowConfig> => {
const { USERNAME, PASSWORD, COOKIES_PATH } = config.twitterConfig;
const { LARGE_LLM_MODEL, SMALL_LLM_MODEL } = config.llmConfig;
const { nodes } = config.llmConfig;

const twitterApi = await createTwitterApi(USERNAME, PASSWORD, COOKIES_PATH);
const { tools } = createTools(twitterApi);
Expand All @@ -74,22 +75,10 @@ const createWorkflowConfig = async (characterFile: string): Promise<WorkflowConf
toolNode,
prompts,
llms: {
decision: new ChatOpenAI({
modelName: SMALL_LLM_MODEL,
temperature: 0.2,
}),
analyze: new ChatOpenAI({
modelName: LARGE_LLM_MODEL,
temperature: 0.5,
}),
generation: new ChatOpenAI({
modelName: LARGE_LLM_MODEL,
temperature: 0.8,
}),
response: new ChatOpenAI({
modelName: LARGE_LLM_MODEL,
temperature: 0.8,
}),
decision: LLMFactory.createModel(nodes.decision),
analyze: LLMFactory.createModel(nodes.analyze),
generation: LLMFactory.createModel(nodes.generation),
response: LLMFactory.createModel(nodes.response),
},
};
};
Expand Down
21 changes: 19 additions & 2 deletions auto-agents-framework/src/config/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import { configSchema } from './schema.js';
import path from 'path';
import { fileURLToPath } from 'url';
import { mkdir } from 'fs/promises';
import { llmConfig } from './llm.js';

const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
Expand Down Expand Up @@ -52,9 +53,25 @@ export const config = (() => {
POST_INTERVAL_MS: (Number(process.env.POST_INTERVAL_MINUTES) || 90) * 60 * 1000,
},
llmConfig: {
LARGE_LLM_MODEL: process.env.LARGE_LLM_MODEL || 'gpt-4o',
SMALL_LLM_MODEL: process.env.SMALL_LLM_MODEL || 'gpt-4o-mini',
configuration: {
large: {
provider: llmConfig.configuration.large.provider,
model: llmConfig.configuration.large.model,
},
small: {
provider: llmConfig.configuration.small.provider,
model: llmConfig.configuration.small.model,
},
},
nodes: {
decision: llmConfig.nodes.decision,
analyze: llmConfig.nodes.analyze,
generation: llmConfig.nodes.generation,
response: llmConfig.nodes.response,
},
OPENAI_API_KEY: process.env.OPENAI_API_KEY || '',
ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY || '',
LLAMA_API_URL: process.env.LLAMA_API_URL || '',
},
autoDriveConfig: {
AUTO_DRIVE_API_KEY: process.env.AUTO_DRIVE_API_KEY,
Expand Down
32 changes: 32 additions & 0 deletions auto-agents-framework/src/config/llm.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
import { LLMNodeConfiguration, LLMSize, LLMProvider, llmModels } from '../services/llm/types.js';

// Central LLM wiring: which provider/model backs each size tier, and which
// tier + sampling temperature each workflow node uses.
export const llmConfig = {
  configuration: {
    // Large tier — backs the heavier nodes (analyze, generation below).
    large: {
      provider: LLMProvider.ANTHROPIC,
      model: llmModels.large.anthropic.claude3sonnet,
    },
    // Small tier — cheaper/faster model for lightweight nodes.
    small: {
      provider: LLMProvider.OPENAI,
      model: llmModels.small.openai.gpt_4o_mini,
    },
  },
  nodes: {
    // `satisfies` validates each literal against LLMNodeConfiguration at
    // compile time; the previous `as` casts were unchecked assertions that
    // would have silently accepted a missing or misspelled field.
    decision: {
      size: LLMSize.SMALL,
      temperature: 0.2,
    } satisfies LLMNodeConfiguration,
    analyze: {
      size: LLMSize.LARGE,
      temperature: 0.5,
    } satisfies LLMNodeConfiguration,
    generation: {
      size: LLMSize.LARGE,
      temperature: 0.8,
    } satisfies LLMNodeConfiguration,
    // NOTE(review): `response` was previously backed by the large model in
    // workflow.ts — confirm the downgrade to the SMALL tier is intentional.
    response: {
      size: LLMSize.SMALL,
      temperature: 0.8,
    } satisfies LLMNodeConfiguration,
  },
};
76 changes: 71 additions & 5 deletions auto-agents-framework/src/config/schema.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import { z } from 'zod';
import { LLMSize, LLMProvider } from '../services/llm/types.js';

const twitterConfigSchema = z.object({
USERNAME: z.string().min(1, 'Twitter username is required'),
Expand All @@ -16,11 +17,76 @@ const twitterConfigSchema = z.object({
POST_INTERVAL_MS: z.number().int().positive(),
});

const llmConfigSchema = z.object({
LARGE_LLM_MODEL: z.string().min(1),
SMALL_LLM_MODEL: z.string().min(1),
OPENAI_API_KEY: z.string().min(1, 'OpenAI API key is required'),
});
const llmConfigSchema = z
.object({
configuration: z.object({
large: z.object({
provider: z.nativeEnum(LLMProvider),
model: z.string(),
}),
small: z.object({
provider: z.nativeEnum(LLMProvider),
model: z.string(),
}),
}),
nodes: z.object({
decision: z.object({
size: z.nativeEnum(LLMSize),
temperature: z.number(),
}),
analyze: z.object({
size: z.nativeEnum(LLMSize),
temperature: z.number(),
}),
generation: z.object({
size: z.nativeEnum(LLMSize),
temperature: z.number(),
}),
response: z.object({
size: z.nativeEnum(LLMSize),
temperature: z.number(),
}),
}),
OPENAI_API_KEY: z.string(),
ANTHROPIC_API_KEY: z.string(),
LLAMA_API_URL: z.string(),
})
.superRefine((data, ctx) => {
const providers = new Set([
data.nodes.decision.size === LLMSize.LARGE
? data.configuration.large.provider
: data.configuration.small.provider,
data.nodes.analyze.size === LLMSize.LARGE
? data.configuration.large.provider
: data.configuration.small.provider,
data.nodes.generation.size === LLMSize.LARGE
? data.configuration.large.provider
: data.configuration.small.provider,
data.nodes.response.size === LLMSize.LARGE
? data.configuration.large.provider
: data.configuration.small.provider,
]);

const missingConfigs = [];

if (providers.has(LLMProvider.OPENAI) && !data.OPENAI_API_KEY) {
missingConfigs.push('OpenAI API key');
}
if (providers.has(LLMProvider.ANTHROPIC) && !data.ANTHROPIC_API_KEY) {
missingConfigs.push('Anthropic API key');
}
if (providers.has(LLMProvider.OLLAMA) && !data.LLAMA_API_URL) {
missingConfigs.push('Llama API URL');
}

if (missingConfigs.length > 0) {
ctx.addIssue({
code: z.ZodIssueCode.custom,
message: `Missing required configs: ${missingConfigs.join(', ')}`,
path: ['llmConfig'],
});
}
});

const autoDriveConfigSchema = z.object({
AUTO_DRIVE_API_KEY: z.string().optional(),
Expand Down
38 changes: 38 additions & 0 deletions auto-agents-framework/src/services/llm/factory.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
import { ChatOpenAI } from '@langchain/openai';
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatOllama } from '@langchain/ollama';
import { LLMProvider, LLMConfiguration, LLMNodeConfiguration } from './types.js';
import { llmConfig } from '../../config/llm.js';
import { config as appConfig } from '../../config/index.js';

export class LLMFactory {
static createModel(node: LLMNodeConfiguration) {
const cfg = llmConfig.configuration[node.size];
return this.createModelFromConfig(cfg, node.temperature);
}

static createModelFromConfig(config: LLMConfiguration, temperature: number) {
switch (config.provider) {
case LLMProvider.OPENAI:
return new ChatOpenAI({
apiKey: appConfig.llmConfig.OPENAI_API_KEY,
model: config.model,
temperature,
});
case LLMProvider.ANTHROPIC:
return new ChatAnthropic({
apiKey: appConfig.llmConfig.ANTHROPIC_API_KEY,
model: config.model,
temperature,
});
case LLMProvider.OLLAMA:
return new ChatOllama({
baseUrl: appConfig.llmConfig.LLAMA_API_URL,
model: config.model,
temperature,
});
default:
throw new Error(`Unsupported LLM provider: ${config.provider}`);
}
}
}
50 changes: 50 additions & 0 deletions auto-agents-framework/src/services/llm/types.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
/** Supported chat-model backends; string values are used by config validation (`z.nativeEnum`). */
export enum LLMProvider {
  OPENAI = 'openai',
  ANTHROPIC = 'anthropic',
  OLLAMA = 'ollama',
}

/** A provider paired with the concrete model identifier to instantiate. */
export type LLMConfiguration = {
  provider: LLMProvider;
  model: string;
};

/** Model size tier; keys into the `small`/`large` entries of the LLM configuration. */
export enum LLMSize {
  SMALL = 'small',
  LARGE = 'large',
}

/** Per-workflow-node settings: which size tier to use and at what sampling temperature. */
export type LLMNodeConfiguration = {
  size: LLMSize;
  temperature: number;
};

export const llmModels = {
large: {
anthropic: {
claude3opus: 'claude-3-opus-20240229',
claude3sonnet: 'claude-3-sonnet-20240229',
Xm0onh marked this conversation as resolved.
Show resolved Hide resolved
},
openai: {
gpt4turbo: 'gpt-4-turbo',
gpt4: 'gpt-4',
Xm0onh marked this conversation as resolved.
Show resolved Hide resolved
},
//placeholder
ollama: {
llama3: 'llama3.1',
},
},
small: {
openai: {
gpt_4o_mini: 'gpt-4o-mini',
gpt35turbo: 'gpt-3.5-turbo',
Xm0onh marked this conversation as resolved.
Show resolved Hide resolved
},
anthropic: {
claude3haiku: 'claude-3-haiku-20240307',
Xm0onh marked this conversation as resolved.
Show resolved Hide resolved
},
//placeholder
ollama: {
llama3: 'llama3.1',
},
},
};
Loading
Loading