Skip to content

Commit

Permalink
Create and organize types for different LLM models and sizes
Browse files Browse the repository at this point in the history
  • Loading branch information
Xm0onh committed Jan 6, 2025
1 parent ecad72c commit d8eb33e
Show file tree
Hide file tree
Showing 7 changed files with 172 additions and 67 deletions.
3 changes: 2 additions & 1 deletion auto-agents-framework/.env.sample
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,8 @@ POST_INTERVAL_MINUTES=30
OPENAI_API_KEY=<openai_api_key>
ANTHROPIC_API_KEY=<anthropic_api_key>
LLAMA_API_URL=<llama_api_url>
# Config the models in src/config/llm.ts
# Config the models and sizes in src/config/llm.ts

# AutoDrive Configuration
AUTO_DRIVE_API_KEY=<auto_drive_api_key>
AUTO_DRIVE_ENCRYPTION_PASSWORD=<auto_drive_encryption_password>
Expand Down
17 changes: 5 additions & 12 deletions auto-agents-framework/src/agents/workflows/kol/workflow.ts
Original file line number Diff line number Diff line change
Expand Up @@ -63,14 +63,7 @@ export const State = Annotation.Root({

const createWorkflowConfig = async (characterFile: string): Promise<WorkflowConfig> => {
const { USERNAME, PASSWORD, COOKIES_PATH } = config.twitterConfig;
const {
DECISION_LLM_PROVIDER,
ANALYZE_LLM_PROVIDER,
GENERATION_LLM_PROVIDER,
RESPONSE_LLM_PROVIDER,
LARGE_LLM_MODEL,
SMALL_LLM_MODEL,
} = config.llmConfig;
const { nodes } = config.llmConfig;

const twitterApi = await createTwitterApi(USERNAME, PASSWORD, COOKIES_PATH);
const { tools } = createTools(twitterApi);
Expand All @@ -82,10 +75,10 @@ const createWorkflowConfig = async (characterFile: string): Promise<WorkflowConf
toolNode,
prompts,
llms: {
decision: LLMFactory.createModel(DECISION_LLM_PROVIDER, SMALL_LLM_MODEL, 0.2),
analyze: LLMFactory.createModel(ANALYZE_LLM_PROVIDER, LARGE_LLM_MODEL, 0.5),
generation: LLMFactory.createModel(GENERATION_LLM_PROVIDER, LARGE_LLM_MODEL, 0.8),
response: LLMFactory.createModel(RESPONSE_LLM_PROVIDER, LARGE_LLM_MODEL, 0.8),
decision: LLMFactory.createModel(nodes.decision),
analyze: LLMFactory.createModel(nodes.analyze),
generation: LLMFactory.createModel(nodes.generation),
response: LLMFactory.createModel(nodes.response),
},
};
};
Expand Down
22 changes: 16 additions & 6 deletions auto-agents-framework/src/config/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -53,12 +53,22 @@ export const config = (() => {
POST_INTERVAL_MS: (Number(process.env.POST_INTERVAL_MINUTES) || 90) * 60 * 1000,
},
llmConfig: {
DECISION_LLM_PROVIDER: llmConfig.DECISION_LLM_PROVIDER,
ANALYZE_LLM_PROVIDER: llmConfig.ANALYZE_LLM_PROVIDER,
GENERATION_LLM_PROVIDER: llmConfig.GENERATION_LLM_PROVIDER,
RESPONSE_LLM_PROVIDER: llmConfig.RESPONSE_LLM_PROVIDER,
LARGE_LLM_MODEL: llmConfig.LARGE_LLM_MODEL,
SMALL_LLM_MODEL: llmConfig.SMALL_LLM_MODEL,
configuration: {
large: {
provider: llmConfig.configuration.large.provider,
model: llmConfig.configuration.large.model,
},
small: {
provider: llmConfig.configuration.small.provider,
model: llmConfig.configuration.small.model,
},
},
nodes: {
decision: llmConfig.nodes.decision,
analyze: llmConfig.nodes.analyze,
generation: llmConfig.nodes.generation,
response: llmConfig.nodes.response,
},
OPENAI_API_KEY: process.env.OPENAI_API_KEY || '',
ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY || '',
LLAMA_API_URL: process.env.LLAMA_API_URL || '',
Expand Down
36 changes: 30 additions & 6 deletions auto-agents-framework/src/config/llm.ts
Original file line number Diff line number Diff line change
@@ -1,8 +1,32 @@
import { LLMNodeConfiguration, LLMSize, LLMProvider, llmModels } from '../services/llm/types.js';

export const llmConfig = {
DECISION_LLM_PROVIDER: 'anthropic',
ANALYZE_LLM_PROVIDER: 'anthropic',
GENERATION_LLM_PROVIDER: 'anthropic',
RESPONSE_LLM_PROVIDER: 'anthropic',
LARGE_LLM_MODEL: 'claude-3-opus-20240229',
SMALL_LLM_MODEL: 'claude-3-5-sonnet-20240620',
configuration: {
large: {
provider: LLMProvider.ANTHROPIC,
model: llmModels.large.anthropic.claude3sonnet,
},
small: {
provider: LLMProvider.OPENAI,
model: llmModels.small.openai.gpt_4o_mini,
},
},
nodes: {
decision: {
size: LLMSize.SMALL,
temperature: 0.2,
} as LLMNodeConfiguration,
analyze: {
size: LLMSize.LARGE,
temperature: 0.5,
} as LLMNodeConfiguration,
generation: {
size: LLMSize.LARGE,
temperature: 0.8,
} as LLMNodeConfiguration,
response: {
size: LLMSize.SMALL,
temperature: 0.8,
} as LLMNodeConfiguration,
},
};
67 changes: 49 additions & 18 deletions auto-agents-framework/src/config/schema.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import { z } from 'zod';
import { LLMSize, LLMProvider } from '../services/llm/types.js';

const twitterConfigSchema = z.object({
USERNAME: z.string().min(1, 'Twitter username is required'),
Expand All @@ -18,33 +19,63 @@ const twitterConfigSchema = z.object({

const llmConfigSchema = z
.object({
DECISION_LLM_PROVIDER: z.enum(['openai', 'anthropic', 'llama']).default('openai'),
ANALYZE_LLM_PROVIDER: z.enum(['openai', 'anthropic', 'llama']).default('openai'),
GENERATION_LLM_PROVIDER: z.enum(['openai', 'anthropic', 'llama']).default('openai'),
RESPONSE_LLM_PROVIDER: z.enum(['openai', 'anthropic', 'llama']).default('openai'),
SMALL_LLM_MODEL: z.string().min(1),
LARGE_LLM_MODEL: z.string().min(1),
OPENAI_API_KEY: z.string().optional(),
ANTHROPIC_API_KEY: z.string().optional(),
LLAMA_API_URL: z.string().optional(),
configuration: z.object({
large: z.object({
provider: z.nativeEnum(LLMProvider),
model: z.string(),
}),
small: z.object({
provider: z.nativeEnum(LLMProvider),
model: z.string(),
}),
}),
nodes: z.object({
decision: z.object({
size: z.nativeEnum(LLMSize),
temperature: z.number(),
}),
analyze: z.object({
size: z.nativeEnum(LLMSize),
temperature: z.number(),
}),
generation: z.object({
size: z.nativeEnum(LLMSize),
temperature: z.number(),
}),
response: z.object({
size: z.nativeEnum(LLMSize),
temperature: z.number(),
}),
}),
OPENAI_API_KEY: z.string(),
ANTHROPIC_API_KEY: z.string(),
LLAMA_API_URL: z.string(),
})
.superRefine((data, ctx) => {
const providers = [
data.DECISION_LLM_PROVIDER,
data.ANALYZE_LLM_PROVIDER,
data.GENERATION_LLM_PROVIDER,
data.RESPONSE_LLM_PROVIDER,
];
const providers = new Set([
data.nodes.decision.size === LLMSize.LARGE
? data.configuration.large.provider
: data.configuration.small.provider,
data.nodes.analyze.size === LLMSize.LARGE
? data.configuration.large.provider
: data.configuration.small.provider,
data.nodes.generation.size === LLMSize.LARGE
? data.configuration.large.provider
: data.configuration.small.provider,
data.nodes.response.size === LLMSize.LARGE
? data.configuration.large.provider
: data.configuration.small.provider,
]);

const missingConfigs = [];

if (providers.includes('openai') && !data.OPENAI_API_KEY) {
if (providers.has(LLMProvider.OPENAI) && !data.OPENAI_API_KEY) {
missingConfigs.push('OpenAI API key');
}
if (providers.includes('anthropic') && !data.ANTHROPIC_API_KEY) {
if (providers.has(LLMProvider.ANTHROPIC) && !data.ANTHROPIC_API_KEY) {
missingConfigs.push('Anthropic API key');
}
if (providers.includes('llama') && !data.LLAMA_API_URL) {
if (providers.has(LLMProvider.OLLAMA) && !data.LLAMA_API_URL) {
missingConfigs.push('Llama API URL');
}

Expand Down
43 changes: 20 additions & 23 deletions auto-agents-framework/src/services/llm/factory.ts
Original file line number Diff line number Diff line change
@@ -1,41 +1,38 @@
import { LLMProvider } from './types.js';
import { ChatOpenAI } from '@langchain/openai';
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatOllama } from '@langchain/ollama';
import { config } from '../../config/index.js';
import { LLMProvider, LLMConfiguration, LLMNodeConfiguration } from './types.js';
import { llmConfig } from '../../config/llm.js';
import { config as appConfig } from '../../config/index.js';

export class LLMFactory {
static createModel(
provider: LLMProvider,
model: string,
temperature: number,
): ChatOpenAI | ChatAnthropic | ChatOllama {
switch (provider) {
case 'openai':
static createModel(node: LLMNodeConfiguration) {
const cfg = llmConfig.configuration[node.size];
return this.createModelFromConfig(cfg, node.temperature);
}

static createModelFromConfig(config: LLMConfiguration, temperature: number) {
switch (config.provider) {
case LLMProvider.OPENAI:
return new ChatOpenAI({
apiKey: config.llmConfig.OPENAI_API_KEY,
model,
apiKey: appConfig.llmConfig.OPENAI_API_KEY,
model: config.model,
temperature,
});

case 'anthropic':
case LLMProvider.ANTHROPIC:
return new ChatAnthropic({
apiKey: config.llmConfig.ANTHROPIC_API_KEY,
model,
apiKey: appConfig.llmConfig.ANTHROPIC_API_KEY,
model: config.model,
temperature,
});

case 'llama':
case LLMProvider.OLLAMA:
return new ChatOllama({
baseUrl: config.llmConfig.LLAMA_API_URL || '',
model,
baseUrl: appConfig.llmConfig.LLAMA_API_URL,
model: config.model,
temperature,
format: 'json',
maxRetries: 3,
});

default:
throw new Error(`Unsupported LLM provider: ${provider}`);
throw new Error(`Unsupported LLM provider: ${config.provider}`);
}
}
}
51 changes: 50 additions & 1 deletion auto-agents-framework/src/services/llm/types.ts
Original file line number Diff line number Diff line change
@@ -1 +1,50 @@
export type LLMProvider = 'openai' | 'anthropic' | 'llama';
export enum LLMProvider {
OPENAI = 'openai',
ANTHROPIC = 'anthropic',
OLLAMA = 'ollama',
}

export type LLMConfiguration = {
provider: LLMProvider;
model: string;
};

export enum LLMSize {
SMALL = 'small',
LARGE = 'large',
}

export type LLMNodeConfiguration = {
size: LLMSize;
temperature: number;
};

export const llmModels = {
large: {
anthropic: {
claude3opus: 'claude-3-opus-20240229',
claude3sonnet: 'claude-3-sonnet-20240229',
},
openai: {
gpt4turbo: 'gpt-4-turbo',
gpt4: 'gpt-4',
},
//placeholder
ollama: {
llama3: 'llama3.1',
},
},
small: {
openai: {
gpt_4o_mini: 'gpt-4o-mini',
gpt35turbo: 'gpt-3.5-turbo',
},
anthropic: {
claude3haiku: 'claude-3-haiku-20240307',
},
//placeholder
ollama: {
llama3: 'llama3.1',
},
},
};

0 comments on commit d8eb33e

Please sign in to comment.