Skip to content

Commit

Permalink
Diversify LLM models for each node
Browse files Browse the repository at this point in the history
  • Loading branch information
Xm0onh committed Jan 6, 2025
1 parent eae6e9e commit d445a06
Show file tree
Hide file tree
Showing 4 changed files with 62 additions and 17 deletions.
5 changes: 4 additions & 1 deletion auto-agents-framework/.env.sample
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,10 @@ RESPONSE_INTERVAL_MINUTES=26
POST_INTERVAL_MINUTES=30

# LLM Configuration
LLM_PROVIDER=openai # or anthropic or llama
DECISION_LLM_PROVIDER=openai # or anthropic or llama
ANALYZE_LLM_PROVIDER=openai # or anthropic or llama
GENERATION_LLM_PROVIDER=anthropic # or openai or llama
RESPONSE_LLM_PROVIDER=anthropic # or openai or llama
OPENAI_API_KEY=<openai_api_key>
ANTHROPIC_API_KEY=<anthropic_api_key>
LLAMA_API_URL=<llama_api_url>
Expand Down
17 changes: 12 additions & 5 deletions auto-agents-framework/src/agents/workflows/kol/workflow.ts
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,14 @@ export const State = Annotation.Root({

const createWorkflowConfig = async (characterFile: string): Promise<WorkflowConfig> => {
const { USERNAME, PASSWORD, COOKIES_PATH } = config.twitterConfig;
const { LLM_PROVIDER, LARGE_LLM_MODEL, SMALL_LLM_MODEL } = config.llmConfig;
const {
DECISION_LLM_PROVIDER,
ANALYZE_LLM_PROVIDER,
GENERATION_LLM_PROVIDER,
RESPONSE_LLM_PROVIDER,
LARGE_LLM_MODEL,
SMALL_LLM_MODEL,
} = config.llmConfig;

const twitterApi = await createTwitterApi(USERNAME, PASSWORD, COOKIES_PATH);
const { tools } = createTools(twitterApi);
Expand All @@ -75,10 +82,10 @@ const createWorkflowConfig = async (characterFile: string): Promise<WorkflowConf
toolNode,
prompts,
llms: {
decision: LLMFactory.createModel(LLM_PROVIDER, SMALL_LLM_MODEL, 0.2),
analyze: LLMFactory.createModel(LLM_PROVIDER, LARGE_LLM_MODEL, 0.5),
generation: LLMFactory.createModel(LLM_PROVIDER, LARGE_LLM_MODEL, 0.8),
response: LLMFactory.createModel(LLM_PROVIDER, LARGE_LLM_MODEL, 0.8),
decision: LLMFactory.createModel(DECISION_LLM_PROVIDER, SMALL_LLM_MODEL, 0.2),
analyze: LLMFactory.createModel(ANALYZE_LLM_PROVIDER, LARGE_LLM_MODEL, 0.5),
generation: LLMFactory.createModel(GENERATION_LLM_PROVIDER, LARGE_LLM_MODEL, 0.8),
response: LLMFactory.createModel(RESPONSE_LLM_PROVIDER, LARGE_LLM_MODEL, 0.8),
},
};
};
Expand Down
9 changes: 6 additions & 3 deletions auto-agents-framework/src/config/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -52,9 +52,12 @@ export const config = (() => {
POST_INTERVAL_MS: (Number(process.env.POST_INTERVAL_MINUTES) || 90) * 60 * 1000,
},
llmConfig: {
LLM_PROVIDER: process.env.LLM_PROVIDER || 'openai',
LARGE_LLM_MODEL: process.env.LARGE_LLM_MODEL || 'gpt-4o',
SMALL_LLM_MODEL: process.env.SMALL_LLM_MODEL || 'gpt-4o-mini',
DECISION_LLM_PROVIDER: process.env.DECISION_LLM_PROVIDER || 'openai',
ANALYZE_LLM_PROVIDER: process.env.ANALYZE_LLM_PROVIDER || 'openai',
GENERATION_LLM_PROVIDER: process.env.GENERATION_LLM_PROVIDER || 'anthropic',
RESPONSE_LLM_PROVIDER: process.env.RESPONSE_LLM_PROVIDER || 'anthropic',
LARGE_LLM_MODEL: process.env.LARGE_LLM_MODEL || 'gpt-4',
SMALL_LLM_MODEL: process.env.SMALL_LLM_MODEL || 'gpt-4-turbo',
OPENAI_API_KEY: process.env.OPENAI_API_KEY || '',
ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY || '',
LLAMA_API_URL: process.env.LLAMA_API_URL || '',
Expand Down
48 changes: 40 additions & 8 deletions auto-agents-framework/src/config/schema.ts
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,46 @@ const twitterConfigSchema = z.object({
POST_INTERVAL_MS: z.number().int().positive(),
});

const llmConfigSchema = z.object({
LLM_PROVIDER: z.enum(['openai', 'anthropic', 'llama']).default('openai'),
LARGE_LLM_MODEL: z.string().min(1),
SMALL_LLM_MODEL: z.string().min(1),
OPENAI_API_KEY: z.string().optional(),
ANTHROPIC_API_KEY: z.string().optional(),
LLAMA_API_URL: z.string().optional(),
});
// Accepted LLM backends. Shared by every per-node provider field so the
// allowed set (and its default) is defined exactly once.
const llmProviderSchema = z.enum(['openai', 'anthropic', 'llama']).default('openai');

/**
 * LLM configuration: one provider per workflow node (decision, analyze,
 * generation, response), the small/large model names, and per-provider
 * credentials/endpoints.
 *
 * The superRefine step cross-checks providers against credentials: every
 * provider actually selected must have its API key (or URL, for llama)
 * configured. All missing credentials are reported in a single issue so the
 * user can fix them in one pass.
 */
const llmConfigSchema = z
  .object({
    DECISION_LLM_PROVIDER: llmProviderSchema,
    ANALYZE_LLM_PROVIDER: llmProviderSchema,
    GENERATION_LLM_PROVIDER: llmProviderSchema,
    RESPONSE_LLM_PROVIDER: llmProviderSchema,
    SMALL_LLM_MODEL: z.string().min(1),
    LARGE_LLM_MODEL: z.string().min(1),
    // Credentials are individually optional; the refinement below enforces
    // whichever ones the selected providers require.
    OPENAI_API_KEY: z.string().optional(),
    ANTHROPIC_API_KEY: z.string().optional(),
    LLAMA_API_URL: z.string().optional(),
  })
  .superRefine((data, ctx) => {
    // Every provider referenced by any node; duplicates are harmless since
    // we only test membership.
    const providers = [
      data.DECISION_LLM_PROVIDER,
      data.ANALYZE_LLM_PROVIDER,
      data.GENERATION_LLM_PROVIDER,
      data.RESPONSE_LLM_PROVIDER,
    ];

    // Accumulate every missing credential before reporting, so one error
    // message covers all problems at once.
    const missingConfigs: string[] = [];

    if (providers.includes('openai') && !data.OPENAI_API_KEY) {
      missingConfigs.push('OpenAI API key');
    }
    if (providers.includes('anthropic') && !data.ANTHROPIC_API_KEY) {
      missingConfigs.push('Anthropic API key');
    }
    if (providers.includes('llama') && !data.LLAMA_API_URL) {
      missingConfigs.push('Llama API URL');
    }

    if (missingConfigs.length > 0) {
      ctx.addIssue({
        code: z.ZodIssueCode.custom,
        message: `Missing required configs: ${missingConfigs.join(', ')}`,
        path: ['llmConfig'],
      });
    }
  });

const autoDriveConfigSchema = z.object({
AUTO_DRIVE_API_KEY: z.string().optional(),
Expand Down

0 comments on commit d445a06

Please sign in to comment.