From 76172511fd1739016895413f6a656e3ffd3c8a32 Mon Sep 17 00:00:00 2001
From: pm390 <56439961+pm390@users.noreply.github.com>
Date: Tue, 14 Jan 2025 21:53:37 +0100
Subject: [PATCH] community: Additional parameters for OpenAIAssistantV2Runnable
 (#29207)

**Description:** Added additional parameters to OpenAIAssistantV2Runnable. This change lets LangChain users set parameters that cannot be set through the Assistants UI (max_completion_tokens, max_prompt_tokens, parallel_tool_calls), as well as parameters useful for experimentation, such as top_p and temperature. The PR originated from the need to use parallel_tool_calls in LangChain: this parameter matters for OpenAI Assistants because, unless it is set to False, strict mode is not respected (https://platform.openai.com/docs/guides/function-calling#parallel-function-calling):

> Note: Currently, if the model calls multiple functions in one turn then strict mode will be disabled for those calls.

A usage sketch with the new parameters follows the diff below.

**Issue:** None
**Dependencies:** openai
---
 .../langchain/agents/openai_assistant/base.py | 48 ++++++++++++++++++-
 1 file changed, 46 insertions(+), 2 deletions(-)

diff --git a/libs/langchain/langchain/agents/openai_assistant/base.py b/libs/langchain/langchain/agents/openai_assistant/base.py
index 63457b63de2c7..148cfc85ab5a3 100644
--- a/libs/langchain/langchain/agents/openai_assistant/base.py
+++ b/libs/langchain/langchain/agents/openai_assistant/base.py
@@ -293,6 +293,12 @@ def invoke(
                 instructions: Additional run instructions.
                 model: Override Assistant model for this run.
                 tools: Override Assistant tools for this run.
+                parallel_tool_calls: Allow Assistant to set parallel_tool_calls
+                    for this run.
+                top_p: Override Assistant top_p for this run.
+                temperature: Override Assistant temperature for this run.
+                max_completion_tokens: Allow setting max_completion_tokens for this run.
+                max_prompt_tokens: Allow setting max_prompt_tokens for this run.
                 run_metadata: Metadata to associate with new run.
             config: Runnable config. Defaults to None.
 
@@ -412,6 +418,12 @@ async def ainvoke(
                 additional_instructions: Appends additional instructions.
                 model: Override Assistant model for this run.
                 tools: Override Assistant tools for this run.
+                parallel_tool_calls: Allow Assistant to set parallel_tool_calls
+                    for this run.
+                top_p: Override Assistant top_p for this run.
+                temperature: Override Assistant temperature for this run.
+                max_completion_tokens: Allow setting max_completion_tokens for this run.
+                max_prompt_tokens: Allow setting max_prompt_tokens for this run.
                 run_metadata: Metadata to associate with new run.
             config: Runnable config. Defaults to None.
             kwargs: Additional arguments.
@@ -514,6 +526,11 @@ def _create_run(self, input: dict) -> Any:
                 "model",
                 "tools",
                 "additional_instructions",
+                "parallel_tool_calls",
+                "top_p",
+                "temperature",
+                "max_completion_tokens",
+                "max_prompt_tokens",
                 "run_metadata",
             )
         }
@@ -527,7 +544,18 @@ def _create_thread_and_run(self, input: dict, thread: dict) -> Any:
         params = {
             k: v
             for k, v in input.items()
-            if k in ("instructions", "model", "tools", "run_metadata")
+            if k
+            in (
+                "instructions",
+                "model",
+                "tools",
+                "parallel_tool_calls",
+                "top_p",
+                "temperature",
+                "max_completion_tokens",
+                "max_prompt_tokens",
+                "run_metadata",
+            )
         }
         run = self.client.beta.threads.create_and_run(
             assistant_id=self.assistant_id,
@@ -651,6 +679,11 @@ async def _acreate_run(self, input: dict) -> Any:
                 "model",
                 "tools",
                 "additional_instructions",
+                "parallel_tool_calls",
+                "top_p",
+                "temperature",
+                "max_completion_tokens",
+                "max_prompt_tokens",
                 "run_metadata",
             )
         }
@@ -664,7 +697,18 @@ async def _acreate_thread_and_run(self, input: dict, thread: dict) -> Any:
         params = {
             k: v
             for k, v in input.items()
-            if k in ("instructions", "model", "tools", "run_metadata")
+            if k
+            in (
+                "instructions",
+                "model",
+                "tools",
+                "parallel_tool_calls",
+                "top_p",
+                "temperature",
+                "max_completion_tokens",
+                "max_prompt_tokens",
+                "run_metadata",
+            )
         }
         run = await self.async_client.beta.threads.create_and_run(
             assistant_id=self.assistant_id,
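
Below is a minimal usage sketch of the new per-run parameters. It is not part of the patch: the import path assumes the `OpenAIAssistantV2Runnable` shipped in `langchain_community`, and the assistant id, message content, and metadata are placeholders; a valid `OPENAI_API_KEY` and the `openai` package are required for the call to actually run.

```python
# Sketch only (not part of the patch): exercises the per-run overrides added here.
# The assistant id and message content are hypothetical placeholders.
from langchain_community.agents.openai_assistant import OpenAIAssistantV2Runnable

assistant = OpenAIAssistantV2Runnable(assistant_id="asst_...")  # placeholder id

output = assistant.invoke(
    {
        "content": "Look up the status of order 42.",
        # Disable parallel tool calls so OpenAI honors strict function calling.
        "parallel_tool_calls": False,
        # Sampling and token-budget overrides for this run only.
        "temperature": 0.2,
        "top_p": 0.9,
        "max_completion_tokens": 512,
        "max_prompt_tokens": 2048,
        "run_metadata": {"experiment": "strict-mode-check"},
    }
)
print(output)
```

The same keys work with `ainvoke`; as the diff shows, they are simply filtered from the input dict and forwarded to the Assistants run-creation calls.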