From f18206891fa256650b1c84cd375257d936c43cbe Mon Sep 17 00:00:00 2001
From: vbarda
Date: Mon, 13 Jan 2025 17:07:17 -0500
Subject: [PATCH] update

---
 .../langsmith-agent-simulation-evaluation.ipynb     | 2 +-
 docs/docs/tutorials/llm-compiler/LLMCompiler.ipynb  | 2 +-
 docs/docs/tutorials/rag/langgraph_agentic_rag.ipynb | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/docs/tutorials/chatbot-simulation-evaluation/langsmith-agent-simulation-evaluation.ipynb b/docs/docs/tutorials/chatbot-simulation-evaluation/langsmith-agent-simulation-evaluation.ipynb
index 8bd724a7f..1e05a3daf 100644
--- a/docs/docs/tutorials/chatbot-simulation-evaluation/langsmith-agent-simulation-evaluation.ipynb
+++ b/docs/docs/tutorials/chatbot-simulation-evaluation/langsmith-agent-simulation-evaluation.ipynb
@@ -582,7 +582,7 @@
     ")\n",
     "\n",
     "evaluator = prompt | ChatOpenAI(model=\"gpt-4-turbo-preview\").with_structured_output(\n",
-    "    RedTeamingResult\n",
+    "    RedTeamingResult, method=\"function_calling\"\n",
     ")\n",
     "\n",
     "\n",
diff --git a/docs/docs/tutorials/llm-compiler/LLMCompiler.ipynb b/docs/docs/tutorials/llm-compiler/LLMCompiler.ipynb
index 72dd7c03b..e6631632f 100644
--- a/docs/docs/tutorials/llm-compiler/LLMCompiler.ipynb
+++ b/docs/docs/tutorials/llm-compiler/LLMCompiler.ipynb
@@ -1032,7 +1032,7 @@
     ") # You can optionally add examples\n",
     "llm = ChatOpenAI(model=\"gpt-4-turbo-preview\")\n",
     "\n",
-    "runnable = joiner_prompt | llm.with_structured_output(JoinOutputs)"
+    "runnable = joiner_prompt | llm.with_structured_output(JoinOutputs, method=\"function_calling\")"
    ]
   },
   {
diff --git a/docs/docs/tutorials/rag/langgraph_agentic_rag.ipynb b/docs/docs/tutorials/rag/langgraph_agentic_rag.ipynb
index d5a788b57..6aef29a83 100644
--- a/docs/docs/tutorials/rag/langgraph_agentic_rag.ipynb
+++ b/docs/docs/tutorials/rag/langgraph_agentic_rag.ipynb
@@ -264,7 +264,7 @@
     "    model = ChatOpenAI(temperature=0, model=\"gpt-4-0125-preview\", streaming=True)\n",
     "\n",
     "    # LLM with tool and validation\n",
-    "    llm_with_tool = model.with_structured_output(grade)\n",
+    "    llm_with_tool = model.with_structured_output(grade, method=\"function_calling\")\n",
     "\n",
     "    # Prompt\n",
     "    prompt = PromptTemplate(\n",
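
All three edits apply the same one-line pattern: pass method="function_calling" to with_structured_output rather than relying on the library default, which has changed across langchain-openai releases. The sketch below illustrates that pattern outside the notebooks; it is not part of the patch, it assumes langchain-openai is installed and OPENAI_API_KEY is set, and GradeDocuments is a hypothetical schema standing in for the notebooks' RedTeamingResult, JoinOutputs, and grade.

    # Sketch of the pattern applied in all three notebooks above; not part
    # of the patch. Assumes langchain-openai is installed and OPENAI_API_KEY
    # is set. GradeDocuments is a hypothetical stand-in for the notebooks'
    # RedTeamingResult, JoinOutputs, and grade schemas.
    from langchain_openai import ChatOpenAI
    from pydantic import BaseModel, Field


    class GradeDocuments(BaseModel):
        """Binary relevance score for a retrieved document."""

        binary_score: str = Field(description="Is the document relevant: 'yes' or 'no'")


    model = ChatOpenAI(temperature=0, model="gpt-4-turbo-preview")

    # Before this patch: model.with_structured_output(GradeDocuments) used
    # the library's default method. After: function calling is selected
    # explicitly, so the schema is bound as an OpenAI tool/function definition.
    llm_with_tool = model.with_structured_output(GradeDocuments, method="function_calling")

    result = llm_with_tool.invoke("Doc: 'LangGraph builds agents.' Question: 'What is LangGraph?'")
    print(result.binary_score)  # -> "yes"

Requesting function_calling explicitly pins schema binding to OpenAI tool calling instead of a default that may resolve to json_schema, which older models such as gpt-4-turbo-preview do not support.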