You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I worked on the agent supervisor example of LangChain, but I kept getting this error: "Object of type CallbackManagerForToolRun is not JSON serializable". This error only happens when the graph uses PythonREPLTool. Please help me out!
from typing import Annotated

from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_experimental.tools import PythonREPLTool
from langchain_community.utilities import SQLDatabase
from langchain_community.agent_toolkits.sql.toolkit import SQLDatabaseToolkit
from langchain_openai import ChatOpenAI

# Shared chat model used by every worker agent and the supervisor.
llm = ChatOpenAI(model="gpt-4-1106-preview")

# Web-search tool for the "Researcher" worker.
tavily_tool = TavilySearchResults(max_results=5)

# WARNING: this executes arbitrary Python locally, which can be unsafe.
# NOTE(review): the reported "Object of type CallbackManagerForToolRun is not
# JSON serializable" error is tied to this tool in older langchain_experimental
# releases — presumably fixed upstream; verify by upgrading langchain-experimental.
python_repl_tool = PythonREPLTool()

# SQL toolkit for the "Netflix DB Manager" worker.
db = SQLDatabase.from_uri("postgresql://postgres:password@localhost:5432/test")
sql_toolkit = SQLDatabaseToolkit(db=db, llm=llm)
from langchain.agents import AgentExecutor, create_openai_tools_agent, create_react_agent
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser


def create_agent(llm: ChatOpenAI, tools: list, system_prompt: str) -> AgentExecutor:
    """Build one worker agent: a tool-calling agent wrapped in an AgentExecutor.

    Args:
        llm: Chat model the agent reasons with.
        tools: Tools this worker is allowed to call.
        system_prompt: System instructions describing the worker's role.

    Returns:
        An AgentExecutor ready to be invoked with a ``messages`` state.
    """
    # Each worker node gets its role prompt plus the running conversation and
    # an agent_scratchpad slot (required by create_openai_tools_agent).
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            MessagesPlaceholder(variable_name="messages"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    agent = create_openai_tools_agent(llm, tools, prompt)
    return AgentExecutor(agent=agent, tools=tools)


# Worker names the supervisor can route to.
members = ["Researcher", "Coder", "Netflix DB Manager"]
# Supervisor instructions: route between workers, answer FINISH when done.
system_prompt = (
    "You are a supervisor tasked with managing a conversation between the"
    " following workers: {members}. Given the following user request,"
    " respond with the worker to act next. Each worker will perform a"
    " task and respond with their results and status. When finished,"
    " respond with FINISH."
)
# Our team supervisor is an LLM node: it just picks the next agent to act
# and decides when the work is complete.
options = ["FINISH"] + members

# Using OpenAI function calling makes output parsing easier for us:
# the model must return {"next": <one of options>}.
function_def = {
    "name": "route",
    "description": "Select the next role.",
    "parameters": {
        "title": "routeSchema",
        "type": "object",
        "properties": {
            "next": {
                "title": "Next",
                "anyOf": [
                    {"enum": options},
                ],
            }
        },
        "required": ["next"],
    },
}
# Supervisor prompt: system role, running conversation, then the routing
# question. {options} and {members} are pre-bound via .partial().
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        (
            "system",
            "Given the conversation above, who should act next?"
            " Or should we FINISH? Select one of: {options}",
        ),
    ]
).partial(options=str(options), members=", ".join(members))

llm = ChatOpenAI(model="gpt-4-1106-preview")

# prompt -> LLM forced to call the `route` function -> parse its arguments
# into a dict like {"next": "Coder"}.
supervisor_chain = (
    prompt
    | llm.bind_functions(functions=[function_def], function_call="route")
    | JsonOutputFunctionsParser()
)
import functools
import operator
from typing import Sequence, TypedDict

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langgraph.graph import END, StateGraph, START


# The agent state is the input to each node in the graph.
class AgentState(TypedDict):
    # The Annotated operator.add tells the graph that new messages are always
    # appended to the current list rather than replacing it.
    messages: Annotated[Sequence[BaseMessage], operator.add]
    # The 'next' field indicates where to route to next (set by the supervisor).
    next: str


research_agent = create_agent(llm, [tavily_tool], "You are a web researcher.")
def agent_node(state, agent, name):
    """Run a worker agent on the current graph state and report its output
    back as a single named HumanMessage (appended to ``messages``).

    NOTE(review): this helper was referenced via functools.partial below but
    never defined in the original paste — without it every node raises
    NameError. Definition follows the canonical LangGraph supervisor example.
    """
    result = agent.invoke(state)
    return {"messages": [HumanMessage(content=result["output"], name=name)]}


research_node = functools.partial(agent_node, agent=research_agent, name="Researcher")

# NOTE: THIS PERFORMS ARBITRARY CODE EXECUTION. PROCEED WITH CAUTION.
code_agent = create_agent(
    llm,
    [python_repl_tool],
    "You may generate safe python code to analyze data and generate charts using matplotlib.",
)
code_node = functools.partial(agent_node, agent=code_agent, name="Coder")

db_agent = create_agent(
    llm,
    sql_toolkit.get_tools(),
    "You may execute SQL queries on database and return the result."
)
db_node = functools.partial(agent_node, agent=db_agent, name="Netflix DB Manager")
# Assemble the graph: three worker nodes plus the routing supervisor.
workflow = StateGraph(AgentState)
workflow.add_node("Researcher", research_node)
workflow.add_node("Coder", code_node)
workflow.add_node("Netflix DB Manager", db_node)
workflow.add_node("supervisor", supervisor_chain)

# We want our workers to ALWAYS "report back" to the supervisor when done.
for member in members:
    workflow.add_edge(member, "supervisor")

# The supervisor populates the "next" field in the graph state,
# which routes to a worker node or finishes.
conditional_map = {k: k for k in members}
conditional_map["FINISH"] = END
workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map)

# Finally, add the entrypoint.
workflow.add_edge(START, "supervisor")

graph = workflow.compile()
# Stream graph execution step by step, printing each non-terminal update.
# recursion_limit bounds supervisor<->worker round trips.
for s in graph.stream(
    {"messages": [HumanMessage(content="Run linked list program.")]},
    {"recursion_limit": 100},
):
    if "__end__" not in s:
        print(s)
        print("----")
output:
TypeError Traceback (most recent call last)
Cell In[180], line 1
----> 1 for s in graph.stream(
2 {"messages": [HumanMessage(content="Run linked list program.")]},
3 {"recursion_limit": 100},
4 ):
5 if "end" not in s:
6 print(s)
File ~/Desktop/MyProjects/agent-generator/.env/lib/python3.11/site-packages/langgraph/pregel/init.py:948, in Pregel.stream(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug) 945 del fut, task 947 # panic on failure or timeout
--> 948 _panic_or_proceed(done, inflight, loop.step) 949 # don't keep futures around in memory longer than needed 950 del done, inflight, futures
File ~/Desktop/MyProjects/agent-generator/.env/lib/python3.11/site-packages/langgraph/pregel/init.py:1349, in _panic_or_proceed(done, inflight, step, timeout_exc_cls) 1347 inflight.pop().cancel() 1348 # raise the exception
-> 1349 raise exc 1351 if inflight: 1352 # if we got here means we timed out 1353 while inflight: 1354 # cancel all pending tasks
... 179 """
--> 180 raise TypeError(f'Object of type {o.class.name} ' 181 f'is not JSON serializable')
TypeError: Object of type CallbackManagerForToolRun is not JSON serializable
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
-
I worked on the agent supervisor example of LangChain, but I kept getting this error: "Object of type CallbackManagerForToolRun is not JSON serializable". This error only happens when the graph uses PythonREPLTool. Please help me out!
output:
TypeError Traceback (most recent call last)
Cell In[180], line 1
----> 1 for s in graph.stream(
2 {"messages": [HumanMessage(content="Run linked list program.")]},
3 {"recursion_limit": 100},
4 ):
5 if "end" not in s:
6 print(s)
File ~/Desktop/MyProjects/agent-generator/.env/lib/python3.11/site-packages/langgraph/pregel/init.py:948, in Pregel.stream(self, input, config, stream_mode, output_keys, interrupt_before, interrupt_after, debug)
945 del fut, task
947 # panic on failure or timeout
--> 948 _panic_or_proceed(done, inflight, loop.step)
949 # don't keep futures around in memory longer than needed
950 del done, inflight, futures
File ~/Desktop/MyProjects/agent-generator/.env/lib/python3.11/site-packages/langgraph/pregel/init.py:1349, in _panic_or_proceed(done, inflight, step, timeout_exc_cls)
1347 inflight.pop().cancel()
1348 # raise the exception
-> 1349 raise exc
1351 if inflight:
1352 # if we got here means we timed out
1353 while inflight:
1354 # cancel all pending tasks
...
179 """
--> 180 raise TypeError(f'Object of type {o.class.name} '
181 f'is not JSON serializable')
TypeError: Object of type CallbackManagerForToolRun is not JSON serializable
Beta Was this translation helpful? Give feedback.
All reactions