Hi,
I’m trying to make the simplest possible example of a multiagent system with a handoff tool (as a surrogate of a more complex system).
Agent A: addition and substraction
Agent M: multiplication and division
After handoff to agent M, even if I delete all the conversation history, the graph returns immediately to Agent A.
Why? What is the correct way of implementing this?
# agents/base_react_agent.py
from typing import Annotated, Literal
from langgraph.types import Command
from typing_extensions import TypedDict
# LangGraph
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langchain.chat_models import init_chat_model
from langchain_core.tools import tool
from langchain_core.messages import ToolMessage, AIMessage,SystemMessage
from langgraph.types import Overwrite
from langgraph.graph.message import REMOVE_ALL_MESSAGES
from langchain.messages import RemoveMessage
from langchain.agents import create_agent
from langchain.agents.middleware import wrap_tool_call
# Memory
from langgraph.checkpoint.memory import InMemorySaver
llm = init_chat_model("openai:gpt-4o-mini")
class State(TypedDict):
messages: Annotated[list, add_messages]
# Subgraph 1: Sum and subtract two numbers
@tool
def sum_two_numbers(arg1: float, arg2: float) -> float:
"""Sum two numbers"""
return arg1 + arg2
@tool
def subtract_two_numbers(arg1: float, arg2: float) -> float:
"""Subtract two numbers"""
return arg1 - arg2
# Subgraph 2: Multiply and divide two numbers
@tool
def multiply_two_numbers(arg1: float, arg2: float) -> float:
"""Multiply two numbers"""
return arg1 * arg2
@tool
def divide_two_numbers(arg1: float, arg2: float) -> float:
"""Divide two numbers"""
return arg1 / arg2
@tool
def handoff_tool(agent: str) -> Literal["subgraph_1", "subgraph_2"]:
"""Handoff to another agent based on it's name"""
print("Handoff to:", agent)
return Command(
goto=agent,
graph=Command.PARENT,
update = {
# "messages": [ToolMessage(content=f"Handing off to {agent}", tool_call_id=uuid.uuid4(), tool_name="handoff_tool")]
# "messages": [AIMessage(content=f"Handing off to {agent}")]
"messages": [RemoveMessage( id = REMOVE_ALL_MESSAGES)]
# "messages": Overwrite(value = "You are a recently spawned agent")
}
)
@wrap_tool_call
def tool_wrap(request, handler):
print(f"I'm a tool call middleware")
"""Handle tool execution errors with custom messages."""
try:
return handler(request)
except Exception as e:
# Return a custom error message to the model
return ToolMessage(
content=f"Tool error: Please check your input and try again. ({str(e)})",
tool_call_id=request.tool_call["id"]
)
SYSTEM_PROMPT_1 = """ You are an assistant that can sum and subtract two numbers.
Your name is Andres you perform additions and substractions, You can handoff to subgraph_2, an agent that can perform divisions and multiplications using the handoff tool"""
SYSTEM_PROMPT_2 = """ You are an assistant that can multiply and divide two numbers.
Your name is Maria you perform multiplications and divisions, You can handoff to subgraph_1, an agent that can perform additions and substractions using the handoff tool"""
agent_1 = create_agent(
model="openai:gpt-4o-mini",
system_prompt=SYSTEM_PROMPT_1,
tools=[sum_two_numbers, subtract_two_numbers, handoff_tool],
middleware=[tool_wrap])
agent_2 = create_agent(
model="openai:gpt-4o-mini",
tools=[multiply_two_numbers, divide_two_numbers, handoff_tool],
middleware=[tool_wrap],
system_prompt=SYSTEM_PROMPT_2)
main_graph_builder = StateGraph(State)
main_graph_builder.add_node("subgraph_1", agent_1)
main_graph_builder.add_node("subgraph_2", agent_2)
main_graph_builder.set_entry_point("subgraph_1")
main_graph = main_graph_builder.compile()
checkpointer = InMemorySaver()
main_graph = main_graph_builder.compile(checkpointer=checkpointer)
config = {"configurable": {"thread_id": "1"}}
def stream_graph_updates(user_input: str):
for event in main_graph.stream({"messages": [{"role": "user", "content": user_input}]}, config=config):
# pprint.pprint(event)
for node_name, value in event.items():
# Skip empty returns
if value is None:
continue
# ---- PRINT THE AGENT NAME ----
print(f"\n### Agent speaking: {node_name}\n")
for key, value in event.items():
if value is None:
continue # skip empty returns (like Command handoffs)
if "messages" in value and value["messages"]:
if "content" in value["messages"][-1]:
print("Assistant:", value["messages"][-1].content)
else:
print("Assistant:", str(value["messages"][-1]))
# print("------------META-------------")
# print("Message length:", len(value["messages"]))
# print(50*"-")
# for message in value["messages"]:
# pprint.pprint(message)
# print(50*"-")
# print("------------META-------------")
while True:
user_input = input("User: ")
if user_input.lower() in ["quit", "exit", "q"]:
print("Goodbye!")
stream_graph_updates(user_input)