Sometimes the model extracts false parameters, so it may be good to validate the parameters and respond to the model with the error, letting the model self-correct.
In the following code I am using the ToolNode — how do I validate the parameters before invoking the tool?
from typing import Literal
from langgraph.types import Command
from nodes.router import router
from agents.course_qa_agent import course_qa_graph
from agents.general_agent import general_graph
from langgraph.graph import START, StateGraph, MessagesState, END
from langchain_core.messages import AIMessage, ToolMessage # 导入消息对象
from langgraph.prebuilt import ToolNode, tools_condition
from tools.api_client import load_chat_model, toolbox
# Load the chat model and the course-information toolset from the project's
# tool registry (both come from tools.api_client; definitions not visible here).
model = load_chat_model()
course_tools = toolbox.load_toolset('course_information_tools')
class CourseQAState(MessagesState):
    """State for the course (database) Q&A agent; inherits the messages channel."""
    # Plain-text answer extracted from the model's latest response.
    answer: str
def call_model(state: CourseQAState) -> dict:
    """Invoke the tool-bound chat model on the conversation history.

    Args:
        state: current course-QA state (messages plus answer channel).

    Returns:
        dict: the model reply appended to ``messages`` and its text
        content stored under ``answer``.
    """
    bound = model.bind_tools(course_tools)
    prompt = [{"role": "system", "content": '你是一个高效的数据查询助手'}]
    prompt.extend(state['messages'])
    reply = bound.invoke(prompt)
    # Tag the message so downstream consumers can tell which agent produced it.
    reply.name = 'course_qa_agent'
    return {"answer": reply.content, "messages": [reply]}
# Wire the course-QA subgraph:
#   START -> course_call_model -> (tool calls?) -> course_tools -> course_call_model
# tools_condition routes to 'course_tools' when the last AI message carries
# tool calls, otherwise to END.
course_qa_graph_builder = StateGraph(CourseQAState)
course_qa_graph_builder.add_node('course_call_model', call_model)
course_qa_graph_builder.add_node('course_tools', ToolNode(course_tools))
course_qa_graph_builder.add_edge(START, "course_call_model")
course_qa_graph_builder.add_conditional_edges(
    "course_call_model",
    tools_condition,
    {'tools': 'course_tools', '__end__': END}
)
course_qa_graph_builder.add_edge("course_tools", "course_call_model")
# checkpointer=True lets this subgraph inherit the parent graph's
# checkpointer when invoked as a node of a larger graph.
course_qa_graph = course_qa_graph_builder.compile(checkpointer=True)
class AgentState(MessagesState):
    """Top-level supervisor state; inherits the messages channel from MessagesState."""
    pass
def supervisor(state: AgentState) -> Command[
        Literal['course_qa_agent', 'general_agent', END]]:
    """Supervisor node: route the conversation to the appropriate sub-agent.

    Args:
        state: top-level agent state.

    Returns:
        Command targeting 'course_qa_agent', 'general_agent', or END.
    """
    messages = state.get('messages', [])
    # Fix: the original indexed messages[-1] unconditionally, raising
    # IndexError when the graph is invoked with an empty message list.
    if not messages:
        return Command(goto=END)
    last_message = messages[-1]
    # A trailing AI/tool message means a sub-agent already answered; stop here.
    if isinstance(last_message, (AIMessage, ToolMessage)):
        return Command(goto=END)
    # router() is a project-local intent classifier; assumed to return a dict
    # with an 'intent' key - TODO confirm against nodes/router.py.
    response = router(state)
    intent = response.get('intent', '通用对话')
    routing_map = {
        '课程查询': 'course_qa_agent',
        '通用对话': 'general_agent'
    }
    # Unknown intents fall back to the general agent.
    return Command(goto=routing_map.get(intent, 'general_agent'))
def course_qa_agent(state: AgentState) -> Command[Literal['supervisor']]:
    """Run the course-QA subgraph, then hand control back to the supervisor.

    Args:
        state: top-level agent state.

    Returns:
        Command routing to 'supervisor', appending the subgraph's final
        message (if any) to the messages channel.
    """
    response = course_qa_graph.invoke({'messages': state.get('messages', [])})
    result_messages = response.get('messages', [])
    # Fix: the original indexed [-1] unconditionally, raising IndexError
    # when the subgraph produced no messages.
    if not result_messages:
        return Command(goto='supervisor', update={'messages': []})
    # Wrap in a list: the documented input form for the add_messages reducer.
    return Command(goto='supervisor',
                   update={'messages': [result_messages[-1]]})
def general_agent(state: AgentState) -> Command[Literal['supervisor']]:
    """Run the general-conversation subgraph, then return to the supervisor.

    Args:
        state: top-level agent state.

    Returns:
        Command routing to 'supervisor', appending the subgraph's final
        message (if any) to the messages channel.
    """
    response = general_graph.invoke({'messages': state.get('messages', [])})
    result_messages = response.get('messages', [])
    # Fix: the original indexed [-1] unconditionally, raising IndexError
    # when the subgraph produced no messages.
    if not result_messages:
        return Command(goto='supervisor', update={'messages': []})
    # Wrap in a list: the documented input form for the add_messages reducer.
    return Command(goto='supervisor',
                   update={'messages': [result_messages[-1]]})
# Top-level graph: supervisor routes via Command(goto=...), so only the
# START edge is declared; sub-agents return through the Command they emit.
# NOTE(review): supervisor routes to 'general_agent' but that node is never
# added in this snippet - confirm builder.add_node(general_agent) and the
# compile() call exist in the full file (the paste appears truncated here).
builder = StateGraph(AgentState)
builder.add_node(supervisor)
builder.add_node(course_qa_agent)
builder.add_edge(START, "supervisor")
Reference:
Hi @santiagoahl
what is the definition of state in your graph? And what is the graph itself? Could you share as many code snippets as possible?
The chat history is preserved by your graph state and reducers (for example, using add_messages) or by a checkpointer. If your state replaces the message list with only the ToolMessage output, the next LLM call will start with a tool role message and OpenAI will reject it with the 400 error you saw.
1) Am I using it wrongly?
Likely yes - it seems that…
Hi @neel
thanks for your reply and feedback. This is how I understand what you are asking about:
Q1 - Confirm custom node behavior: “My custom tool node invokes multiple tools sequentially (loop) without handing over control during execution - correct?”
Yes. Your custom node processes tool calls in sequence and only returns control when the node returns. In your snippet, state is updated and ToolMessages are collected, then a single update is returned. Control flow changes only when the node r…
feng-1985:
from typing import Literal
from langgraph.types import Command
from nodes.router import router
from agents.course_qa_agent import course_qa_graph
from agents.general_agent import general_graph
from langgraph.graph import START, StateGraph, MessagesState, END
from langchain_core.messages import AIMessage, ToolMessage # 导入消息对象
from langgraph.prebuilt import ToolNode, tools_condition
from tools.api_client import load_chat_model, toolbox
# Load the chat model and the course-information toolset from the project's
# tool registry (both come from tools.api_client; definitions not visible here).
model = load_chat_model()
course_tools = toolbox.load_toolset('course_information_tools')
class CourseQAState(MessagesState):
    """State for the course (database) Q&A agent; inherits the messages channel."""
    # Plain-text answer extracted from the model's latest response.
    answer: str
def call_model(state: CourseQAState) -> dict:
    """Invoke the tool-bound chat model on the conversation history.

    Args:
        state: current course-QA state (messages plus answer channel).

    Returns:
        dict: the model reply appended to ``messages`` and its text
        content stored under ``answer``.
    """
    bound = model.bind_tools(course_tools)
    prompt = [{"role": "system", "content": '你是一个高效的数据查询助手'}]
    prompt.extend(state['messages'])
    reply = bound.invoke(prompt)
    # Tag the message so downstream consumers can tell which agent produced it.
    reply.name = 'course_qa_agent'
    return {"answer": reply.content, "messages": [reply]}
# Wire the course-QA subgraph:
#   START -> course_call_model -> (tool calls?) -> course_tools -> course_call_model
# tools_condition routes to 'course_tools' when the last AI message carries
# tool calls, otherwise to END.
course_qa_graph_builder = StateGraph(CourseQAState)
course_qa_graph_builder.add_node('course_call_model', call_model)
course_qa_graph_builder.add_node('course_tools', ToolNode(course_tools))
course_qa_graph_builder.add_edge(START, "course_call_model")
course_qa_graph_builder.add_conditional_edges(
    "course_call_model",
    tools_condition,
    {'tools': 'course_tools', '__end__': END}
)
course_qa_graph_builder.add_edge("course_tools", "course_call_model")
# checkpointer=True lets this subgraph inherit the parent graph's
# checkpointer when invoked as a node of a larger graph.
course_qa_graph = course_qa_graph_builder.compile(checkpointer=True)
class AgentState(MessagesState):
    """Top-level supervisor state; inherits the messages channel from MessagesState."""
    pass
def supervisor(state: AgentState) -> Command[
        Literal['course_qa_agent', 'general_agent', END]]:
    """Supervisor node: route the conversation to the appropriate sub-agent.

    Args:
        state: top-level agent state.

    Returns:
        Command targeting 'course_qa_agent', 'general_agent', or END.
    """
    messages = state.get('messages', [])
    # Fix: the original indexed messages[-1] unconditionally, raising
    # IndexError when the graph is invoked with an empty message list.
    if not messages:
        return Command(goto=END)
    last_message = messages[-1]
    # A trailing AI/tool message means a sub-agent already answered; stop here.
    if isinstance(last_message, (AIMessage, ToolMessage)):
        return Command(goto=END)
    # router() is a project-local intent classifier; assumed to return a dict
    # with an 'intent' key - TODO confirm against nodes/router.py.
    response = router(state)
    intent = response.get('intent', '通用对话')
    routing_map = {
        '课程查询': 'course_qa_agent',
        '通用对话': 'general_agent'
    }
    # Unknown intents fall back to the general agent.
    return Command(goto=routing_map.get(intent, 'general_agent'))
def course_qa_agent(state: AgentState) -> Command[Literal['supervisor']]:
    """Run the course-QA subgraph, then hand control back to the supervisor.

    Args:
        state: top-level agent state.

    Returns:
        Command routing to 'supervisor', appending the subgraph's final
        message (if any) to the messages channel.
    """
    response = course_qa_graph.invoke({'messages': state.get('messages', [])})
    result_messages = response.get('messages', [])
    # Fix: the original indexed [-1] unconditionally, raising IndexError
    # when the subgraph produced no messages.
    if not result_messages:
        return Command(goto='supervisor', update={'messages': []})
    # Wrap in a list: the documented input form for the add_messages reducer.
    return Command(goto='supervisor',
                   update={'messages': [result_messages[-1]]})
def general_agent(state: AgentState) -> Command[Literal['supervisor']]:
    """Run the general-conversation subgraph, then return to the supervisor.

    Args:
        state: top-level agent state.

    Returns:
        Command routing to 'supervisor', appending the subgraph's final
        message (if any) to the messages channel.
    """
    response = general_graph.invoke({'messages': state.get('messages', [])})
    result_messages = response.get('messages', [])
    # Fix: the original indexed [-1] unconditionally, raising IndexError
    # when the subgraph produced no messages.
    if not result_messages:
        return Command(goto='supervisor', update={'messages': []})
    # Wrap in a list: the documented input form for the add_messages reducer.
    return Command(goto='supervisor',
                   update={'messages': [result_messages[-1]]})
# Top-level graph: supervisor routes via Command(goto=...), so only the
# START edge is declared; sub-agents return through the Command they emit.
# NOTE(review): supervisor routes to 'general_agent' but that node is never
# added in this snippet - confirm builder.add_node(general_agent) and the
# compile() call exist in the full file (the paste appears truncated here).
builder = StateGraph(AgentState)
builder.add_node(supervisor)
builder.add_node(course_qa_agent)
builder.add_edge(START, "supervisor")
hey @feng-1985 ! You could create a custom tool node that validates parameters before invoking tools, returning error messages for self-correction.
alternatively, you can also add an extra validation step before toolnode that does similar things.
Thanks — is there any way to implement it using the ToolNode from langgraph.prebuilt?