How to generate a response when the model lacks the proper tools (multi-turn)?

The subgraph’s model has tools bound to it (the course_qa_agent below). When the query does not match any tool correctly (perhaps the query should be rewritten, or the query is within the sub-agent’s scope but there is no corresponding tool), the model responds with an empty string. How can I make the conversation continue? Perhaps suggest a better phrasing, or reply to the user with “this query is not supported”.

from typing import Literal
from langgraph.types import Command
from nodes.router import router
from agents.course_qa_agent import course_qa_graph
from agents.general_agent import general_graph
from langgraph.graph import START, StateGraph, MessagesState, END
from langchain_core.messages import AIMessage, ToolMessage  # 导入消息对象
from langgraph.prebuilt import ToolNode, tools_condition
from tools.api_client import load_chat_model, toolbox

model = load_chat_model()
course_tools = toolbox.load_toolset('course_information_tools')


class CourseQAState(MessagesState):
    """State for the course-QA subgraph: inherited message history plus the final answer text."""
    answer: str  # plain-text content of the agent's most recent AI reply


def call_model(state: CourseQAState) -> dict:
    """Answer the user's question with the tool-bound chat model.

    When the model neither selects a tool nor produces any text (the
    empty-reply problem this thread is about), substitute a user-facing
    fallback message so the conversation can continue.

    Args:
        state: Current course-QA state (message history).

    Returns:
        dict: ``answer`` (reply text) and ``messages`` (the new AI message).
    """

    system_message = '你是一个高效的数据查询助手'
    model_with_tools = model.bind_tools(course_tools)

    response = model_with_tools.invoke([
        {"role": "system", "content": system_message},
        *state['messages']])
    response.name = 'course_qa_agent'

    # Detect the "no tool call and no text" case. `content` may be a
    # string or a list of content parts, so normalize it first.
    tool_calls = getattr(response, "tool_calls", None) or []
    text = response.content
    if isinstance(text, list):
        text = "".join(part.get("text", "") for part in text if isinstance(part, dict))
    if not tool_calls and not (text or "").strip():
        # Synthesize a fallback reply instead of returning an empty string.
        response = AIMessage(
            content=(
                "这个问题目前不在课程查询工具的支持范围内。\n"
                "请尝试:\n"
                "1. 说明你感兴趣的课程名称或编号;\n"
                "2. 提出更具体的、与课程信息相关的问题,例如:上课时间、授课老师、学分等。"
            ),
            name="course_qa_fallback",
        )

    answer = response.content
    return {"answer": answer,
            "messages": [response]}


# Wire the course-QA subgraph: model node <-> tool node with a ReAct-style loop.
course_qa_graph_builder = StateGraph(CourseQAState)
course_qa_graph_builder.add_node('course_call_model', call_model)
course_qa_graph_builder.add_node('course_tools', ToolNode(course_tools))
course_qa_graph_builder.add_edge(START, "course_call_model")
# tools_condition returns 'tools' when the last AI message carries tool
# calls and '__end__' otherwise; map those labels onto our node names.
course_qa_graph_builder.add_conditional_edges(
    "course_call_model",
    tools_condition,
    {'tools': 'course_tools', '__end__': END}
)
# After the tools execute, loop back so the model can read the ToolMessages.
course_qa_graph_builder.add_edge("course_tools", "course_call_model")

# checkpointer=True defers persistence to the parent graph's checkpointer
# when this graph is embedded as a subgraph.
# NOTE(review): confirm the installed langgraph version accepts the boolean form.
course_qa_graph = course_qa_graph_builder.compile(checkpointer=True)



class AgentState(MessagesState):
    """Top-level supervisor state; carries only the shared message list inherited from MessagesState."""
    pass


def supervisor(state: AgentState) -> Command[
    Literal['course_qa_agent', 'general_agent', END]]:
    """Supervisor node: route the conversation to the matching sub-agent.

    Args:
        state: Current top-level agent state.

    Returns:
        Command: Target node for the next step (a sub-agent, or END when a
        sub-agent has already replied this turn).
    """
    messages = state.get('messages', [])

    # An AI or tool message as the latest entry means a sub-agent already
    # answered, so this round of the conversation is finished. Guard
    # against an empty history to avoid an IndexError on messages[-1].
    if messages and isinstance(messages[-1], (AIMessage, ToolMessage)):
        return Command(goto=END)

    response = router(state)
    intent = response.get('intent', '通用对话')

    routing_map = {
        '课程查询': 'course_qa_agent',
        '通用对话': 'general_agent'
    }
    # Unknown intents fall back to the general-chat agent.
    target = routing_map.get(intent, 'general_agent')
    return Command(goto=target)


def course_qa_agent(state: AgentState) -> Command[Literal['supervisor']]:
    """Delegate the conversation to the course-QA subgraph.

    Invokes ``course_qa_graph`` with the current history and hands the
    subgraph's final message back to the supervisor.

    Args:
        state: Current supervisor state.

    Returns:
        Command: Routes back to ``supervisor`` with the new message.
    """
    subgraph_result = course_qa_graph.invoke({'messages': state.get('messages', [])})
    newest_message = subgraph_result.get('messages', [])[-1]
    return Command(goto='supervisor', update={'messages': newest_message})


def general_agent(state: AgentState) -> Command[Literal['supervisor']]:
    """Delegate the conversation to the general-chat subgraph.

    Invokes ``general_graph`` with the current history and hands the
    subgraph's final message back to the supervisor.

    Args:
        state: Current supervisor state.

    Returns:
        Command: Routes back to ``supervisor`` with the new message.
    """
    subgraph_result = general_graph.invoke({'messages': state.get('messages', [])})
    newest_message = subgraph_result.get('messages', [])[-1]
    return Command(goto='supervisor', update={'messages': newest_message})


# Assemble the top-level graph around the supervisor.
builder = StateGraph(AgentState)
builder.add_node(supervisor)
builder.add_node(course_qa_agent)
# general_agent must be registered too: supervisor routes to it by name,
# and an unregistered target node raises when the graph is compiled/run.
builder.add_node(general_agent)
builder.add_edge(START, "supervisor")

hi @feng-1985

how about this one:

from langchain_core.messages import AIMessage


def call_model(state: CourseQAState) -> dict:
    """Answer with the tool-bound model, falling back to a canned reply.

    If the model neither requests a tool call nor emits any text, a
    synthetic AI message tells the user the query is unsupported and how
    to rephrase it.
    """
    system_message = (
        "你是一个高效的课程信息查询助手。"
        "如果用户的问题和课程信息无关,或者没有合适的工具可以回答,"
        "请明确告诉用户当前问题不受支持,并建议如何改写。"
    )
    tool_model = model.bind_tools(course_tools)
    prompt = [{"role": "system", "content": system_message}, *state["messages"]]

    response = tool_model.invoke(prompt)
    response.name = "course_qa_agent"

    if isinstance(response, AIMessage):
        requested_calls = getattr(response, "tool_calls", None) or []
        # `content` may be a plain string or a list of parts — flatten it.
        flat = response.content
        if isinstance(flat, list):
            flat = "".join(piece.get("text", "") for piece in flat if isinstance(piece, dict))

        # Fallback only when there is neither a tool call nor any text.
        if not requested_calls and not (flat or "").strip():
            response = AIMessage(
                content=(
                    "这个问题目前不在课程查询工具的支持范围内。\n"
                    "请尝试:\n"
                    "1. 说明你感兴趣的课程名称或编号;\n"
                    "2. 提出更具体的、与课程信息相关的问题,例如:上课时间、授课老师、学分等。"
                ),
                name="course_qa_fallback",
            )

    return {"answer": response.content, "messages": [response]}

or this:

    # ... after the first response
    if isinstance(response, AIMessage):
        tool_calls = getattr(response, "tool_calls", None) or []
        text = (response.content or "").strip()

        if not tool_calls and not text:
            # Call the base chat model without tools to explain the limitation
            fallback = model.invoke([
                {
                    "role": "system",
                    "content": (
                        "你是一个课程问答助手。当你不能通过任何工具回答问题时,"
                        "请用中文向用户解释:当前问题不在支持范围内,并给出如何改写问题的建议。"
                    ),
                },
                *state["messages"],
            ])
            fallback.name = "course_qa_fallback"
            response = fallback

    answer = response.content
    return {"answer": answer, "messages": [response]}
1 Like

Or, second option:

def course_qa_agent(state: AgentState) -> Command[Literal["supervisor"]]:
    """Run the course-QA subgraph; fall back to the general agent on empty replies.

    Args:
        state: Current supervisor state.

    Returns:
        Command: Routes back to ``supervisor`` with whichever message was produced.
    """
    # Run the course QA subgraph
    course_state = course_qa_graph.invoke({"messages": state.get("messages", [])})
    last_message = course_state.get("messages", [])[-1]

    # Detect "no-answer" situation. `content` may be a string OR a list of
    # content parts; calling .strip() on a list would raise, so flatten first
    # (matches the normalization used in call_model).
    is_empty = False
    if isinstance(last_message, AIMessage):
        text = last_message.content
        if isinstance(text, list):
            text = "".join(p.get("text", "") for p in text if isinstance(p, dict))
        is_empty = not (text or "").strip()

    if is_empty:
        # Fall back to general agent
        general_state = general_graph.invoke({"messages": state.get("messages", [])})
        last_message = general_state.get("messages", [])[-1]

    # Append whichever message we ended up with
    return Command(goto="supervisor", update={"messages": last_message})

Thank you very much! In this scenario, how can I better distinguish between an incomplete query (the query is within the agent’s scope but incomplete, so it should be rewritten and then confirmed with the user) and a query that is out of the agent’s scope?

Hi @feng-1985

I think you’ll get much more control if you explicitly classify the user’s query before you ever call the tool-bound model. Make the model decide:

  • in_scope_complete: course-related and already precise → call tools normally.
  • in_scope_incomplete: course-related but missing key info → generate a rewrite + clarifying message and ask user to confirm.
  • out_of_scope: not about course info at all → either hand off to general_agent or say “not supported”.

You could do this as a separate node (no tools) using structured output, then branch your LangGraph based on that. This is a standard pattern in LangChain/LangGraph: “analyze → route → act” (see structured output docs: Structured output - Docs by LangChain and LangGraph routing examples: Router - Docs by LangChain and Workflows and agents - Docs by LangChain).

Example

1. Define a structured classifier for the course agent

Use your base model (without tools) to analyze the query:

from typing import Literal, Optional
from pydantic import BaseModel, Field

class CourseQueryAnalysis(BaseModel):
    """Structured classifier verdict for a user query: a three-way scope label
    plus optional rewrite/clarification text for the incomplete case."""
    # Scope label that drives the conditional edges downstream.
    scope: Literal["in_scope_complete", "in_scope_incomplete", "out_of_scope"] = Field(
        description=(
            "in_scope_complete: 课程相关且信息足够,可以直接查询;"
            "in_scope_incomplete: 课程相关但缺少重要信息,需要澄清或改写;"
            "out_of_scope: 与课程信息无关。"
        )
    )
    # Suggested rewrite; only populated when scope == "in_scope_incomplete".
    rewritten_query: Optional[str] = Field(
        default=None,
        description="如果 scope 是 in_scope_incomplete,给出一个更完整的、适合用来查询课程信息的改写问题。",
    )
    # User-facing confirmation message; also only for in_scope_incomplete.
    clarification_message: Optional[str] = Field(
        default=None,
        description="如果 scope 是 in_scope_incomplete,用中文写一段发给用户的澄清/确认消息。",
    )

Then add a classifier node in your course subgraph:

def analyze_course_query(state: CourseQAState) -> dict:
    """Classify the user's query (scope + optional rewrite) via structured output.

    Returns:
        dict: ``{"analysis": CourseQueryAnalysis}`` to merge into state.
    """
    classifier = model.with_structured_output(CourseQueryAnalysis)
    system_prompt = {
        "role": "system",
        "content": (
            "你是课程问答系统的分析助手。"
            "判断用户的问题是否与课程信息有关,以及信息是否完整。"
            "请严格按照 schema 输出。"
        ),
    }

    verdict = classifier.invoke([system_prompt, *state["messages"]])
    return {"analysis": verdict}

Now your CourseQAState should have an analysis field (e.g. analysis: CourseQueryAnalysis | None).

2. Use conditional edges in the subgraph

Rough layout of the course subgraph:

  • analyze_course_query → conditional edges:
  • in_scope_complete → course_call_model (use tools).
  • in_scope_incomplete → course_request_rewrite (ask user to confirm rewrite).
  • out_of_scope → course_out_of_scope (explain or let outer graph route away).
# Register the classifier node plus the three behavior nodes it routes to.
course_qa_graph_builder = StateGraph(CourseQAState)
course_qa_graph_builder.add_node("analyze_course_query", analyze_course_query)
course_qa_graph_builder.add_node("course_call_model", call_model)
course_qa_graph_builder.add_node("course_request_rewrite", course_request_rewrite)
course_qa_graph_builder.add_node("course_out_of_scope", course_out_of_scope)

def course_scope_condition(state: CourseQAState) -> str:
    """Conditional-edge selector: return the classifier's scope label as-is."""
    return state["analysis"].scope

# Always classify first, then branch on the scope label; each label maps
# onto the node that implements the matching behavior.
course_qa_graph_builder.add_edge(START, "analyze_course_query")
course_qa_graph_builder.add_conditional_edges(
    "analyze_course_query",
    course_scope_condition,
    {
        "in_scope_complete": "course_call_model",
        "in_scope_incomplete": "course_request_rewrite",
        "out_of_scope": "course_out_of_scope",
    },
)

3. Implement the “incomplete but in-scope” and “out-of-scope” behaviors

Incomplete but in-scope – show rewrite + ask for confirmation:

from langchain_core.messages import AIMessage

def course_request_rewrite(state: CourseQAState) -> dict:
    """Ask the user to confirm the classifier's suggested query rewrite.

    Prefers the classifier-written clarification message; otherwise builds
    a default confirmation prompt around the rewritten query.
    """
    analysis = state["analysis"]
    default_prompt = (
        "你的问题和课程信息有关,但还不够具体。\n"
        f"是否可以改写为:{analysis.rewritten_query!r}?\n"
        "如果可以,请回复“是”或直接使用该问题;如果不可以,请补充你想查询的课程名称/编号。"
    )
    reply = AIMessage(
        content=analysis.clarification_message or default_prompt,
        name="course_qa_clarification",
    )
    return {"messages": [reply]}

Your outer supervisor will see an AIMessage and stop (as you already do), so the user can respond with confirmation. When they confirm, the next user turn can be interpreted as:

  • “Yes, use the generated rewrite” → you replace the user message in state with analysis.rewritten_query (or store it) and go through course_call_model.

Out-of-scope – clearly say it’s not the course agent’s job (or route to general_agent):

def course_out_of_scope(state: CourseQAState) -> dict:
    """Tell the user the query is outside the course agent's responsibilities."""
    explanation = "\n".join([
        "这个问题不属于课程信息查询代理的职责范围,例如课程名称、时间、老师、学分等。",
        "你可以:",
        "1. 换一个与具体课程信息相关的问题;或者",
        "2. 让我切换到通用对话代理来尝试回答。",
    ])
    reply = AIMessage(content=explanation, name="course_qa_out_of_scope")
    return {"messages": [reply]}

If you prefer automatic routing instead of just a message, you can do that outside the subgraph (similar to how you already route between course_qa_agent and general_agent), by checking the name of the final AI message (e.g. “course_qa_out_of_scope”) and then sending the same input to general_agent.

1 Like

thanks, it seems to make the process longer, but it is still worth trying.

1 Like

The Langchain learning curve isn’t that flat, it takes some time. You’re doing great! I’m still learning too.