Having an issue using interrupt with Agent Chat UI

I am not able to get the interrupt working as intended: when the human types "yes" (approved), the graph should finish; otherwise it should treat the input as feedback.

import random
from dotenv import load_dotenv
from datetime import datetime 
from typing import TypedDict, Annotated, Literal

from langchain.tools import tool
from langchain.messages import HumanMessage
from langgraph.types import interrupt, Command
from langchain_openrouter import ChatOpenRouter
from langgraph.graph.message import add_messages
from langgraph.graph import StateGraph, START, END
from langgraph.checkpoint.memory import InMemorySaver,MemorySaver
from langchain.messages import SystemMessage, HumanMessage, ToolMessage, AIMessage

load_dotenv()

def log_to_file(message: str):
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    with open("debug_log.txt", "a") as f:
        f.write(f"[{timestamp}] {message}\n")

class AgentState(TypedDict):
    messages: Annotated[list, add_messages]
    human_approved: bool
    feedback: str

@tool
def current_time() -> str:
    """
    Get current timestamp
    """
    log_to_file('[Node current_time]')
    return f"Current Timestamp is : {str(datetime.now())}"

@tool
def get_weather(city: str) -> str:
    """
    Get weather for a given city.
    """
    log_to_file('[Node get_weather]')
    return f"It's always sunny in {city}!"

@tool
def get_contact_number(person_name: str) -> str:
    """
    Get a person's contact number.
    """
    log_to_file('[Node get_contact_number]')
    num = random.randint(10**9, 10**10 - 1)
    return f"Contact Number of {person_name}: {num}"

tools = [current_time, get_weather, get_contact_number]
tools_by_name = {tool.name: tool for tool in tools}

# Augment the LLM with tools
# llm_openrouter = ChatOpenRouter(model='nvidia/nemotron-3-super-120b-a12b:free')
llm_openrouter = ChatOpenRouter(model='stepfun/step-3.5-flash:free')

llm_with_tools = llm_openrouter.bind_tools(tools=tools)

# Nodes
def llm_call(state: AgentState):
    """LLM decides whether to call a tool or not"""
    log_to_file('[Node llm_call]')

    return {
        "messages": [
            llm_with_tools.invoke(
                [
                    SystemMessage(
                        content="You are a helpful assistant"
                    )
                ]
                + state["messages"]
            )
        ]
    }


def tool_node(state: dict):
    """Performs the tool call"""
    log_to_file('[Node tool_node]')

    result = []
    for tool_call in state["messages"][-1].tool_calls:
        tool = tools_by_name[tool_call["name"]]
        observation = tool.invoke(tool_call["args"])
        result.append(ToolMessage(content=observation, tool_call_id=tool_call["id"]))
    return {"messages": result}


# Conditional edge function to route to the tool node or end based upon whether the LLM made a tool call
def should_continue(state: AgentState) -> Literal["tool_node", 'approval_node']:
    """Decide if we should continue the loop or stop based upon whether the LLM made a tool call"""
    log_to_file('[Node should_continue]')

    messages = state["messages"]
    last_message = messages[-1]

    # If the LLM makes a tool call, then perform an action
    if last_message.tool_calls:
        return "tool_node"

    # Otherwise, we stop (reply to the user)
    return 'approval_node'

def human_in_loop_node(state: AgentState)-> Literal["llm_call", END]:
    log_to_file('[Node human_in_loop_node]')
    if state["human_approved"]:
        return END
    return 'llm_call'

def approval_node(state: AgentState):
    log_to_file('[Node approval_node]')

    last_msg = state["messages"][-1]

    # ✅ Case 1: We resumed → user has given feedback
    if isinstance(last_msg, HumanMessage):
        feedback = last_msg.content.strip().lower()

        log_to_file(f"[Approval Feedback]: {feedback}")

        return {
            "human_approved": feedback == "yes"
        }

    # ✅ Case 2: First time → pause execution
    log_to_file(">>> Interrupting for approval")

    interrupt({
        "type": "approval",
        "question": "Review this email. Approve or give feedback.",
        "instructions": "Type 'Yes' if approved, otherwise provide feedback."
    })

    # ❗ This return will NEVER execute but is required for graph consistency
    return {}

# def approval_node(state: AgentState):
#     with open("debug_log.txt", "a") as f:
#         f.write("\n--- Node Start ---\n")
#         f.write(f"Before interrupt: {state}\n")
#     # Pause and ask for approval
#     human_approved = False
#     # feedback = interrupt("Review this email. Approve or give feedback. Type 'Yes' if approved:")
#     print(state["messages"][-1].content)

#     interrupt({
#         "type": "approval",
#         "question": "Review this email. Approve or give feedback.",
#         "instructions": "Type 'Yes' if approved, otherwise provide feedback."
#     })
#     last_msg = state["messages"][-1].content

#     with open("debug_log.txt", "a") as f:
#         f.write(f"After resume: {last_msg}\n")

#     with open("debug_log.txt", "a") as f:
#         f.write(f"feedback resume: {feedback}\n")     

#     print(last_msg.content)
#     print("Interrupt returned:", feedback, type(feedback))
#     state["messages"].append(HumanMessage(content=feedback))
#     if  feedback["input"]["messages"][0]["content"][0]["text"].lower().strip()=='yes':
#         human_approved = True
#     return {"feedback": feedback, "human_approved": human_approved}

# Build workflow
agent_builder = StateGraph(AgentState)

# Add nodes
agent_builder.add_node("llm_call", llm_call)
agent_builder.add_node("tool_node", tool_node)
agent_builder.add_node('approval_node', approval_node)

# Add edges to connect nodes
agent_builder.add_edge(START, "llm_call")
agent_builder.add_conditional_edges(
    "llm_call",
    should_continue,
    ["tool_node", 'approval_node']
)
agent_builder.add_edge("tool_node", "llm_call")
agent_builder.add_conditional_edges("approval_node",
                                  human_in_loop_node,["llm_call",END])

# Compile the agent
#agent = agent_builder.compile(checkpointer=MemorySaver())
agent = agent_builder.compile()

config = {"configurable": {"thread_id": "thread-1"}}

# Invoke
# query = input("Enter the query: ")
# messages = [HumanMessage(content=query)]
# messages = agent.invoke({"messages": messages}, config=config, version="v2")
# for m in messages["messages"]:
#     m.pretty_print()

# print("Intrupt for feedback")
# messages = agent.invoke(Command(resume = 'Yes'), config=config, version="v2")

# for m in messages["messages"]:
#     m.pretty_print()

hi @narendrasingodia1998

some issues I’ve found:

  1. Missing checkpointer - agent_builder.compile() is called without a checkpointer, and interrupt() requires one to persist state. Fix: compile(checkpointer=InMemorySaver())
  2. interrupt() return value not captured - interrupt(...) is called but its return value is discarded. When the graph resumes, interrupt() returns the human's response. Fix: answer = interrupt(...)
  3. Wrong resume-detection pattern - The code checks isinstance(last_msg, HumanMessage) to detect resumption, but after a resume the messages in state are unchanged; the resume value only comes back through interrupt()'s return value. Fix: remove the isinstance check entirely and rely on the captured return value from fix #2 (see the sketch below).
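
Putting all three fixes together, approval_node collapses to something like this (a minimal sketch; it assumes the resume value arrives as a plain string):

def approval_node(state: AgentState):
    log_to_file('[Node approval_node]')

    # Pauses the graph on the first pass; on resume it returns the value
    # supplied via Command(resume=...)
    answer = interrupt({
        "type": "approval",
        "question": "Review this email. Approve or give feedback.",
        "instructions": "Type 'Yes' if approved, otherwise provide feedback.",
    })
    return {"human_approved": str(answer).strip().lower() == "yes"}

agent = agent_builder.compile(checkpointer=InMemorySaver())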

docs: Interrupts - Docs by LangChain

Hi,

Thank you for your response. I am currently using the Agent Chat UI (Agent Chat UI - Docs by LangChain) to chat with the graph, where a checkpointer is not required.

Point 1: I have implemented the checkpointer in the code; however, while using the Chat UI, it needs to be disabled. That is why I have commented out those lines.

Point 2: I have another function that handles the same logic, but it is also not working.

Please let me know if you need any additional details.

Use LangChain primitives, not your own.

A checkpointer and interrupt are a must - don't replace them with your own functions, otherwise you will be asking for problems :slight_smile:

while using the Chat UI, it needs to be disabled.

I don't think so. Why?
The Chat UI has nothing to do with the backend agent - it only communicates with the agent, shows the UI, and sends user interaction back to the backend agent.
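
Driven directly through the graph API, the round trip looks roughly like this (a sketch; the Chat UI performs the equivalent calls over HTTP):

# First turn: the run pauses at interrupt(); in recent langgraph versions the
# interrupt payload is surfaced to the caller under the "__interrupt__" key
result = agent.invoke({"messages": [HumanMessage(content="hi")]}, config=config)
print(result["__interrupt__"])

# Second turn: the human's reply becomes interrupt()'s return value inside
# approval_node, and the graph continues from where it paused
result = agent.invoke(Command(resume="Yes"), config=config)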

Below is the updated code :slight_smile:

import random
from dotenv import load_dotenv
from datetime import datetime 
from typing import TypedDict, Annotated, Literal

from langchain.tools import tool
from langchain.messages import HumanMessage
from langgraph.types import interrupt, Command
from langchain_openrouter import ChatOpenRouter
from langgraph.graph.message import add_messages
from langgraph.graph import StateGraph, START, END
from langgraph.checkpoint.memory import InMemorySaver,MemorySaver
from langchain.messages import SystemMessage, HumanMessage, ToolMessage, AIMessage

load_dotenv()

def log_to_file(message: str):
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    with open("debug_log.txt", "a") as f:
        f.write(f"[{timestamp}] {message}\n")

class AgentState(TypedDict):
    messages: Annotated[list, add_messages]
    human_approved: bool
    feedback: str

@tool
def current_time() -> str:
    """
    Get current timestamp
    """
    log_to_file('[Node current_time]')
    return f"Current Timestamp is : {str(datetime.now())}"

@tool
def get_weather(city: str) -> str:
    """
    Get weather for a given city.
    """
    log_to_file('[Node get_weather]')
    return f"It's always sunny in {city}!"

@tool
def get_contact_number(person_name: str) -> str:
    """
    Get a person's contact number.
    """
    log_to_file('[Node get_contact_number]')
    num = random.randint(10**9, 10**10 - 1)
    return f"Contact Number of {person_name}: {num}"

tools = [current_time, get_weather, get_contact_number]
tools_by_name = {tool.name: tool for tool in tools}

# Augment the LLM with tools
# llm_openrouter = ChatOpenRouter(model='nvidia/nemotron-3-super-120b-a12b:free')
llm_openrouter = ChatOpenRouter(model='stepfun/step-3.5-flash:free')

llm_with_tools = llm_openrouter.bind_tools(tools=tools)

# Nodes
def llm_call(state: AgentState):
    """LLM decides whether to call a tool or not"""
    log_to_file('[Node llm_call]')

    return {
        "messages": [
            llm_with_tools.invoke(
                [
                    SystemMessage(
                        content="You are a helpful assistant"
                    )
                ]
                + state["messages"]
            )
        ]
    }


def tool_node(state: dict):
    """Performs the tool call"""
    log_to_file('[Node tool_node]')

    result = []
    for tool_call in state["messages"][-1].tool_calls:
        tool = tools_by_name[tool_call["name"]]
        observation = tool.invoke(tool_call["args"])
        result.append(ToolMessage(content=observation, tool_call_id=tool_call["id"]))
    return {"messages": result}


# Conditional edge function to route to the tool node or end based upon whether the LLM made a tool call
def should_continue(state: AgentState) -> Literal["tool_node", 'approval_node']:
    """Decide if we should continue the loop or stop based upon whether the LLM made a tool call"""
    log_to_file('[Node should_continue]')

    messages = state["messages"]
    last_message = messages[-1]

    # If the LLM makes a tool call, then perform an action
    if last_message.tool_calls:
        return "tool_node"

    # Otherwise, we stop (reply to the user)
    return 'approval_node'

def human_in_loop_node(state: AgentState)-> Literal["llm_call", END]:
    log_to_file('[Node human_in_loop_node]')
    if state["human_approved"]:
        return END
    return 'llm_call'

def approval_node(state: AgentState):
    log_to_file('[Node approval_node]')
    log_to_file(">>> Interrupting for approval")

    # Pause execution; on resume, interrupt() returns the human's reply
    answer = interrupt({
        "type": "approval",
        "question": "Review this email. Approve or give feedback.",
        "instructions": "Type 'Yes' if approved, otherwise provide feedback."
    })
    human_approved = answer.lower().strip() == 'yes'

    return {'human_approved': human_approved}

# Build workflow
agent_builder = StateGraph(AgentState)

# Add nodes
agent_builder.add_node("llm_call", llm_call)
agent_builder.add_node("tool_node", tool_node)
agent_builder.add_node('approval_node', approval_node)

# Add edges to connect nodes
agent_builder.add_edge(START, "llm_call")
agent_builder.add_conditional_edges(
    "llm_call",
    should_continue,
    ["tool_node", 'approval_node']
)
agent_builder.add_edge("tool_node", "llm_call")
agent_builder.add_conditional_edges("approval_node",
                                  human_in_loop_node,["llm_call",END])

# Compile the agent
agent = agent_builder.compile(checkpointer=InMemorySaver())




JSON file langgraph.json:

{
  "dependencies": ["."],
  "graphs": {
    "agent": "./hello_world.py:agent"
  },
  "env": ".env"
}

Running this command:
langgraph dev

Getting this error:

File "/Users/narendra.singodia/Desktop/coding_agent/.venv/lib/python3.11/site-packages/langgraph_api/graph.py", line 496, in collect_graphs_from_env
raise GraphLoadError(spec, exc) from exc
langgraph_api.utils.errors.GraphLoadError: Failed to load graph 'agent' from ./hello_world.py: Heads up! Your graph 'agent' from './hello_world.py' includes a custom checkpointer (type <class 'langgraph.checkpoint.memory.InMemorySaver'>). With LangGraph API, persistence is handled automatically by the platform, so providing a custom checkpointer (type <class 'langgraph.checkpoint.memory.InMemorySaver'>) here isn't necessary and will be ignored when deployed.

To simplify your setup and use the built-in persistence, please remove the custom checkpointer (type <class 'langgraph.checkpoint.memory.InMemorySaver'>) from your graph definition. If you are looking to customize which postgres database to connect to, please set the POSTGRES_URI environment variable. See Redirecting... for more details.

Based on this error, I have commented out the checkpointer.

Ok, I get it now… investigating…

When you run langgraph dev, your custom checkpointer is overridden by the default one provided by LangSmith itself in development mode.

So remove the checkpointer, keep the interrupt logic as it is, and please tell me whether it works - if it doesn't, what are the issues/errors?
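
One common pattern (a sketch, not a requirement) is to export the bare graph for langgraph dev and attach a checkpointer only for standalone runs:

# Exported graph: langgraph dev / the platform injects persistence automatically
agent = agent_builder.compile()

if __name__ == "__main__":
    # Standalone execution still needs explicit persistence for interrupt()
    local_agent = agent_builder.compile(checkpointer=InMemorySaver())
    config = {"configurable": {"thread_id": "thread-1"}}
    local_agent.invoke({"messages": [HumanMessage(content="hi")]}, config=config)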

Currently I have an issue displaying the interrupt text in the Chat UI and getting the result back from interrupt().

Help me - what should the schema be?
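
For reference, Agent Chat UI is designed to render interrupts that follow the HumanInterrupt schema from langgraph.prebuilt.interrupt. A sketch based on that schema (the action name is hypothetical; verify the exact fields against the current docs):

from langgraph.prebuilt.interrupt import HumanInterrupt

request: HumanInterrupt = {
    "action_request": {
        "action": "approve_email",  # hypothetical action name
        "args": {},
    },
    "config": {
        "allow_accept": True,   # renders an "Accept" button
        "allow_respond": True,  # allows free-form feedback text
        "allow_edit": False,
        "allow_ignore": False,
    },
    "description": "Review this email. Type 'Yes' if approved, otherwise provide feedback.",
}

# interrupt() takes a list of requests; the resume value is a list of
# responses like {"type": "accept", "args": None} or
# {"type": "response", "args": "<free-form feedback>"}
answer = interrupt([request])[0]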