# Issue: interrupt() is not pausing the graph. Desired behavior: when the human
# resumes with "yes" (or "approved") the run should finish; any other input is
# treated as feedback and routed back to the LLM for another pass.
import random
from dotenv import load_dotenv
from datetime import datetime
from typing import TypedDict, Annotated, Literal
from langchain.tools import tool
from langchain.messages import HumanMessage
from langgraph.types import interrupt, Command
from langchain_openrouter import ChatOpenRouter
from langgraph.graph.message import add_messages
from langgraph.graph import StateGraph, START, END
from langgraph.checkpoint.memory import InMemorySaver,MemorySaver
from langchain.messages import SystemMessage, HumanMessage, ToolMessage, AIMessage
load_dotenv()
def log_to_file(message: str):
    """Append *message* to debug_log.txt, prefixed with a timestamp."""
    stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    with open("debug_log.txt", "a") as log_file:
        log_file.write(f"[{stamp}] {message}\n")
class AgentState(TypedDict):
    """Shared graph state threaded through every node."""
    # Conversation history; add_messages appends new messages instead of overwriting.
    messages:Annotated[list,add_messages]
    # Set by approval_node: True once the human has typed an approval.
    human_approved:bool
    # Free-text feedback captured when the human does not approve.
    feedback: str
@tool
def current_time() -> str:
    """
    Get current timestamp
    """
    log_to_file('[Node current_time]')
    now = datetime.now()
    return f"Current Timestamp is : {now}"
@tool
def get_weather(city: str) -> str:
    """
    Get weather for a given city.
    """
    log_to_file('[Node get_weather]')
    # Stubbed response — no real weather service is consulted.
    return "It's always sunny in " + city + "!"
@tool
def get_contact_number(person_name: str) -> str:
    """
    Get person's contact number.
    """
    # NOTE: the docstring above is the tool description the LLM sees when
    # choosing tools — the original typo ("preson") degraded tool selection.
    log_to_file('[Node get_contact_number]')
    # Returns a random 10-digit placeholder; this is not a real directory lookup.
    num = random.randint(10**9, 10**10 - 1)
    return f"Contact Number of {person_name}: {num}"
# Register tools and index them by name for dispatch in tool_node.
# (loop variable renamed from `tool` — it shadowed the imported @tool decorator)
tools = [current_time, get_weather, get_contact_number]
tools_by_name = {t.name: t for t in tools}
# Augment the LLM with tools.
# The original code built a ChatOpenRouter for the nvidia model and then
# immediately discarded it by rebinding the name — keep only the model in use.
llm_openrouter = ChatOpenRouter(model='stepfun/step-3.5-flash:free')
llm_with_tools = llm_openrouter.bind_tools(tools=tools)
# Nodes
def llm_call(state: AgentState):
    """LLM decides whether to call a tool or not"""
    log_to_file('[Node llm_call]')
    # Prepend the system prompt to the accumulated conversation each turn.
    prompt = [SystemMessage(content="You are a helpful assistant")] + state["messages"]
    response = llm_with_tools.invoke(prompt)
    return {"messages": [response]}
def tool_node(state: AgentState):
    """Execute every tool call requested by the last AI message.

    Returns a state update whose "messages" list holds one ToolMessage per
    requested call, each tied back to its originating tool_call_id.
    """
    # Annotation changed from bare `dict` to AgentState for consistency with
    # the other nodes; loop variable renamed so it no longer shadows the
    # imported @tool decorator.
    log_to_file('[Node tool_node]')
    results = []
    # The last message is expected to be an AIMessage carrying tool_calls
    # (guaranteed by the should_continue router).
    for call in state["messages"][-1].tool_calls:
        tool_fn = tools_by_name[call["name"]]
        observation = tool_fn.invoke(call["args"])
        results.append(ToolMessage(content=observation, tool_call_id=call["id"]))
    return {"messages": results}
# Conditional edge: route to the tool node when the LLM requested tools,
# otherwise hand the draft answer to the human for approval.
def should_continue(state: AgentState) -> Literal["tool_node", 'approval_node']:
    """Decide if we should continue the loop or stop based upon whether the LLM made a tool call"""
    log_to_file('[Node should_continue]')
    last = state["messages"][-1]
    return "tool_node" if last.tool_calls else 'approval_node'
def human_in_loop_node(state: AgentState)-> Literal["llm_call", END]:
    """Route to END once the human has approved, else loop back to the LLM."""
    log_to_file('[Node human_in_loop_node]')
    # Not approved -> another llm_call pass (feedback is already in messages).
    return END if state["human_approved"] else 'llm_call'
def approval_node(state: AgentState):
    """Pause for human review; record approval or capture feedback.

    interrupt() suspends the graph until it is resumed with
    Command(resume=<value>), and on resume interrupt() RETURNS that value.
    The original code discarded this return value, so the human's "yes"
    could never be observed and approval never happened. Requires the
    graph to be compiled with a checkpointer for pause/resume to work.
    """
    log_to_file('[Node approval_node]')
    log_to_file(">>> Interrupting for approval")
    decision = interrupt({
        "type": "approval",
        "question": "Review this email. Approve or give feedback.",
        "instructions": "Type 'Yes' if approved, otherwise provide feedback."
    })
    # On resume, `decision` is whatever was passed via Command(resume=...).
    feedback_text = str(decision).strip()
    log_to_file(f"[Approval Feedback]: {feedback_text}")
    if feedback_text.lower() in ("yes", "approved"):
        return {"human_approved": True, "feedback": ""}
    # Not approved: store the feedback and append it to the conversation as a
    # HumanMessage so the next llm_call pass can revise the answer.
    return {
        "human_approved": False,
        "feedback": feedback_text,
        "messages": [HumanMessage(content=feedback_text)],
    }
# def approval_node(state: AgentState):
# with open("debug_log.txt", "a") as f:
# f.write("\n--- Node Start ---\n")
# f.write(f"Before interrupt: {state}\n")
# # Pause and ask for approval
# human_approved = False
# # feedback = interrupt("Review this email. Approve or give feedback. Type 'Yes' if approved:")
# print(state["messages"][-1].content)
# interrupt({
# "type": "approval",
# "question": "Review this email. Approve or give feedback.",
# "instructions": "Type 'Yes' if approved, otherwise provide feedback."
# })
# last_msg = state["messages"][-1].content
# with open("debug_log.txt", "a") as f:
# f.write(f"After resume: {last_msg}\n")
# with open("debug_log.txt", "a") as f:
# f.write(f"feedback resume: {feedback}\n")
# print(last_msg.content)
# print("Interrupt returned:", feedback, type(feedback))
# state["messages"].append(HumanMessage(content=feedback))
# if feedback["input"]["messages"][0]["content"][0]["text"].lower().strip()=='yes':
# human_approved = True
# return {"feedback": feedback, "human_approved": human_approved}
# Build workflow
agent_builder = StateGraph(AgentState)
# Add nodes
agent_builder.add_node("llm_call", llm_call)
agent_builder.add_node("tool_node", tool_node)
agent_builder.add_node('approval_node', approval_node)
# Add edges to connect nodes
agent_builder.add_edge(START, "llm_call")
agent_builder.add_conditional_edges(
    "llm_call",
    should_continue,
    ["tool_node", 'approval_node']
)
agent_builder.add_edge("tool_node", "llm_call")
agent_builder.add_conditional_edges("approval_node",
                                    human_in_loop_node, ["llm_call", END])
# Compile the agent WITH a checkpointer: interrupt()/Command(resume=...)
# only work when the graph can persist state between the pause and the
# resume. Compiling without one (as the original did) is why the graph
# never actually interrupted.
agent = agent_builder.compile(checkpointer=MemorySaver())
# thread_id identifies the checkpointed conversation to resume after an interrupt.
config = {"configurable": {"thread_id": "thread-1"}}
# Invoke
# query = input("Enter the query: ")
# messages = [HumanMessage(content=query)]
# messages = agent.invoke({"messages": messages}, config=config, version="v2")
# for m in messages["messages"]:
# m.pretty_print()
# print("Intrupt for feedback")
# messages = agent.invoke(Command(resume = 'Yes'), config=config, version="v2")
# for m in messages["messages"]:
# m.pretty_print()
