The project works fine when LangGraph is built and run locally; the issues appear only when deploying to the cloud environment. When I set LANGGRAPH_URL='http://127.0.0.1:2024' it works fine, but in LangGraph Cloud it fails when resuming after an interrupt.
Here is the code snippet:
`from typing import Callable, Optional
from langchain_core.tools import BaseTool, tool as create_tool
from langchain_core.runnables import RunnableConfig
from langgraph.types import interrupt, Command
from langgraph.prebuilt.interrupt import HumanInterruptConfig, HumanInterrupt
from langgraph_app.states.file_data import FileData
def add_human_in_the_loop(
tool: Callable | BaseTool,
*,
interrupt_config: HumanInterruptConfig = None,
) → BaseTool:
“”“Wrap a tool to support human-in-the-loop review.”“”
if not isinstance(tool, BaseTool):
tool = create_tool(tool)
if interrupt_config is None:
interrupt_config = {
"allow_accept": True,
"allow_edit": True,
"allow_respond": True,
}
@create_tool(
tool.name,
description=tool.description,
args_schema=tool.args_schema
)
async def call_tool_with_interrupt(config: RunnableConfig, question: str, field_asked_for: Optional[str]=None, **tool_input):
tool_call_id = (
tool_input.pop("tool_call_id", None)
or tool_input.pop("_tool_call_id", None)
)
request: HumanInterrupt = {
"action_request": {
"action": tool.name,
"args": {"question": question, "field_asked_for": field_asked_for}
},
"config": interrupt_config,
"description": "Please review the tool call"
}
response = interrupt([request])
text = ""
imgs = []
files = []
for humanmsg in response:
obj_list=humanmsg.content
for obj in obj_list:
# if obj.get("type")=="text":
# text += obj.get("content", "")
if obj.get("type") == "text":
text_value = obj.get("content") or obj.get("text") or ""
text += text_value
elif obj.get("type") == "image_url":
imgs.append(obj.get("image_url").get("url"))
elif obj.get("type") == "file":
file_content = obj.get("content", {})
files.append(FileData(file_name=file_content.get("filename", ""), file_data=file_content.get("file_data", "")))
tool_input["user_text_response"] = text
tool_input["user_img_response"] = imgs
tool_input["user_file_response"] = files
print(f"----------->>{tool_call_id}")
# tool_input["tool_call_id"] = tool_call_id
tool_input["question"] = question
tool_input["field_asked_for"] = field_asked_for
tool_response = await tool.ainvoke(tool_input, config)
return tool_response
return call_tool_with_interrupt`
`
@tool
async def need_human_input(question: str, user_text_response: Optional[str],
                           state: Annotated[State, InjectedState],
                           tool_call_id: Annotated[str, InjectedToolCallId]):
    """
    Use this tool when you need to ask user about product or clarification.
    Example: "Which product are you referring to?" or "Is it shower cubicle or sliding door?"
    """
    # Close out the pending tool call, then replay the question as an AI turn.
    new_msgs = [
        ToolMessage(
            "Waiting for response", tool_call_id=tool_call_id,
            name='ask_query_to_human'),
        AIMessage(content=question),
    ]
    user_response = []
    if user_text_response:
        user_response.append({"type": "text", "text": user_text_response})
    # NOTE(review): this replaces state.messages rather than appending via a
    # reducer — confirm that is intentional for this State definition.
    state.messages = new_msgs + [HumanMessage(content=user_response)]
    return Command(update=state)
def fetch_supervisor_tools():
TOOLS: List[Callable[…, Any]] = [
add_human_in_the_loop(need_human_input),
]
return TOOLS`
ERROR: When tool includes an InjectedToolCallId argument, tool must always be invoked with a full model ToolCall of the form: {'args': {...}, 'name': '...', 'type': 'tool_call', 'tool_call_id': '...'}
This error is not raised by local `langgraph dev`, but it is raised in LangGraph Cloud. Is it caused by the InjectedToolCallId parameter in the tool?