In the following code, why is the tool not getting invoked, and why is the context not accessible? The LLM I'm using is capable of tool calling. Here is the code:
from dataclasses import dataclass
from typing import Annotated
import os
import dotenv
from langchain.agents import AgentState, create_agent
from langchain.tools import ToolRuntime, tool
from langchain_core.messages import AnyMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import add_messages
dotenv.load_dotenv()
def merge_dicts(current: dict, new: dict) -> dict:
    """Shallow-merge two dicts into a fresh dict; keys in `new` win."""
    merged = dict(current)
    merged.update(new)
    return merged
class AppState(AgentState):
    """Agent state schema: the built-in message channel plus a custom dict channel."""
    messages: Annotated[list[AnyMessage], add_messages]  # Conversation history, merged by add_messages
    user_preferences: Annotated[dict, merge_dicts]  # Custom mutable field, merged by shallow merge_dicts
# In-memory stand-in for a user store; keyed by user_id (see UserContext).
# Each record carries the fields the get_account_info tool reads.
USER_DATABASE = {
    "user123": {
        "name": "Alice Johnson",
        "account_type": "Premium",
        "balance": 5000,
        "email": "alice@example.com",
    },
    "user456": {
        "name": "Bob Smith",
        "account_type": "Standard",
        "balance": 1200,
        "email": "bob@example.com",
    },
}
@dataclass
class UserContext:
    """Context schema for the agent; an instance is passed via agent.invoke(..., context=...)."""
    user_id: str  # key into USER_DATABASE identifying the active user
@tool
def get_account_info(
    query: str,
    runtime: ToolRuntime,
) -> str:
    """Get the current user's account information.

    Args:
        query: Free-form request (ignored except for debugging).

    The user_id is provided via runtime.context (passed at agent.invoke time).
    Returns a formatted account summary, or an explanatory error string.
    """
    print("[INFO] Tool invoked. query=", query)
    print("[INFO] Tool invoked. runtime.context=", runtime.context)
    # runtime.context is None when the caller did not pass `context=` to
    # agent.invoke — the usual cause of "context is not accessible". The
    # original `runtime.context.user_id` would raise AttributeError here,
    # which surfaces to the model as a tool error and can look like the
    # tool was never invoked. Guard explicitly instead.
    ctx = runtime.context
    if ctx is None:
        return "Error: no runtime context provided (pass context=UserContext(user_id=...) to agent.invoke)."
    # Accept either the UserContext dataclass or a plain dict context.
    user_id = ctx.get("user_id") if isinstance(ctx, dict) else getattr(ctx, "user_id", None)
    if user_id is None:
        return "Error: runtime context has no user_id."
    print("[INFO] Fetching account info for user_id:", user_id)
    user = USER_DATABASE.get(user_id)
    if not user:
        return "User not found"
    return (
        f"Account holder: {user['name']}\n"
        f"Type: {user['account_type']}\n"
        f"Balance: ${user['balance']}"
    )
checkpointer = MemorySaver()  # in-memory checkpointer; state is keyed by config's thread_id
model = ChatOpenAI(
    model=os.getenv("OPENAI_MODEL") or "gpt-4o-mini",  # env override with a safe default
    temperature=0.1,
    use_responses_api=False,  # disable the Responses API path
)
# Agent wiring: context_schema makes runtime.context a UserContext instance inside
# tools; state_schema adds the user_preferences channel on top of AgentState.
agent = create_agent(
    model,
    tools=[get_account_info],
    context_schema=UserContext,
    state_schema=AppState,
    checkpointer=checkpointer,
    system_prompt="You are a financial assistant.",
)
config = {"configurable": {"thread_id": "session1"}}  # thread_id selects the checkpoint
prompt = {
    "messages": [
        HumanMessage(
            content=(
                "Get my account balance. Please call the get_account_info tool to return my account balance."
            )
        )
    ]
}
print("Invoking agent...")
# context=UserContext(...) is what populates runtime.context for the tool.
# NOTE(review): if the tool is still not invoked, inspect the intermediate
# messages in result["messages"] for a tool-call attempt or a tool error —
# presumably the installed langchain/langgraph version supports the
# `context=` kwarg on invoke; verify against the installed 1.0.x docs.
result = agent.invoke(prompt, context=UserContext(user_id="user123"), config=config)
print("\n=== RESULT MESSAGES ===")
print(result["messages"][-1].content)
In the following code, the tool is getting invoked and the state is accessible:
from dataclasses import dataclass
from typing import Annotated
import dotenv
from langchain.chat_models import init_chat_model
from langchain.agents import create_agent, AgentState
from langchain.tools import tool, ToolRuntime
from langgraph.graph.message import add_messages
import os
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage, AnyMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.types import Command
dotenv.load_dotenv()
def merge_dicts(current: dict, new: dict) -> dict:
    # Shallow merge via the dict union operator; the right operand wins on key clashes.
    return current | new
class AppState(AgentState):
    """Agent state schema: the built-in message channel plus a custom dict channel."""
    messages: Annotated[list[AnyMessage], add_messages]  # Conversation history, merged by add_messages
    user_preferences: Annotated[dict, merge_dicts]  # Custom mutable field, merged by shallow merge_dicts
# Access the current conversation state
@tool
def summarize_conversation(
    runtime: ToolRuntime
) -> str:
    """Summarize the conversation so far."""
    # Tally messages by class name; unknown message types are ignored,
    # matching the original per-type counts.
    tallies = {"HumanMessage": 0, "AIMessage": 0, "ToolMessage": 0}
    for message in runtime.state["messages"]:
        kind = type(message).__name__
        if kind in tallies:
            tallies[kind] += 1
    return (
        f"Conversation has {tallies['HumanMessage']} user messages, "
        f"{tallies['AIMessage']} AI responses, and {tallies['ToolMessage']} tool results"
    )
# Access custom state fields
@tool
def get_user_preference(
    pref_name: str,
    runtime: ToolRuntime  # ToolRuntime parameter is not visible to the model
) -> str:
    """Get a user preference value."""
    # Read the custom channel from agent state; fall back to "Not set"
    # when the preference (or the whole channel) is absent.
    stored = runtime.state.get("user_preferences", {})
    return stored.get(pref_name, "Not set")
@tool
def update_user_preference(
    preference_item: str,
    preference_value: int,
    runtime: ToolRuntime,
) -> Command:
    """Update the user's preference and preference values."""
    new_prefs = {preference_item: preference_value}
    print("In tool, updating preferences to:", new_prefs)
    # A Command lets the tool write to state directly; the ToolMessage is
    # required so the model sees a response for this tool call.
    ack = ToolMessage(
        content="Preference updated successfully",
        tool_call_id=runtime.tool_call_id,
    )
    return Command(update={"user_preferences": new_prefs, "messages": [ack]})
checkpointer = MemorySaver()  # in-memory checkpointer; state is keyed by config's thread_id
# Fall back to a default model name when OPENAI_MODEL is unset — consistent
# with the first script. The original passed model=None in that case, which
# fails at ChatOpenAI construction time.
model = ChatOpenAI(model=os.getenv("OPENAI_MODEL") or "gpt-4o-mini", temperature=0.1)
# No context_schema here: these tools read/write runtime.state, not runtime.context.
agent = create_agent(
    model,
    tools=[get_user_preference, update_user_preference],
    checkpointer=checkpointer,
    state_schema=AppState,
)
config = {"configurable": {"thread_id": "session1"}}  # one thread, so state accumulates across turns

# Drive a multi-turn session: three preference updates, then a read-back query.
turns = [
    "Update user's food preferences to 50",
    "Update user's music preferences to 80",
    "Update user's phone preferences to 70 and laptop preferences to 90",
    "What are user's laptop preferences value?",
]
for turn in turns:
    agent.invoke({"messages": [HumanMessage(content=turn)]}, config=config)

# Debugging aids: inspect the full checkpointed state or each stored message.
# print("Session 1 final state:", agent.get_state(config).values)
# for message in agent.get_state(config).values['messages']:
#     print(message)
response = agent.get_state(config).values['messages'][-1].content
print(response)
I have the following libraries installed:
pip show langchain langchain-openai langgraph openai | findstr /R /C:"Name" /C:"Version"
Name: langchain
Version: 1.0.1
Name: langchain-openai
Version: 1.0.0
Name: langgraph
Version: 1.0.0
Name: openai
Version: 2.6.0