Context is not accessible

In the following code, why is the tool not getting invoked, and why is the context not accessible? The LLM I'm using is capable of tool calling. Here is the code:

from dataclasses import dataclass
from typing import Annotated

import os

import dotenv
from langchain.agents import AgentState, create_agent
from langchain.tools import ToolRuntime, tool
from langchain_core.messages import AnyMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import add_messages

dotenv.load_dotenv()  # Load API keys / model settings (e.g. OPENAI_API_KEY, OPENAI_MODEL) from a local .env file


def merge_dicts(current: dict, new: dict) -> dict:
    """Shallow-merge two dicts; entries in ``new`` win over ``current``."""
    merged = dict(current)
    merged.update(new)
    return merged


class AppState(AgentState):
    """Agent state schema: the prebuilt AgentState plus a custom preferences channel."""
    messages: Annotated[list[AnyMessage], add_messages]  # Conversation history
    user_preferences: Annotated[dict, merge_dicts]  # Custom mutable field, shallow-merged by merge_dicts


# In-memory stand-in for a user store; keyed by the user_id carried in UserContext.
USER_DATABASE = {
    "user123": {
        "name": "Alice Johnson",
        "account_type": "Premium",
        "balance": 5000,
        "email": "alice@example.com",
    },
    "user456": {
        "name": "Bob Smith",
        "account_type": "Standard",
        "balance": 1200,
        "email": "bob@example.com",
    },
}


@dataclass
class UserContext:
    """Per-invocation context schema; an instance is passed via agent.invoke(..., context=...)."""
    user_id: str  # Key into USER_DATABASE identifying the current user


@tool
def get_account_info(
    query: str,
    runtime: ToolRuntime,
) -> str:
    """Get the current user's account information.

    Args:
        query: Free-form request (ignored except for debugging).

    The user_id is provided via runtime.context (passed at agent.invoke time).
    """
    # Identity comes from the injected runtime, not from the model-supplied args.
    user_id = runtime.context.user_id
    print("[INFO] Tool invoked. query=", query)
    print("[INFO] Tool invoked. runtime.context=", runtime.context)
    print("[INFO] Fetching account info for user_id:", user_id)

    record = USER_DATABASE.get(user_id)
    if not record:
        return "User not found"

    # Assemble the reply line-by-line instead of one concatenated literal.
    lines = [
        f"Account holder: {record['name']}",
        f"Type: {record['account_type']}",
        f"Balance: ${record['balance']}",
    ]
    return "\n".join(lines)


# Checkpointer persists per-thread conversation state between invocations.
checkpointer = MemorySaver()
model = ChatOpenAI(
    model=os.getenv("OPENAI_MODEL") or "gpt-4o-mini",  # env override with a default
    temperature=0.1,
    use_responses_api=False,  # stay on the chat-completions path, not the Responses API
)

agent = create_agent(
    model,
    tools=[get_account_info],
    context_schema=UserContext,  # shape of the context= value passed to invoke()
    state_schema=AppState,
    checkpointer=checkpointer,
    system_prompt="You are a financial assistant.",
)

# thread_id keys the checkpointer's saved state for this conversation.
config = {"configurable": {"thread_id": "session1"}}

prompt = {
    "messages": [
        HumanMessage(
            content=(
                "Get my account balance. Please call the get_account_info tool to return my account balance."
            )
        )
    ]
}
print("Invoking agent...")
# The context object is what the tool later reads via runtime.context.
result = agent.invoke(prompt, context=UserContext(user_id="user123"), config=config)

print("\n=== RESULT MESSAGES ===")
print(result["messages"][-1].content)

In the following code, the tool is getting invoked and the state is accessible.

from dataclasses import dataclass
from typing import Annotated

import dotenv
from langchain.chat_models import init_chat_model
from langchain.agents import create_agent, AgentState
from langchain.tools import tool, ToolRuntime
from langgraph.graph.message import add_messages
import os

from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage, AnyMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.types import Command

dotenv.load_dotenv()  # Load API keys / model settings (e.g. OPENAI_API_KEY, OPENAI_MODEL) from a local .env file

def merge_dicts(current: dict, new: dict) -> dict:
    """Return a shallow merge of *current* and *new*; values from *new* take precedence."""
    return current | new

class AppState(AgentState):
    """Agent state schema: the prebuilt AgentState plus a custom preferences channel."""
    messages: Annotated[list[AnyMessage], add_messages] # Conversation history
    user_preferences: Annotated[dict, merge_dicts]  # Custom mutable field, shallow-merged by merge_dicts

# Access the current conversation state
@tool
def summarize_conversation(
    runtime: ToolRuntime
) -> str:
    """Summarize the conversation so far."""
    history = runtime.state["messages"]

    # Tally message kinds by class name (avoids importing the message classes).
    counts = {"HumanMessage": 0, "AIMessage": 0, "ToolMessage": 0}
    for msg in history:
        kind = type(msg).__name__
        if kind in counts:
            counts[kind] += 1

    return (
        f"Conversation has {counts['HumanMessage']} user messages, "
        f"{counts['AIMessage']} AI responses, and {counts['ToolMessage']} tool results"
    )

# Access custom state fields
@tool
def get_user_preference(
    pref_name: str,
    runtime: ToolRuntime  # ToolRuntime parameter is not visible to the model
) -> str:
    """Get a user preference value."""
    # Missing channel and missing key both fall back gracefully.
    stored = runtime.state.get("user_preferences", {})
    value = stored.get(pref_name, "Not set")
    return value

@tool
def update_user_preference(
    preference_item: str,
    preference_value:int,
    runtime: ToolRuntime
) -> Command:
    """Update the user's preference and preference values."""
    # Single-entry delta; the state reducer merges it into user_preferences.
    delta = {preference_item: preference_value}

    print("In tool, updating preferences to:", delta)

    # A Command writes both state channels and acknowledges the tool call.
    ack = ToolMessage(
        content="Preference updated successfully",
        tool_call_id=runtime.tool_call_id,
    )
    return Command(update={"user_preferences": delta, "messages": [ack]})

# Persist state across the invoke() calls below so preferences accumulate.
checkpointer = MemorySaver()
model = ChatOpenAI(model=os.getenv("OPENAI_MODEL"), temperature=0.1)
agent = create_agent(
    model,
    tools=[get_user_preference,update_user_preference],
    checkpointer=checkpointer,
    state_schema= AppState
)

# All invocations share one thread_id, i.e. one persisted conversation.
config = {"configurable": {"thread_id": "session1"}}




prompt = {
    "messages" : [
        HumanMessage(content="Update user's food preferences to 50")
    ]
}

agent.invoke(prompt,config=config)


prompt = {
    "messages" : [
        HumanMessage(content="Update user's music preferences to 80")
    ]
}

agent.invoke(prompt,config=config)

prompt = {
    "messages" : [
        HumanMessage(content="Update user's phone preferences to 70 and laptop preferences to 90")
    ]
}

agent.invoke(prompt,config=config)

prompt = {
    "messages" : [
        HumanMessage(content="What are user's laptop preferences value?")
    ]
}

agent.invoke(prompt,config=config)
# print("Session 1 final state:", agent.get_state(config).values)

# for message in agent.get_state(config).values['messages']:
#     print(message)

# Read the final assistant reply from the checkpointed thread state.
response = agent.get_state(config).values['messages'][-1].content
print(response)


I have the following library versions:

pip show langchain langchain-openai langgraph openai | findstr /R /C:"Name" /C:"Version"
Name: langchain
Version: 1.0.1
Name: langchain-openai
Version: 1.0.0
Name: langgraph
Version: 1.0.0
Name: openai
Version: 2.6.0

Hi @yugandharreddy

it works for me:

[INFO] Tool invoked. query= Get account balance
[INFO] Tool invoked. runtime.context= {'user_id': 'user123'}
[INFO] Fetching account info for user_id: user123

=== RESULT MESSAGES ===
Your account balance is $5000.
Disconnected from server

try to upgrade the dep versions

I was using mistral-7b-instruct. After changing it to gpt-5-nano, I'm able to run the first block of code.
Is tool-calling capability not enough to use `runtime: ToolRuntime`? @pawel-twardziak

hi @yugandharreddy

I’m not sure I follow - could you rephrase your question please?

Hi @pawel-twardziak ,

Does the runtime-injection capability of LangChain depend on the LLM?

If I'm able to access the AgentState in a tool with mistral-7b-instruct, then why am I not able to access the context in a tool with the same LLM?

hi @yugandharreddy

Is it happening for mistral-7b-instruct model?

Hi @pawel-twardziak
Yes.

ok, I’ll test it with that model then and reach out to you again with the result

Hi @yugandharreddy

I’ve just tested mistral model with this script:

from dataclasses import dataclass
from typing_extensions import Annotated, TypedDict

import os

import dotenv
from langchain.agents import AgentState, create_agent
from langchain.tools import ToolRuntime, tool
from langchain_core.messages import AnyMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import add_messages

dotenv.load_dotenv()  # Load API keys / model settings (e.g. OPENAI_API_KEY, OPENAI_MODEL) from a local .env file

def _build_model():
    """Build a chat model.

    Default: OpenAI via `langchain_openai.ChatOpenAI`.
    Local option: Ollama via `langchain_ollama.ChatOllama` when enabled.
    """
    # Either switch selects the local path: MODEL_PROVIDER=ollama or a truthy USE_OLLAMA.
    provider = (os.getenv("MODEL_PROVIDER", "") or "").strip().lower()
    use_ollama = (os.getenv("USE_OLLAMA", "") or "").strip().lower() in {"1", "true", "yes"}

    if provider != "ollama" and not use_ollama:
        # Default path: hosted OpenAI model.
        openai_model = os.getenv("OPENAI_MODEL") or "gpt-4o-mini"
        print(f"[INFO] Using model provider=openai model={openai_model}")

        return ChatOpenAI(
            model=openai_model,
            temperature=0.1,
            use_responses_api=False,
        )

    # Local path: import lazily so the dependency stays optional.
    try:
        from langchain_ollama import ChatOllama
    except ImportError as e:
        raise RuntimeError(
            "Ollama model requested, but langchain_ollama is not installed. "
            "Install it (e.g. `pip install langchain-ollama`) or unset MODEL_PROVIDER/USE_OLLAMA."
        ) from e

    ollama_model = os.getenv("OLLAMA_MODEL") or "mistral:7b-instruct"
    ollama_host = os.getenv("OLLAMA_HOST") or "<default>"
    print(f"[INFO] Using model provider=ollama model={ollama_model} host={ollama_host}")

    return ChatOllama(
        model=ollama_model,
        temperature=0.1,
    )


def merge_dicts(current: dict, new: dict) -> dict:
    """Combine two dicts non-destructively; entries from *new* take precedence."""
    result = {}
    result.update(current)
    result.update(new)
    return result


class AppState(AgentState):
    """Agent state schema: the prebuilt AgentState plus a custom preferences channel."""
    messages: Annotated[list[AnyMessage], add_messages]  # Conversation history
    user_preferences: Annotated[dict, merge_dicts]  # Custom mutable field, shallow-merged by merge_dicts


# In-memory stand-in for a user store; keyed by the user_id carried in UserContext.
USER_DATABASE = {
    "user123": {
        "name": "Alice Johnson",
        "account_type": "Premium",
        "balance": 5000,
        "email": "alice@example.com",
    },
    "user456": {
        "name": "Bob Smith",
        "account_type": "Standard",
        "balance": 1200,
        "email": "bob@example.com",
    },
}


# @dataclass
class UserContext(TypedDict):
    """Context schema as a TypedDict; tools read it with runtime.context["user_id"]."""
    user_id: str  # Key into USER_DATABASE identifying the current user


@tool
def get_account_info(
    query: str,
    runtime: ToolRuntime,
) -> str:
    """Get the current user's account information.

    Args:
        query: Free-form request (ignored except for debugging).

    The user_id is provided via runtime.context (passed at agent.invoke time).
    """
    # The context schema is a TypedDict, so use mapping access rather than attributes.
    user_id = runtime.context["user_id"]
    print("[INFO] Tool invoked. query=", query)
    print("[INFO] Tool invoked. runtime.context=", runtime.context)
    print("[INFO] Fetching account info for user_id:", user_id)

    record = USER_DATABASE.get(user_id)
    if not record:
        return "User not found"

    # Assemble the reply line-by-line instead of one concatenated literal.
    parts = [
        f"Account holder: {record['name']}",
        f"Type: {record['account_type']}",
        f"Balance: ${record['balance']}",
    ]
    return "\n".join(parts)


# Thread-scoped persistence for the single conversation below.
checkpointer = MemorySaver()
model = _build_model()

agent = create_agent(
    model,
    tools=[get_account_info],
    context_schema=UserContext,  # shape of the context= value passed to invoke()
    state_schema=AppState,
    checkpointer=checkpointer,
    system_prompt="You are a financial assistant.",
)

config = {"configurable": {"thread_id": "session1"}}

prompt = {
    "messages": [
        HumanMessage(
            content=(
                "Get my account balance. Please call the get_account_info tool to return my account balance."
            )
        )
    ]
}
print("Invoking agent...")
# UserContext is a TypedDict, so this constructor call produces a plain dict.
result = agent.invoke(prompt, context=UserContext(user_id="user123"), config=config)

print("\n=== RESULT MESSAGES ===")
print(result["messages"][-1].content)

and all works fine:

Connected to server 127.0.0.1:34659
[INFO] Using model provider=ollama model=mistral:7b-instruct host=<default>
Invoking agent...
[field_name='context', input_value={'user_id': 'user123'}, input_type=dict])
  return self.__pydantic_serializer__.to_python(
[INFO] Tool invoked. query= Get account balance
[INFO] Tool invoked. query= Get account balance
[INFO] Tool invoked. runtime.context= {'user_id': 'user123'}
[INFO] Fetching account info for user_id:[INFO] Tool invoked. runtime.context= {'user_id': 'user123'}
[INFO] Fetching account info for user_id: user123
 user123

=== RESULT MESSAGES ===
 It appears that the account balance for Alice Johnson is $5000. Is there anything else you need assistance with?
Disconnected from server

What provider do you use for the chat model?