While trying to get started with the LangChain 1.0 alpha release, I followed the quickstart for "creating a real-world agent".
The provided example did not run as expected: it failed at the tool call `get_user_location`, where it could not access the "context" object from the `RunnableConfig` that was passed in.
On debugging, I confirmed that the key "context" indeed does not exist inside the config.
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from langchain_openai import ChatOpenAI
from langchain.agents import create_agent
from langchain_core.tools import tool
from langchain_core.runnables import RunnableConfig
from dataclasses import dataclass
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.runtime import get_runtime
# System prompt given to the agent. It names both tools so the model knows
# when to call them. This is a runtime string sent to the LLM, so its text
# (including the "whereever" typo) is left byte-for-byte unchanged.
system_prompt = """You are an expert weather forecaster, who speaks in puns.
You have access to two tools:
- get_weather_for_location: use this to get the weather for a specific location
- get_user_location: use this to get the user's location
If a user asks you for the weather, make sure you know the location. If you can tell from the question that they mean whereever they are, use the get_user_location tool to find their location."""

# Stand-in "user database": maps a user_id string to that user's location.
USER_LOCATION = {
    "1": "Florida",
    "2": "SF"
}
def get_weather_for_location(city: str) -> str:  # (1)!
    """Get weather for a given city."""
    # Toy forecast: every city gets the same punny answer.
    punny_forecast = f"It's always sunny in {city}!"
    return punny_forecast
@tool
def get_user_location() -> str:
    """Retrieve user information based on user ID."""
    # BUG FIX: the original read config["context"], but RunnableConfig has no
    # "context" key (only keys like "configurable"), so the tool crashed.
    # The value passed to agent.invoke(..., context=...) is surfaced through
    # the langgraph runtime instead. No context_schema was declared when
    # building the agent, so runtime.context is presumably the plain dict
    # that was passed in — TODO confirm against the installed langgraph version.
    runtime = get_runtime()
    user_id = runtime.context.get("user_id")
    return USER_LOCATION[user_id]
# Structured-output schema the agent must fill in (used via response_format).
# No class docstring is added on purpose: the schema text may be forwarded to
# the model, so only '#' comments are used here.
@dataclass
class WeatherResponse:
    conditions: str                  # short description of conditions, e.g. "sunny"
    punny_response: str              # the full pun-laden answer for the user
    short_response: str              # terse one-liner variant
    indicative_temperature_f: float  # temperature in Fahrenheit
    indicative_temperature_c: float  # temperature in Celsius
    reasoning_for_temperature: str   # model's justification for the numbers
# Shared in-memory checkpointer so both invoke() calls in run() continue the
# same conversation thread ("1").
checkpointer = InMemorySaver()
def run(name):
    """Build the weather agent and run two turns on the same thread.

    *name* is unused in the body (leftover from the IDE script template).
    """
    agent = create_agent(
        model="openai:gpt-5-nano",
        prompt=system_prompt,
        tools=[get_user_location, get_weather_for_location],
        response_format=WeatherResponse,
        checkpointer=checkpointer
    )
    # thread_id keys the checkpointer, so the second invoke below sees the
    # first turn's history.
    config = {"configurable": {"thread_id": "1"}}
    # NOTE(review): no context_schema was passed to create_agent, and tools
    # read config["context"], which RunnableConfig does not carry — this is
    # the reported failure. Confirm how context is meant to reach tools in
    # the installed langchain/langgraph versions.
    context = {"user_id": "1"}
    response = agent.invoke(
        {"messages": [{"role": "user", "content": "what is the weather outside?"}]},
        config=config,
        context=context
    )
    print(response['structured_response'])
    response = agent.invoke(
        {"messages": [{"role": "user", "content": "thank you!"}]},
        config=config,
        context=context
    )
    print(response['structured_response'])


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    run('Hello AI World')
I think the problem is in how the context is defined and accessed: the tool reads it from the RunnableConfig, but it should be read through the langgraph runtime with a declared context schema. Can you try this?
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from dataclasses import dataclass
from langchain.agents import create_react_agent
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.runtime import get_runtime
# System prompt given to the agent; names both tools so the model knows when
# to call them. Runtime string — its text (including the "whereever" typo) is
# left byte-for-byte unchanged.
system_prompt = """You are an expert weather forecaster, who speaks in puns.
You have access to two tools:
- get_weather_for_location: use this to get the weather for a specific location
- get_user_location: use this to get the user's location
If a user asks you for the weather, make sure you know the location. If you can tell from the question that they mean whereever they are, use the get_user_location tool to find their location."""

# Stand-in "user database": maps a user_id string to that user's location.
USER_LOCATION = {"1": "Florida", "2": "SF"}
def get_weather_for_location(city: str) -> str:  # (1)!
    """Get weather for a given city."""
    # Toy forecast: the answer is the same pun regardless of city.
    forecast = f"It's always sunny in {city}!"
    return forecast
@tool
def get_user_location() -> str:
    """Retrieve user information based on user ID."""
    # FIX: @tool-decorated functions need a docstring (it becomes the tool
    # description shown to the LLM); the original here had none. The dead
    # RunnableConfig-based version that read the nonexistent config["context"]
    # key has been removed.
    # get_runtime(ContextSchema) resolves the value passed to
    # agent.invoke(..., context=...) as a typed ContextSchema instance,
    # because the agent is built with context_schema=ContextSchema.
    runtime = get_runtime(ContextSchema)
    user_id = runtime.context.user_id  # typed access
    return USER_LOCATION[user_id]
# Contract for the per-invocation context: passing
# context_schema=ContextSchema to the agent makes
# agent.invoke(..., context={"user_id": ...}) available to tools as a typed
# object via get_runtime(ContextSchema).
@dataclass
class ContextSchema:
    user_id: str  # key into USER_LOCATION
# Structured-output schema the agent must fill in (used via response_format).
# No class docstring is added on purpose: the schema text may be forwarded to
# the model, so only '#' comments are used here.
@dataclass
class WeatherResponse:
    conditions: str                  # short description of conditions, e.g. "sunny"
    punny_response: str              # the full pun-laden answer for the user
    short_response: str              # terse one-liner variant
    indicative_temperature_f: float  # temperature in Fahrenheit
    indicative_temperature_c: float  # temperature in Celsius
    reasoning_for_temperature: str   # model's justification for the numbers
# Shared in-memory checkpointer so both invoke() calls in run() continue the
# same conversation thread ("1").
checkpointer = InMemorySaver()
def run(name):
    """Build the weather agent (with a typed context schema) and run two turns.

    *name* is unused in the body (leftover from the IDE script template).
    """
    # When creating the agent, pass the schema if supported:
    # NOTE(review): langchain 1.0 exposes this builder as create_agent;
    # create_react_agent may be a deprecated alias — confirm against the
    # installed version.
    agent = create_react_agent(
        model="openai:gpt-5-nano",
        prompt=system_prompt,
        tools=[get_user_location, get_weather_for_location],
        response_format=WeatherResponse,
        checkpointer=checkpointer,
        context_schema=ContextSchema,  # enables typed runtime.context
    )
    # thread_id keys the checkpointer, so the second invoke below sees the
    # first turn's history.
    config = {"configurable": {"thread_id": "1"}}
    # Matches ContextSchema; tools read it via get_runtime(ContextSchema).
    context = {"user_id": "1"}
    response = agent.invoke(
        {"messages": [{"role": "user", "content": "what is the weather outside?"}]},
        config=config,
        context=context
    )
    print(response["structured_response"])
    response = agent.invoke(
        {"messages": [{"role": "user", "content": "thank you!"}]},
        config=config,
        context=context
    )
    print(response["structured_response"])


# Press the green button in the gutter to run the script.
if __name__ == "__main__":
    run("Hello AI World")