iss44
1
Hi,
Before LangGraph v1, developers could force tool use in create_react_agent by passing a bound llm model with
bind_tools( tools=tools, tool_choice="any", parallel_tool_calls=False )
However, since LangGraph/LangChain v1, pre-bound models are not supported in create_agent.
Is there a way we can force a create_agent to always call tools?
Hi @iss44
Have you tried middleware?
from typing import Callable, Awaitable
from dotenv import load_dotenv
from langchain.agents import create_agent
from langchain.agents.middleware import wrap_model_call, ModelRequest, ModelResponse
from langchain.chat_models import init_chat_model
from langchain.tools import ToolRuntime
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from pydantic import BaseModel, ConfigDict
# Load environment variables (API keys, etc.) from a local .env file.
load_dotenv(verbose=True)

# Plain chat model instance — deliberately NOT pre-bound with tools;
# create_agent receives the tools separately (see agent construction below).
llm = init_chat_model(
    "claude-3-7-sonnet-latest"
)
class MyToolInput(BaseModel):
    """Argument schema for the `my_mood` tool."""

    # Allow non-pydantic field types such as ToolRuntime
    # (which contains BaseStore, etc.).
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Free-text query supplied by the model.
    query: str
    # Injected by the framework at call time; hidden from the model.
    runtime: ToolRuntime
@tool(args_schema=MyToolInput, description="Ask for additional information")
def my_mood(query: str, runtime: ToolRuntime) -> str:
    """Toy tool that reports the current mood.

    Args:
        query: Free-text question from the model.
        runtime: Injected tool runtime (hidden from the model's schema).

    Returns:
        A fixed mood string.
    """
    print(f"My Mood query: {query}")
    # Plain literal — the original used an f-string with no placeholders.
    return "So so today"
@wrap_model_call
def force_tools(request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]) -> ModelResponse:
    """Middleware that forces the model to call `my_mood` on the first step.

    When the most recent message is a HumanMessage (i.e. this is the first
    LLM call of the turn), the request is overridden so the provider must
    call the `my_mood` tool and may not issue parallel tool calls. Later
    steps (after tool results arrive) pass through unmodified so the agent
    can produce a final answer and terminate.

    Args:
        request: The model request about to be sent.
        handler: Next step in the middleware chain. This is the sync
            variant, so it returns a ModelResponse directly — the original
            annotation (`Awaitable[ModelResponse]`) was wrong: the body
            returns `handler(request)` without awaiting it.

    Returns:
        The model response from the (possibly overridden) request.
    """
    msgs = request.messages  # conversation so far; excludes the system prompt
    # Only force the tool on the very first LLM call of a turn; forcing it on
    # every call would loop tool-calling forever and hit the recursion limit.
    is_first_llm_step = len(msgs) > 0 and isinstance(msgs[-1], HumanMessage)
    if is_first_llm_step:
        request = request.override(
            # Anthropic-style forced tool choice: must call this named tool.
            tool_choice={"type": "tool", "name": "my_mood"},
            model_settings={"parallel_tool_calls": False},
        )
    return handler(request)
# Build the agent from the unbound model; the middleware injects the
# forced tool choice on the first model call of each turn.
agent = create_agent(
    model=llm,
    tools=[my_mood],
    middleware=[force_tools],
)

result = agent.invoke(
    {"messages": [HumanMessage("Hi!")]},
    config={"recursion_limit": 10},
)

# Dump the full conversation (human turn, forced tool call, tool result, reply).
for message in result["messages"]:
    message.pretty_print()
iss44
3
Thanks @pawel-twardziak that’s an elegant solution
1 Like