Hi @aerickson-clt,
have you tried the LangChain v1 migration guide (Docs by LangChain)?
In v1, dynamic prompts and message rewriting are handled via middleware: use @dynamic_prompt to compute a system prompt from state, and/or before_model to rewrite messages before the model is called.
The system_prompt parameter is static in v1; anything dynamic moved into middleware, as the migration guide calls out.
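For contrast, the static form is just the parameter (a minimal sketch; create_plan stands in for your own tool):

from langchain.agents import create_agent

# Static: fine when the prompt never needs to change between calls
agent = create_agent(
    model="openai:gpt-4o-mini",
    tools=[create_plan],
    system_prompt="You are a planning expert.",
)

Anything computed from state belongs in middleware, as below.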
Option A: Dynamic system prompt via middleware
from typing import Any

from langchain.agents import create_agent
from langchain.agents.middleware.types import AgentState, ModelRequest, dynamic_prompt
from langchain.chat_models import init_chat_model
from langchain.messages import HumanMessage
# Extend the agent state with your custom fields
class PlanExecuteState(AgentState[Any]):
    plan: list[str]
    past_steps: list[tuple[str, str]]  # (step, result)
# Build the system prompt from the current state (v1 idiomatic)
@dynamic_prompt
def build_planner_agent_prompt(req: ModelRequest) -> str:
    state = req.state
    current_plan = state.get("plan", [])
    past_steps = state.get("past_steps", [])

    base_prompt = (
        "You are a planning expert responsible for creating and updating plans to accomplish objectives.\n\n"
        "Your tool:\n- create_plan: Create or update the execution plan\n"
    )
    if past_steps:
        base_prompt += "\nCOMPLETED WORK:\n" + "\n".join(
            f"✓ {step}\n  Result: {result[:150]}..." for step, result in past_steps
        )
    if current_plan:
        base_prompt += "\n\nCURRENT PLAN:\n" + "\n".join(
            f"{i + 1}. {step}" for i, step in enumerate(current_plan)
        )
    else:
        base_prompt += "\n\nCURRENT PLAN: (none – you need to create one or signal completion)"
    return base_prompt
# Initialize the model and create the agent with middleware
model = init_chat_model("openai:gpt-4o-mini")

planner_agent = create_agent(
    model=model,
    tools=[create_plan],  # your tool(s)
    state_schema=PlanExecuteState,  # expose plan/past_steps in state
    middleware=[build_planner_agent_prompt],  # dynamic system prompt
)
# Invocation
result = planner_agent.invoke(
    {
        "messages": [HumanMessage("Plan how to achieve the goal")],
        "plan": [],
        "past_steps": [("researched", "Found three relevant sources.")],
    }
)
What this does: @dynamic_prompt sets request.system_prompt each call using the live state (which includes your custom fields via state_schema). This replicates your v0 function-based prompt builder, but in v1’s middleware form.
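Because the prompt is rebuilt from live state on every model call, a later invocation with updated fields picks up the changes without any extra wiring, e.g.:

# Same agent, richer state: the middleware recomputes the prompt automatically
result2 = planner_agent.invoke(
    {
        "messages": [HumanMessage("Update the plan")],
        "plan": ["draft outline", "write sections"],
        "past_steps": [
            ("researched", "Found three relevant sources."),
            ("draft outline", "Outline approved."),
        ],
    }
)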
Option B: Rewrite/trim messages in before_model
from langchain.agents.middleware.types import AgentMiddleware
from langchain.messages import RemoveMessage, SystemMessage
from langgraph.graph.message import REMOVE_ALL_MESSAGES
class PlannerPromptMiddleware(AgentMiddleware[PlanExecuteState, Any]):
    state_schema = PlanExecuteState

    def before_model(self, state: PlanExecuteState, runtime) -> dict[str, Any] | None:
        # Inject a computed SystemMessage and/or trim history here
        current_plan = state.get("plan", [])
        past_steps = state.get("past_steps", [])
        messages = state.get("messages", [])

        base_prompt = "You are a planning expert..."  # build it the same way as above
        if past_steps:
            base_prompt += "\n\nCOMPLETED WORK:\n" + "\n".join(
                f"✓ {s}\n  Result: {r[:150]}..." for s, r in past_steps
            )
        if current_plan:
            base_prompt += "\n\nCURRENT PLAN:\n" + "\n".join(
                f"{i + 1}. {step}" for i, step in enumerate(current_plan)
            )
        else:
            base_prompt += "\n\nCURRENT PLAN: (none – you need to create one or signal completion)"

        return {
            # `messages` goes through the add_messages reducer, which appends by
            # default; clearing with RemoveMessage(id=REMOVE_ALL_MESSAGES) makes
            # this an actual replacement, with the fresh system message first.
            "messages": [
                RemoveMessage(id=REMOVE_ALL_MESSAGES),
                SystemMessage(content=base_prompt),
                *messages,
            ]
        }
planner_agent = create_agent(
    model=model,
    tools=[create_plan],
    state_schema=PlanExecuteState,
    middleware=[PlannerPromptMiddleware()],
)
What this does: before each model call, it clears the stored message list (via RemoveMessage(id=REMOVE_ALL_MESSAGES)) and writes back a fresh computed SystemMessage plus the existing history, which also gives you a place to trim or transform that history.
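If you do want to trim, the same hook can drop older turns; a minimal sketch (the 10-message window is an arbitrary choice, not a library default):

# Variant of before_model that keeps only a recent window of history
def before_model(self, state: PlanExecuteState, runtime) -> dict[str, Any] | None:
    recent = state.get("messages", [])[-10:]  # arbitrary cutoff
    return {
        "messages": [
            RemoveMessage(id=REMOVE_ALL_MESSAGES),  # clear the stored list first
            SystemMessage(content="You are a planning expert..."),
            *recent,
        ]
    }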