Can a reasoning agent switch underlying models during execution?

Hi everyone,

I have a question about implementing reasoning agents in create_react_agent.

Is it possible for an agent to start its reasoning process using one model (e.g., GPT-4.1) but then, for specific steps or sub-tasks, switch to a different model (e.g., GPT-4.1-mini) before returning the final result?

The idea is that the “main” model handles the core reasoning, but for certain parts (like sub-queries or quick lookups), a lighter/cheaper model is used internally.

Thanks!

Hi,

I don’t believe this is supported using create_react_agent. You could accomplish this with a custom graph, but not with the prebuilt. Another approach you could consider is using GPT-5, which supports dynamic routing.

No, I am not using create_react_agent.

This is my code:

async def pdf_fill_agent_scratch(
    state: State, config: RunnableConfig
) -> Dict[str, List[AIMessage]]:
    """Call the LLM powering our "agent".

    Builds the system prompt (optionally extended with form-creator
    instructions), selects the chat model from configuration (falling back
    to gpt-4.1), binds the PDF-filling tools, and invokes the model on the
    conversation history.

    Args:
        state (State): The current state of the conversation.
        config (RunnableConfig): Configuration for the model run.

    Returns:
        dict: {"messages": [response]} on success, or
        {"error_messages": "..."} if an exception occurred.
    """
    try:
        logger.info("Entering pdf_fill_agent")
        configuration = CombinedConfiguration.from_runnable_config(config)
        # Fixed typo: was "coniguration_dict".
        configuration_dict = asdict(configuration)

        # Use the configured model if one is set, otherwise a default.
        model_name = configuration.pdf_model
        if model_name:
            llm = load_chat_model(model_name)
        else:
            llm = ChatOpenAI(
                model="gpt-4.1",
                temperature=0,
            )

        model = llm.bind_tools(PDF_FILL_TOOLS)

        system_prompt_text = PDF_FILLING_PROMPTS_OPTIONS
        creator_instructions = state.get("creator_instructions", "")
        if creator_instructions:
            system_prompt_text += (
                "\n\n# FORM FILLING INSTRUCTIONS FROM FORM CREATOR\n"
                "Follow these guidelines to help the user correctly fill out the form:\n"
                f"{creator_instructions}\n"
            )

        # Prepare message list. Copy so we never mutate the list stored in
        # the shared graph state (the original appended to it in place).
        messages: list = list(state.get("messages") or [])

        # Handle manually filled field: surface it to the model as a user turn.
        updated_form_field = state.get("updated_form_field")
        if updated_form_field:
            field_json = json.dumps(updated_form_field, ensure_ascii=False)
            messages.append(
                {
                    "role": "user",
                    "content": f"The following field has been filled manually: {field_json}",
                }
            )

        # Call model
        response = cast(
            AIMessage,
            await model.ainvoke(
                [
                    {"role": "system", "content": system_prompt_text},
                    *messages,
                ],
                config=configuration_dict,
            ),
        )

        # If we are on the last allowed step and the model still wants to
        # call tools, stop with an apology instead of exceeding the limit.
        if state["is_last_step"] and response.tool_calls:
            logger.warning(
                "could not find an answer in the specified number of steps."
            )
            return {
                "messages": [
                    AIMessage(
                        id=response.id,
                        content="Sorry, I could not find an answer to your question in the specified number of steps.",
                    )
                ]
            }

        return {"messages": [response]}
    except Exception as e:
        logger.exception("An error occurred in pdf_fill_agent_scratch: %s", e)
        # Fixed typos in the recorded error message ("occcured isnide").
        return {"error_messages": f"Error occurred inside pdf agent node: {e}"}



def route_after_pdf_agent(state: State) -> Literal["__end__", "tools", "error"]:
    """Route to error node if error info present, else normal routing.

    Returns:
        "error" when the agent node recorded error messages,
        "tools" when the last AI message requests tool calls,
        "__end__" otherwise.

    Raises:
        ValueError: if state has no messages, or the last message is not
            an AIMessage.
    """
    if "error_messages" in state and state["error_messages"]:
        return "error"
    messages = state.get("messages", [])
    if not messages:
        # Previously this raised an opaque IndexError on [-1]; fail clearly.
        raise ValueError("Expected at least one message in state, but found none")
    last_message = messages[-1]
    if not isinstance(last_message, AIMessage):
        raise ValueError(
            f"Expected AIMessage in output edges, but got {type(last_message).__name__}"
        )
    if not last_message.tool_calls:
        return "__end__"
    return "tools"



# Assemble the PDF-filling agent graph:
#   START -> pdf_agent -> (tools -> pdf_agent loop) | error | __end__
builder = StateGraph(State, config_schema=CombinedConfiguration)

builder.add_node("pdf_agent", pdf_fill_agent_scratch)
builder.add_node("tools", ToolNode(PDF_FILL_TOOLS))
# NOTE(review): error handling is delegated to validator_node — confirm it
# consumes the "error_messages" key written by pdf_fill_agent_scratch.
builder.add_node("error", validator_node)

builder.add_edge(START, "pdf_agent")
# Branch on route_after_pdf_agent's return value ("__end__" / "tools" / "error").
builder.add_conditional_edges("pdf_agent", route_after_pdf_agent, {
    "__end__": "__end__",
    "tools": "tools",
    "error": "error"
})
# After tools execute, hand control back to the agent node.
builder.add_edge("tools", "pdf_agent")

graph = builder.compile()
graph.name = "pdf_agent"



async def pdf_node(state: State, config: RunnableConfig) -> Command[Literal["__end__"]]:
    """Run the compiled PDF-agent subgraph and relay its result.

    Args:
        state (State): The current conversation state passed to the subgraph.
        config (RunnableConfig): Configuration forwarded to the subgraph run.

    Returns:
        Command: routes to "error" (carrying the subgraph's error messages as
        workflow logs) when the subgraph reported an error, otherwise ends
        the graph with the subgraph's messages.
    """
    # Fixed misleading copy/paste log message (previously "Code agent ...").
    logger.info("PDF agent starting task")
    result = await graph.ainvoke(state, config)
    if "error_messages" in result:
        return Command(
            update={"workflow_logs": result["error_messages"]},
            goto="error",
        )
    return Command(
        update={"messages": result.get("messages", [])},
        goto="__end__",
    )

Yes, you could switch models, though for a use case like this I’m not sure how much value it would add

You would just have multiple models that you could call ainvoke() on, which you could instantiate globally or locally in the function. You’d have to bind all your tools to them, and then maybe choose which model to invoke based on the previous tool call

But deciding which tool to call next is generally a pretty flat level of reasoning load. I don’t think it’d be necessary for you in this case

1 Like