Hi @AMuresan — in your case, when using chains, follow this:
import { ChatOpenAI } from "@langchain/openai";
import * as dotenv from "dotenv";
import { PromptTemplate } from "@langchain/core/prompts";
import { z } from "zod";
// Load environment variables (e.g. OPENAI_API_KEY) from .env before any client is built.
dotenv.config();

// Zod schema for the structured output the model must return.
const Result = z
  .object({ result: z.string().describe("Healthy lifestyle") })
  .describe("Healthy lifestyle coach");
class MyLLM {
// private LLM: ReturnType<(ChatOpenAI['withConfig'])>;
private LLM: ChatOpenAI;
private getLLM() {
if (!this.LLM) {
this.LLM = new ChatOpenAI({
model: "gpt-5",
useResponsesApi: true,
/**
* this won't work because the reasoning config is not set on the LLM instance
*
* @see BaseChatOpenAI
*
* this.reasoning =
* fields?.reasoning ?? fields?.reasoningEffort
* ? { effort: fields.reasoningEffort }
* : undefined;
*/
// reasoning: { effort: "low", summary: "detailed" },
// reasoningEffort: "medium",
});
}
return this.LLM;
}
initializa() {
const llm = this.getLLM();
const prompt = PromptTemplate.fromTemplate(
"Is sport necessary for healthy life?",
);
const structuredLlm = llm.withStructuredOutput(Result, {
name: "result",
method: "jsonSchema",
strict: true,
includeRaw: true,
});
let chain = prompt.pipe(structuredLlm);
// chain = chain.pipe(async (response) => {
// const formatted_response = JSON.parse(
// response.filter((item) => item["type"] === "text")[0]["text"],
// ) as Record<string, boolean>;
// return formatted_response;
// });
return chain.withRetry({ stopAfterAttempt: 5 }).withConfig({
// @ts-ignore
reasoning: {
effort: "high",
summary: "auto" /* summary config if desired */,
},
});
}
}
(async () => {
const myLLM = new MyLLM();
const chain = myLLM.initializa();
const answer = await chain.invoke(
{},
{
/**
* Or pass reasoning config as a parameter to the invoke method
*/
// @ts-ignore
// reasoning: {
// effort: "high",
// summary: "auto" /* summary config if desired */,
// },
},
);
console.log(JSON.stringify(answer, null, 2));
})();
Which means:
- call `withConfig` and use `// @ts-ignore` (or better, `// @ts-expect-error`)
- pass the configuration to the `invoke` call and use `@ts-ignore` there