Hi all, I was trying to create a branching prompt template, but for some reason I could not make it work. I would be very grateful if someone could tell me what is wrong. (Note: the issue is that `ChatPromptTemplate.from_template` defaults to f-string formatting, so Jinja2 tags like `{% if %}` are not interpreted — pass `template_format="jinja2"` and use `{{ var }}` interpolation. Also, `constraint=RunnablePassthrough()` forwards the entire input dict, not just the constraint string.)

from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from dotenv import load_dotenv
load_dotenv()
llm = ChatOpenAI(model=“gpt-4”, temperature=0)
# Classify a user query into one of a fixed set of intent categories.
# Fixed: the original had markdown-escaped brackets (`\["query"\]`),
# which is a SyntaxError in Python.
intent_prompt = PromptTemplate(
    input_variables=["query"],
    template="""
Classify the intent of this query: {query}
Categories: weather, news, search, database, unknown.
Just return the category.
""",
)
# query -> LLM -> plain category string (e.g. "search").
intent_chain = intent_prompt | llm | StrOutputParser()
# Build a follow-up question, optionally including a constraint rule.
# Fixed (the root cause of the failure): `from_template` defaults to
# f-string formatting, so Jinja2 tags like `{% if %}` were treated as
# literal text / invalid f-string braces. We must opt in with
# template_format="jinja2", and with Jinja2 the variables must use
# `{{ var }}` interpolation instead of f-string `{var}`.
followup_prompt = ChatPromptTemplate.from_template(
    """
Category: {{ category }}
{% if constraint %}
Please ensure to follow this rule: {{ constraint }}
{% endif %}
Write a follow-up question for clarification.
""",
    template_format="jinja2",
)
# {category, constraint} -> LLM -> follow-up question string.
followup_chain = followup_prompt | llm | StrOutputParser()
# Fan out: run intent classification while carrying the constraint along,
# then feed both into the follow-up chain.
# Fixed: `constraint=RunnablePassthrough()` forwarded the ENTIRE input
# dict as `constraint`; extract just the "constraint" value instead.
# Also fixed: typographic (curly) quotes in the invoke dict were a
# SyntaxError.
pipeline = (
    RunnableParallel(
        category=intent_chain,
        constraint=lambda x: x["constraint"],
    )
    | followup_chain
)
output = pipeline.invoke(
    {
        "query": "Tell me something interesting about AI.",
        "constraint": "Keep it beginner-friendly.",
    }
)
print(output)