How to pass multiple variables to a chain?

When I run the following code, an exception occurs when I pass `language` as an input variable.

import os
from langchain_community.chat_models import ChatTongyi
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate

llm = ChatTongyi(model="qwen-plus")

# First prompt: expects only {topic}.
explaination_prompt = PromptTemplate(
    template="Explain me {topic} in easy language.",
    input_variables=["topic"]
)

# Second prompt: expects only {value}.
summary_prompt = PromptTemplate(
    template="Summarize {value} in 3 points.",
    input_variables=["value"]
)

# Third prompt: expects BOTH {summarize} and {language}.
translate_prompt = PromptTemplate(
    template="Translate {summarize} into {language}",
    input_variables=["summarize", "language"]
)

parser = StrOutputParser()

# NOTE(review): each `parser` step emits a plain string, so downstream
# prompts (summary_prompt, translate_prompt) receive a single string rather
# than a dict containing the keys they declare ("value", "summarize",
# "language"). The caller's {"language": ...} entry is dropped after the
# first prompt runs — presumably this is the source of the exception.
chain = explaination_prompt | llm | parser | summary_prompt | llm | parser | translate_prompt | llm | parser

# Fails: by the time translate_prompt runs, "language" is no longer present
# in the data flowing through the chain.
result = chain.invoke({"topic": "Generative AI", "language": "Chinese"})
print(result)

Hi @yanheng

The error happens because in LangChain Expression Language (LCEL), each step's output replaces the previous step's payload — keys like `language` don't automatically carry through unless you explicitly pass them along. Also, when a PromptTemplate expects specific variable names (e.g., `value`, `summarize`, `language`), you must map the upstream outputs to those names before the prompt runs.

  • Approach 1 (simple mapping with a dict and itemgetter): build the summary from topic, keep language from the original input, then translate.
from operator import itemgetter
from langchain_community.chat_models import ChatTongyi
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate

llm = ChatTongyi(model="qwen-plus")

# Step 1: turn a topic into a plain-language explanation.
explanation_prompt = PromptTemplate(
    template="Explain me {topic} in easy language.",
    input_variables=["topic"],
)

# Step 2: condense the explanation into three points.
summary_prompt = PromptTemplate(
    template="Summarize {value} in 3 points.",
    input_variables=["value"],
)

# Step 3: translate the summary; needs BOTH {summarize} and {language}.
translate_prompt = PromptTemplate(
    template="Translate {summarize} into {language}",
    input_variables=["summarize", "language"],
)

parser = StrOutputParser()


def as_summary_input(explanation):
    """Wrap the raw explanation string in the dict summary_prompt expects."""
    return {"value": explanation}


# topic -> explanation -> 3-point summary (ends as a single string).
summarize_from_topic = (
    explanation_prompt
    | llm
    | parser
    | as_summary_input
    | summary_prompt
    | llm
    | parser
)

# Fan-in: run the summarization pipeline for {summarize} while pulling the
# caller's original "language" key straight from the input dict, so
# translate_prompt sees both of its variables.
chain = (
    {
        "summarize": summarize_from_topic,
        "language": itemgetter("language"),
    }
    | translate_prompt
    | llm
    | parser
)

print(chain.invoke({"topic": "Generative AI", "language": "Chinese"}))
  • Approach 2 (carry inputs with assign): keep the original inputs around and add intermediate keys.
from langchain_core.runnables import RunnablePassthrough
from langchain_community.chat_models import ChatTongyi
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate

llm = ChatTongyi(model="qwen-plus")

# Step 1: turn a topic into a plain-language explanation.
explaination_prompt = PromptTemplate(
    template="Explain me {topic} in easy language.",
    input_variables=["topic"],
)

# Step 2: condense the explanation into three points.
summary_prompt = PromptTemplate(
    template="Summarize {value} in 3 points.",
    input_variables=["value"],
)

# Step 3: translate the summary; needs BOTH {summarize} and {language}.
translate_prompt = PromptTemplate(
    template="Translate {summarize} into {language}",
    input_variables=["summarize", "language"],
)

parser = StrOutputParser()

# topic -> plain-language explanation (a string).
explain_chain = explaination_prompt | llm | parser

# Reads the intermediate "explanation" key added by .assign() below and maps
# it onto the "value" key that summary_prompt expects.
summary_chain = (lambda d: {"value": d["explanation"]}) | summary_prompt | llm | parser

# RunnablePassthrough keeps the caller's original input dict flowing through;
# each .assign() ADDS a key instead of replacing the payload, so "language"
# survives untouched all the way to translate_prompt.
chain = (
    RunnablePassthrough()  # starts with the original input dict
    .assign(explanation=explain_chain)  # adds explanation (uses {topic})
    .assign(summarize=summary_chain)    # adds summarize (uses explanation)
    | translate_prompt                  # uses {summarize} and original {language}
    | llm
    | parser
)

print(chain.invoke({"topic": "Generative AI", "language": "Chinese"}))
2 Likes

Thank you for your answer, I understand.

1 Like