Multi turn conversation

I have four nodes graph where first node:

  1. First node create user care plan given user’s situation
  2. Second node ask followup question
  3. Third one is human review, get user input
  4. Fourth is conclusion node

And also created FE using chainlit. Issue is when graph at human review node it work fine for first time it enters and also get response followup node. But at the second iteration graph stop without giving any output.

here is the code

"""
LangGraph implementation for care situation assessment chatbot.
Implements a structured conversation flow to gather information about user's care situation.
"""
import ast
import logging
from typing import Dict, Any, List, Optional, Annotated, Literal
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage, SystemMessage, BaseMessage
from langgraph.graph import StateGraph, END, add_messages
from pydantic import BaseModel
from config import settings
from careplan_o3 import careplan
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph.state import CompiledStateGraph
from langgraph.types import interrupt, Command
from langchain_core.language_models.chat_models import BaseChatModel

# Initialize the LLM
llm = ChatOpenAI(
    model=settings.openai_model,
    api_key=settings.openai_api_key
)

# Add this after imports
logger = logging.getLogger("graph")
logger.setLevel(logging.DEBUG)


class CareState(BaseModel):
    """State for care situation assessment conversation."""
    messages: Annotated[List[BaseMessage | Dict[str, Any]], add_messages] = []
    care_situation: Optional[str] = None
    follow_up_questions: List[str] = []
    assessment_complete: bool = False
    need_more_info: bool = False        # <--- NEW
    context: Dict[str, Any] = {}
    user_input: Optional[str] = None
    follow_up_node_response: Annotated[List[BaseMessage | Dict[str, Any]], add_messages] = []

class FollowUpResult(BaseModel):
    reply: str
    need_more_info: bool

def initial_assessment_node(state: CareState):
    """Analyze initial care situation and ask follow-up questions."""
    logger.debug(f"[initial_assessment_node] Entered with state: {state}")
    # Get the user's response about their care situation
    message = state.messages[-1]
    if isinstance(message, dict):
        user_message = message["content"]
    else:
        user_message = message.content

    response = careplan(llm, user_message)


    # Store the care situation
    state.care_situation = user_message

    # Add the assessment response

    logger.debug(
        f"[initial_assessment_node] Appended assistant message: {response.content}")

    logger.debug(
        f"[initial_assessment_node] Exiting with messages: {state.messages}")
    return {"messages": [AIMessage(content=response.content)]}


def follow_up_node(state: CareState):
    """Handle follow-up questions and additional information."""
    logger.debug(f"[follow_up_node] Entered with state: {state}")

    # Create conversation history
    conversation_history = state.messages

    # System prompt for follow-up processing
    system_prompt = f"""You are a compassionate {settings.app_name} continuing a conversation about care situations.

Your role is to:
1. Acknowledge the user's additional information
2. Determine if you have enough information to provide helpful guidance
3. If more information is needed, ask 1-2 specific follow-up questions
4. If sufficient information is provided, offer comprehensive advice and support

Return JSON with:
- reply: assistant message to send to the user
- need_more_info: boolean (True or False)

Previous care situation: {state.care_situation}"""

    # Generate response
    follow_up_llm: BaseChatModel = ChatOpenAI(
        model=settings.openai_model,
        api_key=settings.openai_api_key
    )

    response = follow_up_llm.invoke([
        SystemMessage(content=system_prompt),
        *conversation_history,
    ],response_format=FollowUpResult)

    safe_output = response.content.replace("true", "True").replace("false", "False")

    follow_up_result = eval(safe_output)
    # Add the response
    logger.debug(
        f"[follow_up_node] Appended assistant message: {response.content}")

    # Track the questions + loop flag
    need_more_info = bool(follow_up_result["need_more_info"])


    follow_up_response=[AIMessage(content=follow_up_result["reply"])]
    return {"messages": follow_up_response,
            "need_more_info": need_more_info,
            "follow_up_node_response": follow_up_response}

def human_review(state: CareState) -> Command[Literal["follow_up", "conclusion"]]:
    if not state.need_more_info:
        return Command(goto="conclusion")
    user_feedback = interrupt(
        {
            "need_more_info": state.need_more_info,
            "follow_up_node_response": state.follow_up_node_response[-1],
        }
    )
    print(f"[human_node] Received human feedback: {user_feedback}")
    return Command(update={"messages": [HumanMessage(content=user_feedback)]}, goto="follow_up")


def conclusion_node(state: CareState) -> CareState:
    """Provide final guidance and support."""
    logger.debug(f"[conclusion_node] Entered with state: {state}")
    # Create conversation history
    conversation_history = state.messages

    # System prompt for conclusion
    system_prompt = f"""You are a compassionate {settings.app_name} concluding a care situation conversation.

Your role is to:
1. Provide a helpful summary of the conversation
2. Offer specific, actionable advice based on the care situation
3. Suggest next steps or resources
4. Express support and encouragement
5. Let the user know they can ask more questions anytime

Care situation context: {state.care_situation}"""

    # Generate concluding response
    response = llm.invoke([
        SystemMessage(content=system_prompt),
        *conversation_history,
        {
            'role': 'user',
            'content': "Please provide a helpful conclusion and next steps for this care situation."
        }
    ])

    logger.debug(
        f"[conclusion_node] Appended assistant message: {response.content}")
    # state.current_step = "end"
    logger.debug(f"[conclusion_node] Exiting with messages: {state.messages}")
    return {"messages": [AIMessage(content=response.content)], "assessment_complete": True}



def create_care_graph() -> CompiledStateGraph[CareState]:
    """Create the care situation assessment graph."""
    workflow = StateGraph(CareState)

    # Add nodes
    workflow.add_node("initial_assessment", initial_assessment_node)
    workflow.add_node("follow_up", follow_up_node)
    workflow.add_node("conclusion", conclusion_node)
    workflow.add_node("human_review", human_review)

    # Add edges
    workflow.set_entry_point("initial_assessment")
    workflow.add_edge("initial_assessment", "follow_up")
    workflow.add_edge("follow_up", "human_review")
    workflow.set_finish_point("conclusion")

    memory = MemorySaver()
    return workflow.compile(checkpointer=memory)



# Initialize the graph
care_graph = create_care_graph()


@cl.on_message
async def on_message(message: cl.Message):
    """Handle incoming messages with interrupt/resume support"""
    configurations: RunnableConfig = {"configurable": {"thread_id": cl.context.session.id}}
    try:
        async for chunk  in graph.astream({'messages':[HumanMessage(content=message.content)]}, config=configurations):
            for node_id, value in chunk.items():
                if node_id == "__interrupt__":
                    while True:
                            if value[0].value["need_more_info"]:
                                user_res = await cl.AskUserMessage(content=value[0].value["follow_up_node_response"].content).send()
                                state = graph.invoke(Command(resume=user_res["output"]), config=configurations)
                            else:
                                await cl.Message(content=value[0].value["follow_up_node_response"][-1].content).send()
                                break
                if node_id == "follow_up":
                    ...
                elif assistant_messages := [m for m in get_messages(value) if isinstance(m, AIMessage)]:
                      await cl.Message(content=assistant_messages[-1].content).send()
    except Exception as e:
        logger.error(f"Error processing on message: {e}")

Hi!

It looks like your error is because your on_message handler doesn’t loop properly

You stream from the graph once, and then if you interrupt you enter a “while True” loop. However it’s important to note that this “while True” loop only continues if value[0].value has “need_more_info”. When you call graph.invoke a second time, you are not updating value - that is only taken from your initial streamed output. That’s why you aren’t successfully looping

Hi @xuro-langchain ,
Thank you for your valuable insight. If it’s not too much trouble, could you please share the refactored code?