How to save chat history in a database like MongoDB with LangChain 1.0.4

import asyncio
from asyncio import Lock
from typing import Dict

from aiohttp import web
import socketio

from dotenv import load_dotenv
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI


is_loaded = load_dotenv()
if not is_loaded:
    print("Can't load .env file")
    exit(1)

sio = socketio.AsyncServer(cors_allowed_origins='*')
app = web.Application()
sio.attach(app)

processing_locks: Dict[str, Lock] = {}

model = ChatOpenAI(
    model="gpt-4o-mini",
    temperature=0.1,
    max_tokens=1000,
    timeout=30
)

agent = create_agent(
    model=model,
    # system_prompt="You are a helpful assistant. Be concise and accurate."
    system_prompt="You are a chat bot."
)


@sio.event
def connect(sid, environ):
    print("connect ", sid)
    processing_locks[sid] = Lock()


@sio.event
def disconnect(sid):
    print('disconnect ', sid)

    if sid in processing_locks:
        del processing_locks[sid]
        print(f"🗑️ Cleaned up lock for {sid}")


@sio.on('chat_message')
async def handle_chat_message(sid, data):
    user = data.get('user', 'Unknown')
    message = data.get('message', '')
    print(f"đź’¬ Message from {user} ({sid}): {message}")

    if processing_locks[sid].locked():
        print(f"⚠️ Lock held for {sid}. Skipping message to prevent concurrency.")
        await sio.emit(
            'chat_message',
            {'user': 'System',
             'message': 'The Assistant is currently busy processing a previous request. Please wait.'},
            room=sid
        )
        return

    async with processing_locks[sid]:
        try:
            await sio.emit('processing_status', {'status': 'processing'}, room=sid)

            # await asyncio.sleep(5)
            config = {"configurable": {"session_id": sid}}
            agent_response = await asyncio.to_thread(
                agent.invoke,
                {"input": message},
                config=config
            )
            response_text = agent_response.get('messages', [])[0].content

            assistant_user = "Assistant"
            print(f"🤖 Assistant response to {user} ({sid}): {response_text}")

            await sio.emit(
                'chat_message',
                {'user': assistant_user, 'message': response_text},
                room=sid
            )

        except Exception as e:
            error_message = f"Error processing message with agent: {e}"
            print(f"🚨 {error_message}")

            await sio.emit(
                'chat_message',
                {'user': 'System Error', 'message': 'Sorry, the AI agent is offline or encountered an error.'},
                room=sid
            )
        finally:
            await sio.emit('processing_status', {'status': 'idle'}, room=sid)


if __name__ == '__main__':
    web.run_app(app, host='0.0.0.0', port=8080)

I want to keep the conversation for three reasons:

  1. To monitor usage for business purposes.

  2. To record all messages exchanged.

  3. To save experience for the future — to track what the user’s problem was and how it was resolved.

I don’t understand whether I should use LangGraph or just use plain LangChain.

In the new version, it only has:

from langchain.agents
from langchain.tools
from langchain.messages
from langchain.chat_models
from langchain.embeddings
from langchain.rate_limiters

And it doesn’t have ConversationBufferMemory, and I couldn’t find a good example in the documentation.

If I want a regular chat where messages are stored in a structure like [{}] properly, so that I can later retrieve or track them, or eventually connect them to MongoDB, how should I do it?

I used to use the official OpenAI library and it was straightforward — I could just put the messages in a list and store the same messages in the database.
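
Roughly like this (just a sketch of the pattern I mean, not my real code):

from openai import OpenAI

client = OpenAI()
messages = [{"role": "system", "content": "You are a chat bot."}]

def chat(user_text: str) -> str:
    # Append the user turn, call the model, append the assistant turn.
    messages.append({"role": "user", "content": user_text})
    response = client.chat.completions.create(model="gpt-4o-mini", messages=messages)
    answer = response.choices[0].message.content
    messages.append({"role": "assistant", "content": answer})
    # The same plain list of dicts can be inserted into MongoDB as-is.
    return answer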

But with LangChain, I got confused!

Also, there are no examples on GitHub, or at least I couldn’t find any that are compatible with the latest changes.

Hi @omides248

could you reformat your post please? Right now I am having a hard time trying to read the code part :slight_smile:

thanks for the re-formatting :wink:

imho

  • Use RunnableWithMessageHistory to manage short‑term chat history, and back it with a DB adapter (MongoDB) so every turn is automatically persisted.

  • If you’re using LangGraph agents (create_agent), add a checkpointer to persist the whole conversation state, or still wrap the agent with RunnableWithMessageHistory to store messages in MongoDB specifically.

LangChain (no LangGraph) — store chat in MongoDB via RunnableWithMessageHistory

pip install langchain-mongodb pymongo

Wrap your model/agent with RunnableWithMessageHistory. For your agent created with create_agent, configure input/output keys as messages, and provide a session-based factory that returns a Mongo-backed history.

from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_mongodb.chat_message_histories import MongoDBChatMessageHistory

# 1) Define a factory that returns a Mongo-backed history for a session
def get_session_history(session_id: str) -> MongoDBChatMessageHistory:
    return MongoDBChatMessageHistory(
        connection_string="mongodb://localhost:27017",  # or Atlas URI
        session_id=session_id,
        database_name="chat_history",
        collection_name="message_store",
        # create_index=True (default) creates an index on SessionId
    )

# 2) Wrap your agent graph so history is auto-prepended and persisted
agent_with_history = RunnableWithMessageHistory(
    agent,
    get_session_history,
    input_messages_key="messages",
    output_messages_key="messages",
)

# 3) In your socket handler, pass the user turn and the session_id
#    IMPORTANT: v1 agents expect "messages" input (not "input")
config = {"configurable": {"session_id": sid}}
inputs = {"messages": [{"role": "user", "content": message}]}

result = await asyncio.to_thread(agent_with_history.invoke, inputs, config)
assistant_text = result["messages"][-1].content  # last message is the assistant

  • The user + assistant messages for each turn are appended to MongoDB automatically.
  • You can later page or filter the collection by SessionId to reconstruct a chat, monitor usage, and analyze outcomes.
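
For example, a rough sketch of reading one session back with plain pymongo — the SessionId and History field names are the defaults used by MongoDBChatMessageHistory, so adjust them if you override the keys:

import json
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
collection = client["chat_history"]["message_store"]

def load_transcript(session_id: str) -> list[dict]:
    # Each document holds one serialized LangChain message for this session.
    transcript = []
    for doc in collection.find({"SessionId": session_id}):
        msg = json.loads(doc["History"])
        transcript.append({
            "role": msg.get("type"),                      # "human" or "ai"
            "content": msg.get("data", {}).get("content", ""),
        })
    return transcript

print(load_transcript("sid-123"))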

LangGraph agent — persist conversation with a checkpointer (and/or MongoDB history)

If you’re already using the v1 agent factory (create_agent), you can persist the whole conversation state (messages, tool I/O, etc.) with a LangGraph checkpointer:

Note: the langgraph-checkpoint-mongodb package on PyPI seems to be outdated for now (see langgraph/docs/docs/how-tos/memory/add-memory.md at main · langchain-ai/langgraph · GitHub), so I’m not sure it still works.

from langgraph.checkpoint.mongodb import MongoDBSaver
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI

model = ChatOpenAI(model="gpt-4o-mini", temperature=0.1)

with MongoDBSaver.from_conn_string("mongodb://user:pass@host:27017/?authSource=admin") as checkpointer:
    agent = create_agent(
        model=model,
        system_prompt="You are a chat bot.",
        checkpointer=checkpointer,
    )
    # Use a stable thread_id per conversation
    result = agent.invoke(
        {"messages": [{"role": "user", "content": "Hello"}]},
        config={"configurable": {"thread_id": "sid-123"}},
    )
    print(result["messages"][-1].content)