The root cause is that the PostgreSQL connection is opened once at startup and reused across requests, but it eventually gets closed (idle timeout, server reset, etc.).
class StoreManager:
    """Lazily build and cache a PostgresStore backed by a ConnectionPool.

    NOTE(review): ``ConnectionPool`` (psycopg_pool) and ``PostgresStore``
    (langgraph.store.postgres) are assumed to be imported elsewhere in this
    module — confirm.
    """

    def __init__(self, db_uri: str):
        self.db_uri = db_uri  # PostgreSQL connection string (conninfo)
        self._store = None    # cached PostgresStore, created on first use
        self._pool = None     # ConnectionPool backing the cached store

    def compiled_store(self):
        """Return the (lazily created) PostgresStore backed by a ConnectionPool.

        The pool manages reconnections automatically, so stale/closed
        connections are never an issue across requests.

        NOTE(review): the lazy init below is not guarded by a lock; two
        threads racing on the first call could each build a pool. Add a
        ``threading.Lock`` if this is called from concurrent request handlers.
        """
        if self._store is None:
            self._pool = ConnectionPool(
                conninfo=self.db_uri,
                min_size=2,   # keep at least 2 connections alive
                max_size=10,  # allow up to 10 concurrent connections
                open=True,    # open the pool immediately
            )
            self._store = PostgresStore(conn=self._pool)
            self._store.setup()  # creates tables if they don't exist
        return self._store

    def close(self) -> None:
        """Close the underlying pool and drop the cached store.

        Fixes a resource leak in the original class: it opened a
        ConnectionPool but provided no way to release its connections on
        application shutdown. Safe to call when nothing was ever opened.
        """
        if self._pool is not None:
            self._pool.close()
            self._pool = None
            self._store = None
Hey @jayisampelli!
Great diagnosis — you’ve correctly pinpointed that the root cause is a single persistent connection going stale over time, and connection pooling is indeed the right solution. However, you don’t actually need to build a custom StoreManager class: PostgresStore already has built-in connection pooling support via the from_conn_string() factory method and its pool_config parameter.
PostgresStore.from_conn_string() accepts a pool_config argument of type PoolConfig that creates and manages a psycopg_pool.ConnectionPool for you under the hood:
from langgraph.store.postgres import PostgresStore, PoolConfig

DB_URI = "postgresql://postgres:postgres@localhost:5442/postgres?sslmode=disable"

# PoolConfig tells from_conn_string() to back the store with a
# psycopg_pool.ConnectionPool instead of a single persistent connection.
pool_cfg = PoolConfig(
    min_size=2,   # minimum connections kept alive
    max_size=10,  # maximum concurrent connections
)

with PostgresStore.from_conn_string(DB_URI, pool_config=pool_cfg) as store:
    store.setup()  # run migrations (only needed once)
    # Connections are borrowed from the pool per operation and returned
    # automatically, so callers never see a stale/closed connection.
    store.put(("user_123", "memories"), "pref-1", {"data": "User prefers dark mode"})
    item = store.get(("user_123", "memories"), "pref-1")