Issue: the tool's output appears empty in LangSmith traces.

def get_template(query: str, runtime: ToolRuntime[RunnableConfig, WorkflowState] = None):
    """
    Retrieve relevant template documents from a Qdrant vector store for a user's query.

    Uses cached connection-pool clients for optimal performance across
    multiple invocations.

    Parameters:
    query: (string) The user's search query to find matching templates.
    runtime: Optional tool runtime carrying the RunnableConfig and tool_call_id.
    Return: A Command updating state with up to 7 matching Document objects
    ("current_contexts") and a ToolMessage summarizing the results.
    Constraint: This tool searches documents whose metadata category is
    'template', 'QnA', or 'Handover' (see the filter below — the previous
    docstring incorrectly claimed 'template' only).
    """
    start_time = time.time()
    config = runtime.config if runtime else None

    # NOTE(review): when runtime is None, tool_call_id is None and the
    # ToolMessage cannot be matched to its tool call — the likely reason the
    # tool output shows as empty in LangSmith. Confirm runtime is injected.
    tool_call_id = runtime.tool_call_id if runtime else None

    def _error(message: str) -> Command:
        # One place to build an error result instead of three inline copies.
        return Command(update={
            "current_contexts": [],
            "messages": [ToolMessage(content=message, tool_call_id=tool_call_id)]
        })

    # Assign defaults first so they survive a config that has neither
    # 'metadata' nor 'configurable' (the old code raised UnboundLocalError
    # in that case).
    database_name = 'PHARMCARE_HIA'
    branch_id = '1'
    if config:
        # Prefer 'metadata'; fall back to 'configurable' (same order as before).
        source = config.get('metadata') or config.get('configurable') or {}
        database_name = source.get('database_name', database_name)
        branch_id = source.get('branch_id', branch_id)

    # Collections are partitioned per database + branch.
    collection_name = f"{database_name}-{branch_id}"
    print(f"🔍 Searching templates in collection: {collection_name}")

    try:
        # Use cached clients from connection pools.
        # NOTE(review): genai_client is validated but never used — embeddings
        # go through VertexAIEmbeddings below. Confirm whether the pool check
        # is still needed.
        genai_client = get_cached_genai_client("pharmcare-chatbot-429003", "us-central1")
        qdrant_client = get_cached_qdrant_client()
        embedding = VertexAIEmbeddings(model_name="gemini-embedding-001", project="pharmcare-chatbot-429003")
        if not genai_client:
            print("❌ Failed to get GenAI client from pool")
            return _error("Failed to connect to search service")

        if not qdrant_client:
            print("❌ Failed to get Qdrant client from pool")
            return _error("Failed to connect to database")

        # Embed the query (768-dim, retrieval-query task type).
        embedding_start = time.time()
        resp = embedding.embed(texts=[query], dimensions=768, embeddings_task_type="RETRIEVAL_QUERY")
        embedding_time = time.time() - embedding_start

        query_embedding = resp[0]
        if not query_embedding:
            print("❌ Failed to get embedding for query")
            return _error("Failed to process query")

        # Vector search restricted to the allowed categories and to points
        # whose page_content is not an empty/placeholder string.
        search_start = time.time()
        categories = ["template", "QnA", "Handover"]
        filter_conditions = [
            models.FieldCondition(
                key="metadata.category",
                match=models.MatchAny(any=categories),
            ),
            models.FieldCondition(
                key="page_content",
                # "except" is a Python keyword, hence the ** expansion.
                match=models.MatchExcept(**{"except": ["", "None", "none", "null"]})
            ),
        ]

        search_result = qdrant_client.search(
            collection_name=collection_name,
            query_vector=query_embedding,
            with_payload=True,
            limit=7,
            query_filter=models.Filter(must=filter_conditions),
        )
        search_time = time.time() - search_start

        # Convert Qdrant points into LangChain Documents, carrying the
        # similarity score along in metadata.
        docs = []
        for point in search_result:
            metadata = point.payload.get('metadata', {})
            content = point.payload.get('page_content', '')
            metadata['similarity_score'] = point.score

            # Skip empty content (belt-and-braces on top of the server filter).
            if not content or content.strip() == '':
                continue

            docs.append(Document(page_content=content, metadata=metadata))

        total_time = time.time() - start_time
        print(f"✅ Retrieved {len(docs)} template documents in {total_time:.3f}s (embedding: {embedding_time:.3f}s, search: {search_time:.3f}s)")

        # Return Command to update both messages and state.
        return Command(update={
            "current_contexts": docs,  # Store Document objects in state
            "messages": [
                ToolMessage(
                    content=f"Found {len(docs)} templates. Documents: {format_docs(docs)}",
                    tool_call_id=tool_call_id
                )
            ]
        })

    except Exception as e:
        # Broad catch is deliberate: a tool must return a ToolMessage rather
        # than propagate, or the agent loop breaks.
        print(f"❌ Error in template search: {e}")
        return _error(f"Search error: {str(e)}")

# Agent LLM. NOTE(review): temperature=1 with seed=42 — the seed only aids
# reproducibility at low temperature; confirm these settings are intentional.
agent_llm = ChatVertexAI(model="gemini-2.5-flash", location="europe-central2", temperature=1, seed=42)
# Single retrieval tool exposed to the agent.
tools = [get_template]
# Prompt text comes from agent_prompt_2, defined elsewhere in the project.
agent_prompt = PromptTemplate.from_template(
    template=agent_prompt_2
)

# Create agent with custom state schema to track contexts.
# CustomAgentState presumably adds a "current_contexts" channel that
# get_template's Command updates — verify the schema declares it, otherwise
# the state update (and hence the tool output surfaced downstream) is dropped.
agent = create_agent(
    model=agent_llm, 
    tools=tools, 
    middleware=[bot_identity_role], 
    context_schema=identity,
    state_schema=CustomAgentState  # Add custom state schema
)



Question: why is the template tool's output empty in LangSmith? (See the NOTE(review) comments above: a missing tool_call_id or a state schema that does not declare "current_contexts" are the likely causes.)