# Assemble the writer agent: the chat model plus the artifact-generation
# tool, with conversation state persisted through the checkpointer.
agent = create_agent(
    model=model,
    state_schema=AiWriterState,
    checkpointer=memory,
    tools=[generate_artifact],
)
When `generate_artifact` completes, CopilotKit receives all the tool call events in an instant.
# Build the writer agent: binds the model to the artifact-generation tool
# and persists conversation state via the `memory` checkpointer.
agent = create_agent(
model=model,
tools=[generate_artifact],
checkpointer=memory,
state_schema=AiWriterState,
)
When `generate_artifact` completes, CopilotKit receives all the tool call events in an instant.
I want to show the state of the tool call on the frontend
hi @itzhoujun
Would it be possible for you to share the code? The graph and the CopilotKit part would be great.
useDefaultTool({
render: ({ name, args, status, result }) => {
return (
<div style={{ color: "black" }}>
<span>
{status === "complete" ? "✓" : "⏳"}
{name}
</span>
{status === "complete" && result && (
<pre>{JSON.stringify(result, null, 2)}</pre>
)}
</div>
);
},
});
// runtime — self-hosted CopilotKit endpoint that proxies to the Python agent.
import express from 'express';
import {
  CopilotRuntime,
  ExperimentalEmptyAdapter,
  copilotRuntimeNodeExpressEndpoint,
} from "@copilotkit/runtime";
import { LangGraphHttpAgent } from '@copilotkit/runtime/langgraph';
// import cors from 'cors';

// No LLM adapter is needed here: generation happens inside the LangGraph agent.
const serviceAdapter = new ExperimentalEmptyAdapter();

// Register the remote LangGraph agent (FastAPI server on :9998) as "ai_writer".
const runtime = new CopilotRuntime({
  agents: {
    ai_writer: new LangGraphHttpAgent({
      url: "http://localhost:9998/agents/ai_writer",
    }),
  },
});

// Express-compatible request handler for the CopilotKit protocol.
const copilotRuntime = copilotRuntimeNodeExpressEndpoint({
  endpoint: "/",
  runtime,
  serviceAdapter,
});

const app = express();

// Configure CORS to allow cross-origin requests (currently disabled).
// app.use(
//   cors({
//     origin: "*",
//     methods: ["GET", "POST", "OPTIONS", "PUT", "DELETE", "PATCH"],
//     allowedHeaders: ["*"],
//   }),
// );

app.use("/api/copilotkit", copilotRuntime);

const port = Number(process.env.PORT || 3001);
app.listen(port, () => console.log(`Server listening on http://127.0.0.1:${port}`));
langchain:
def create_ai_writer_agent():
    """Build the AI-writer LangGraph agent.

    Wires the configured chat model to the writing tools, installs the
    CopilotKit middleware and the state-aware prompt, and backs the graph
    with an in-memory checkpointer.
    """
    llm = ChatOpenAI(
        model_name=llm_settings.LLM_MODEL,
        base_url=llm_settings.LLM_BASE_URL,
        api_key=llm_settings.LLM_API_KEY,
    )
    checkpointer = MemorySaver()
    return create_agent(
        model=llm,
        tools=[generate_artifact, rewrite_artifact, update_highlighted_text, analyze_video_script],
        middleware=[CopilotKitMiddleware(), state_aware_prompt],
        checkpointer=checkpointer,
        state_schema=AiWriterState,
    )
def register_ai_writer_agent(app):
    """Mount the AI-writer agent on *app* as an AG-UI endpoint.

    Exposes the graph at ``/agents/ai_writer`` so the CopilotKit runtime
    can reach it over HTTP.
    """
    graph = create_ai_writer_agent()
    agui_agent = LangGraphAGUIAgent(
        name="ai_writer",
        description="AI Writer Agent",
        graph=graph,
    )
    add_langgraph_fastapi_endpoint(
        app=app,
        agent=agui_agent,
        path="/agents/ai_writer",
    )
tool
@tool
async def generate_artifact(
    requirements: str,
    config: RunnableConfig | None = None,
    runtime: ToolRuntime | None = None
) -> Command:
    """Generate new artifact content from the user's request.

    Streams the model output, emitting intermediate state to the frontend
    on every chunk, then returns a ``Command`` that stores the final
    artifact and appends a ToolMessage.

    Args:
        requirements: Description of what the user wants generated.
    """
    # Create a standalone model instance; an empty callbacks config is
    # passed at stream time so the agent does not capture this model's
    # token stream as part of its own message output.
    model = ChatOpenAI(
        model_name=llm_settings.LLM_MODEL,
        base_url=llm_settings.LLM_BASE_URL,
        api_key=llm_settings.LLM_API_KEY
    )
    # Build the messages for the generation request.
    final_messages = [
        SystemMessage(content=NEW_ARTIFACT_PROMPT),
        HumanMessage(content=requirements)
    ]
    content = ''
    # Stream the content and push each partial result to the frontend.
    async for chunk in model.astream(final_messages, config={"callbacks": []}):
        content += (chunk.content or "")
        # copilotkit_emit_state updates the frontend state only; it does
        # not touch the graph's `messages` channel.
        artifact_data = Artifact(content=content)
        await copilotkit_emit_state(config, {
            "artifact": artifact_data
        })
    # Build the final artifact and fold it into the graph state.
    final_artifact = Artifact(content=content)
    return Command(
        update={
            "artifact": final_artifact,
            "messages": [
                ToolMessage(
                    content=f"已成功生成新内容(共 {len(content)} 字)。",
                    tool_call_id=runtime.tool_call_id,
                    name="generate_artifact"
                )
            ]
        }
    )
@pawel-twardziak hi, I think it could also be a problem with copilotkit or ag-ui
hi @itzhoujun
FYI, I’m still digging into that issue.