assistant/agent/agents/customer_service.py

"""
Customer Service Agent - Handles FAQ and general inquiries
"""
import json
from typing import Any
from core.state import AgentState, ConversationState, add_tool_call, set_response
from core.llm import get_llm_client, Message
from utils.logger import get_logger

logger = get_logger(__name__)

CUSTOMER_SERVICE_PROMPT = """你是一个专业的 B2B 购物网站客服助手。

你的职责是回答用户的一般性问题,包括:
- 常见问题解答 (FAQ)
- 公司信息查询
- 政策咨询(退换货政策、隐私政策等)
- 产品使用指南
- 其他一般性咨询

## 可用工具

你可以使用以下工具获取信息:

1. **query_faq** - 搜索 FAQ 常见问题
   - query: 搜索关键词
   - category: 分类(可选)
2. **get_company_info** - 获取公司信息
   - section: 信息类别(about_us, contact, etc.)
3. **get_policy** - 获取政策文档
   - policy_type: 政策类型(return_policy, privacy_policy, etc.)

## 工具调用格式

当需要使用工具时,请返回 JSON 格式:

```json
{
    "action": "call_tool",
    "tool_name": "工具名称",
    "arguments": {
        "参数名": "参数值"
    }
}
```

当可以直接回答时,请返回:

```json
{
    "action": "respond",
    "response": "回复内容"
}
```

当需要转人工时,请返回:

```json
{
    "action": "handoff",
    "reason": "转人工原因"
}
```

## 注意事项
- 保持专业友好的语气
- 如果不确定答案,建议用户联系人工客服
- 不要编造信息,只使用工具返回的数据
"""
async def customer_service_agent(state: AgentState) -> AgentState:
    """Customer service agent node.

    Handles FAQ, company info, and general inquiries using Strapi MCP tools.

    Args:
        state: Current agent state

    Returns:
        Updated state with tool calls or response
    """
    logger.info(
        "Customer service agent processing",
        conversation_id=state["conversation_id"]
    )

    state["current_agent"] = "customer_service"
    state["agent_history"].append("customer_service")
    state["state"] = ConversationState.PROCESSING.value

    # Check if we have tool results to process
    if state["tool_results"]:
        return await _generate_response_from_results(state)

    # Build messages for LLM
    messages = [
        Message(role="system", content=CUSTOMER_SERVICE_PROMPT),
    ]

    # Add conversation history
    for msg in state["messages"][-6:]:
        messages.append(Message(role=msg["role"], content=msg["content"]))

    # Add current message
    messages.append(Message(role="user", content=state["current_message"]))

    try:
        llm = get_llm_client()
        response = await llm.chat(messages, temperature=0.7)

        # Parse response
        content = response.content.strip()
        if content.startswith("```"):
            content = content.split("```")[1]
            if content.startswith("json"):
                content = content[4:]

        result = json.loads(content)
        action = result.get("action")
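
        # Dispatch on the "action" field declared by the model; the branches
        # mirror the JSON protocol defined in CUSTOMER_SERVICE_PROMPT.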
        if action == "call_tool":
            # Add tool call to state
            state = add_tool_call(
                state,
                tool_name=result["tool_name"],
                arguments=result.get("arguments", {}),
                server="strapi"
            )
            state["state"] = ConversationState.TOOL_CALLING.value
        elif action == "respond":
            state = set_response(state, result["response"])
            state["state"] = ConversationState.GENERATING.value
        elif action == "handoff":
            state["requires_human"] = True
            state["handoff_reason"] = result.get("reason", "User request")

        return state

    except json.JSONDecodeError:
        # LLM returned plain text, use as response
        state = set_response(state, response.content)
        return state
    except Exception as e:
        logger.error("Customer service agent failed", error=str(e))
        state["error"] = str(e)
        return state
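

# Second pass: once whichever component executes the queued Strapi call has
# populated state["tool_results"], customer_service_agent() short-circuits
# into this helper (see the check at the top of that function) and the
# results are summarised into a user-facing reply.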
async def _generate_response_from_results(state: AgentState) -> AgentState:
    """Generate response based on tool results"""
    # Build context from tool results
    tool_context = []
    for result in state["tool_results"]:
        if result["success"]:
            tool_context.append(f"工具 {result['tool_name']} 返回:\n{json.dumps(result['data'], ensure_ascii=False, indent=2)}")
        else:
            tool_context.append(f"工具 {result['tool_name']} 执行失败: {result['error']}")

    prompt = f"""基于以下工具返回的信息,生成对用户的回复。

用户问题: {state["current_message"]}

工具返回信息:
{chr(10).join(tool_context)}

请生成一个友好、专业的回复。如果工具没有返回有用信息,请诚实告知用户,并建议其他方式获取帮助。
只返回回复内容,不要返回 JSON。"""

    messages = [
        Message(role="system", content="你是一个专业的 B2B 客服助手,请根据工具返回的信息回答用户问题。"),
        Message(role="user", content=prompt)
    ]

    try:
        llm = get_llm_client()
        response = await llm.chat(messages, temperature=0.7)
        state = set_response(state, response.content)
        return state
    except Exception as e:
        logger.error("Response generation failed", error=str(e))
        state = set_response(state, "抱歉,处理您的请求时遇到问题。请稍后重试或联系人工客服。")
        return state
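

# ---------------------------------------------------------------------------
# Wiring sketch (illustrative only).  It assumes the project composes agent
# nodes into a LangGraph StateGraph and that a "tool_executor" node exists to
# run the queued Strapi MCP calls; the node names and the router below are
# hypothetical, not taken from this module.
#
#   from langgraph.graph import StateGraph, END
#
#   def _route(state: AgentState) -> str:
#       # Go to the tool executor when a tool call was queued, otherwise stop.
#       if state["state"] == ConversationState.TOOL_CALLING.value:
#           return "tools"
#       return "end"
#
#   graph = StateGraph(AgentState)
#   graph.add_node("customer_service", customer_service_agent)
#   graph.add_node("tool_executor", tool_executor)        # hypothetical node
#   graph.set_entry_point("customer_service")
#   graph.add_conditional_edges(
#       "customer_service", _route, {"tools": "tool_executor", "end": END}
#   )
#   graph.add_edge("tool_executor", "customer_service")   # feed results back
#   app = graph.compile()
# ---------------------------------------------------------------------------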