主要改进: - Agent 增强: 订单查询、售后支持、客服路由等功能优化 - 新增语言检测和 Token 管理模块 - 改进 Chatwoot webhook 处理和用户标识 - MCP 服务器增强: 订单 MCP 和 Strapi MCP 功能扩展 - 新增商城客户端、知识库、缓存和同步模块 - 添加多语言提示词系统 (YAML) - 完善项目结构: 整理文档、脚本和测试文件 - 新增调试和测试工具脚本 Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
199 lines
7.1 KiB
Python
"""Aftersale Agent - Handles returns, exchanges, and complaints"""

import json
from typing import Any

from core.state import AgentState, ConversationState, add_tool_call, set_response, update_context
from core.llm import get_llm_client, Message
from prompts import get_prompt
from utils.logger import get_logger

# Module-level logger, named after this module for filterable log output
logger = get_logger(__name__)
|
async def aftersale_agent(state: AgentState) -> AgentState:
    """Aftersale agent node.

    Handles returns, exchanges, complaints and aftersale queries.
    Flow: if tool results are pending, summarize them into a reply;
    otherwise auto-query the FAQ for return-related keywords, or ask
    the LLM to pick the next action (tool call, clarifying question,
    direct reply, or human handoff).

    Args:
        state: Current agent state

    Returns:
        Updated state with tool calls or response
    """
    logger.info(
        "Aftersale agent processing",
        conversation_id=state["conversation_id"],
        sub_intent=state.get("sub_intent")
    )

    state["current_agent"] = "aftersale"
    state["agent_history"].append("aftersale")
    state["state"] = ConversationState.PROCESSING.value

    # Check if we have tool results to process
    if state["tool_results"]:
        return await _generate_aftersale_response(state)

    # Get detected language (defaults to English)
    locale = state.get("detected_language", "en")

    # Auto-query FAQ for return-related questions
    message_lower = state["current_message"].lower()
    faq_keywords = ["return", "refund", "defective", "exchange", "complaint", "damaged", "wrong", "missing"]

    # If the message contains return-related keywords and no FAQ lookup
    # has been issued yet, query the FAQ before involving the LLM.
    if any(keyword in message_lower for keyword in faq_keywords):
        # Skip if an FAQ / knowledge-base query was already made
        tool_calls = state.get("tool_calls", [])
        has_faq_query = any(tc.get("tool_name") in ["query_faq", "search_knowledge_base"] for tc in tool_calls)

        if not has_faq_query:
            logger.info(
                "Auto-querying FAQ for return-related question",
                conversation_id=state["conversation_id"]
            )

            # Automatically add the FAQ tool call
            state = add_tool_call(
                state,
                tool_name="query_faq",
                arguments={
                    "category": "return",
                    "locale": locale,
                    "limit": 5
                },
                server="strapi"
            )
            state["state"] = ConversationState.TOOL_CALLING.value
            return state

    # Build messages for LLM
    # Load prompt in detected language
    system_prompt = get_prompt("aftersale", locale)

    messages = [
        Message(role="system", content=system_prompt),
    ]

    # Add conversation history
    for msg in state["messages"][-8:]:  # More history for aftersale context
        messages.append(Message(role=msg["role"], content=msg["content"]))

    # Build context info for the user turn
    context_info = f"User ID: {state['user_id']}\nAccount ID: {state['account_id']}\n"

    if state["entities"]:
        context_info += f"Extracted entities: {json.dumps(state['entities'], ensure_ascii=False)}\n"

    if state["context"]:
        context_info += f"Conversation context: {json.dumps(state['context'], ensure_ascii=False)}\n"

    user_content = f"{context_info}\nUser message: {state['current_message']}"
    messages.append(Message(role="user", content=user_content))

    try:
        llm = get_llm_client()
        response = await llm.chat(messages, temperature=0.5)

        # Parse response: strip an optional markdown code fence before JSON parsing
        content = response.content.strip()
        if content.startswith("```"):
            content = content.split("```")[1]
            if content.startswith("json"):
                content = content[4:]

        result = json.loads(content)
        action = result.get("action")

        if action == "call_tool":
            arguments = result.get("arguments", {})
            arguments["user_id"] = state["user_id"]

            # Fall back to the extracted order_id entity when the LLM omitted it
            if "order_id" not in arguments and state["entities"].get("order_id"):
                arguments["order_id"] = state["entities"]["order_id"]

            state = add_tool_call(
                state,
                tool_name=result["tool_name"],
                arguments=arguments,
                server="aftersale"
            )
            state["state"] = ConversationState.TOOL_CALLING.value

        elif action == "ask_info":
            state = set_response(state, result["question"])
            state["state"] = ConversationState.AWAITING_INFO.value
            # Store required fields in context for next iteration
            if result.get("required_fields"):
                state = update_context(state, {"required_fields": result["required_fields"]})

        elif action == "respond":
            state = set_response(state, result["response"])
            state["state"] = ConversationState.GENERATING.value

        elif action == "handoff":
            state["requires_human"] = True
            state["handoff_reason"] = result.get("reason", "Complex aftersale issue")

        else:
            # Unknown/missing action: previously this fell through silently,
            # leaving the state with neither a response nor a tool call.
            # Log it so the no-op is diagnosable.
            logger.warning(
                "Aftersale agent received unrecognized action",
                conversation_id=state["conversation_id"],
                action=action
            )

        return state

    except json.JSONDecodeError:
        # LLM returned plain text instead of JSON -- use it verbatim as the reply
        state = set_response(state, response.content)
        return state

    except Exception as e:
        logger.error("Aftersale agent failed", error=str(e))
        state["error"] = str(e)
        return state
|
|
|
|
|
|
async def _generate_aftersale_response(state: AgentState) -> AgentState:
    """Generate a user-facing reply from aftersale tool results.

    Summarizes every tool result (success or failure) into a context block,
    then asks the LLM to compose a compassionate, professional response.

    Args:
        state: Current agent state with populated tool_results

    Returns:
        Updated state with the response set (a fallback apology message
        is used if the LLM call fails)
    """

    tool_context = []
    for result in state["tool_results"]:
        if result["success"]:
            data = result["data"]
            tool_context.append(f"Tool {result['tool_name']} returned:\n{json.dumps(data, ensure_ascii=False, indent=2)}")

            # Extract aftersale_id into conversation context so later turns
            # can reference the open aftersale case
            if isinstance(data, dict) and data.get("aftersale_id"):
                state = update_context(state, {"aftersale_id": data["aftersale_id"]})
        else:
            # .get() instead of ['error']: a malformed failure result without
            # an "error" key must not raise KeyError while building the reply
            tool_context.append(f"Tool {result['tool_name']} failed: {result.get('error')}")

    prompt = f"""Based on the following aftersale system information, generate a response to the user.

User question: {state["current_message"]}

System returned information:
{chr(10).join(tool_context)}

Please generate a compassionate and professional response:
- If application submitted successfully, inform user of aftersale ID and next steps
- If status query, clearly explain current progress
- If application failed, explain reason and provide solution
- Show understanding for user's issue

Return only the response content, do not return JSON."""

    messages = [
        Message(role="system", content="You are a professional aftersale service assistant, please answer user's aftersale questions based on system returned information."),
        Message(role="user", content=prompt)
    ]

    try:
        llm = get_llm_client()
        # Slightly higher temperature than the routing call for a warmer tone
        response = await llm.chat(messages, temperature=0.7)
        state = set_response(state, response.content)
        return state

    except Exception as e:
        logger.error("Aftersale response generation failed", error=str(e))
        state = set_response(state, "Sorry, there was a problem processing your aftersale request. Please try again later or contact customer support.")
        return state
|