## 主要修复 ### 1. JSON 解析错误处理 - 修复所有 Agent 的 LLM 响应解析失败时返回原始内容的问题 - 当 JSON 解析失败时,返回友好的兜底消息而不是原始文本 - 影响文件: customer_service.py, order.py, product.py, aftersale.py ### 2. FAQ 快速路径修复 - 修复 customer_service.py 中变量定义顺序问题 - has_faq_query 在使用前未定义导致 NameError - 添加详细的错误日志记录 ### 3. Chatwoot 集成改进 - 添加响应内容调试日志 - 改进错误处理和日志记录 ### 4. 订单查询优化 - 将订单列表默认返回数量从 10 条改为 5 条 - 统一 MCP 工具层和 Mall Client 层的默认值 ### 5. 代码清理 - 删除所有测试代码和示例文件 - 测试文件包括: test_*.py, test_*.html, test_*.sh - 删除测试目录: tests/, agent/tests/, agent/examples/ Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
205 lines
6.8 KiB
Python
205 lines
6.8 KiB
Python
"""
|
|
Router Agent - Intent recognition and routing
|
|
"""
|
|
import json
import re
from typing import Any, Optional

from core.state import AgentState, Intent, ConversationState, set_intent, add_entity, set_language
from core.llm import get_llm_client, Message
from core.language_detector import get_cached_or_detect
from prompts import get_prompt
from utils.logger import get_logger
from utils.faq_library import get_faq_library
|
|
|
|
logger = get_logger(__name__)
|
|
|
|
|
|
async def classify_intent(state: AgentState) -> AgentState:
|
|
"""Classify user intent and extract entities
|
|
|
|
This is the first node in the workflow that analyzes the user's message
|
|
and determines which agent should handle it.
|
|
|
|
Args:
|
|
state: Current agent state
|
|
|
|
Returns:
|
|
Updated state with intent and entities
|
|
"""
|
|
logger.info(
|
|
"Classifying intent",
|
|
conversation_id=state["conversation_id"],
|
|
message=state["current_message"][:100]
|
|
)
|
|
|
|
state["state"] = ConversationState.CLASSIFYING.value
|
|
state["step_count"] += 1
|
|
|
|
# ========== FAST PATH: Check FAQ first BEFORE calling LLM ==========
|
|
# This avoids slow LLM calls for common questions
|
|
import re
|
|
clean_message = re.sub(r'<[^>]+>', '', state["current_message"])
|
|
clean_message = ' '.join(clean_message.split())
|
|
|
|
faq_library = get_faq_library()
|
|
faq_response = faq_library.find_match(clean_message)
|
|
|
|
if faq_response:
|
|
logger.info(
|
|
"FAQ matched at router level, skipping LLM classification",
|
|
conversation_id=state["conversation_id"],
|
|
message=clean_message[:50]
|
|
)
|
|
# Set to customer service intent and store FAQ response
|
|
state["intent"] = Intent.CUSTOMER_SERVICE.value
|
|
state["intent_confidence"] = 1.0 # High confidence for FAQ matches
|
|
state["faq_response"] = faq_response # Store FAQ response for later use
|
|
return state
|
|
# ==============================================================
|
|
|
|
# Detect language
|
|
detected_locale = get_cached_or_detect(state, state["current_message"])
|
|
confidence = 0.85 # Default confidence for language detection
|
|
state = set_language(state, detected_locale, confidence)
|
|
|
|
logger.info(
|
|
"Language detected",
|
|
locale=detected_locale,
|
|
confidence=confidence
|
|
)
|
|
|
|
# Build context from conversation history
|
|
context_summary = ""
|
|
if state["context"]:
|
|
context_parts = []
|
|
if state["context"].get("order_id"):
|
|
context_parts.append(f"Current order: {state['context']['order_id']}")
|
|
if state["context"].get("product_id"):
|
|
context_parts.append(f"Current product: {state['context']['product_id']}")
|
|
if context_parts:
|
|
context_summary = "\n".join(context_parts)
|
|
|
|
# Load prompt in detected language
|
|
classification_prompt = get_prompt("router", detected_locale)
|
|
|
|
# Build messages for LLM
|
|
messages = [
|
|
Message(role="system", content=classification_prompt),
|
|
]
|
|
|
|
# Add recent conversation history for context
|
|
for msg in state["messages"][-6:]: # Last 3 turns
|
|
messages.append(Message(role=msg["role"], content=msg["content"]))
|
|
|
|
# Add current message with context
|
|
user_content = f"User message: {state['current_message']}"
|
|
if context_summary:
|
|
user_content += f"\n\nCurrent context:\n{context_summary}"
|
|
|
|
messages.append(Message(role="user", content=user_content))
|
|
|
|
try:
|
|
llm = get_llm_client()
|
|
response = await llm.chat(messages, temperature=0.3)
|
|
|
|
# Parse JSON response
|
|
content = response.content.strip()
|
|
|
|
# Log raw response for debugging
|
|
logger.info(
|
|
"LLM response for intent classification",
|
|
response_preview=content[:500] if content else "EMPTY",
|
|
content_length=len(content) if content else 0
|
|
)
|
|
|
|
# Handle markdown code blocks
|
|
if content.startswith("```"):
|
|
content = content.split("```")[1]
|
|
if content.startswith("json"):
|
|
content = content[4:]
|
|
|
|
# Check for empty response
|
|
if not content:
|
|
logger.warning("LLM returned empty response for intent classification")
|
|
state["intent"] = Intent.CUSTOMER_SERVICE.value # Default to customer service
|
|
state["intent_confidence"] = 0.5
|
|
return state
|
|
|
|
result = json.loads(content)
|
|
|
|
# Extract intent
|
|
intent_str = result.get("intent", "unknown")
|
|
try:
|
|
intent = Intent(intent_str)
|
|
except ValueError:
|
|
intent = Intent.UNKNOWN
|
|
|
|
confidence = float(result.get("confidence", 0.5))
|
|
sub_intent = result.get("sub_intent")
|
|
|
|
# Set intent in state
|
|
state = set_intent(state, intent, confidence, sub_intent)
|
|
|
|
# Extract entities
|
|
entities = result.get("entities", {})
|
|
for entity_type, entity_value in entities.items():
|
|
if entity_value:
|
|
state = add_entity(state, entity_type, entity_value)
|
|
|
|
logger.info(
|
|
"Intent classified",
|
|
intent=intent.value,
|
|
confidence=confidence,
|
|
entities=list(entities.keys())
|
|
)
|
|
|
|
# Check if human handoff is needed
|
|
if intent == Intent.HUMAN_HANDOFF or confidence < 0.5:
|
|
state["requires_human"] = True
|
|
state["handoff_reason"] = result.get("reasoning", "Intent unclear")
|
|
|
|
return state
|
|
|
|
except json.JSONDecodeError as e:
|
|
logger.error("Failed to parse intent response", error=str(e))
|
|
state["intent"] = Intent.UNKNOWN.value
|
|
state["intent_confidence"] = 0.0
|
|
return state
|
|
|
|
except Exception as e:
|
|
logger.error("Intent classification failed", error=str(e))
|
|
state["error"] = str(e)
|
|
state["intent"] = Intent.UNKNOWN.value
|
|
return state
|
|
|
|
|
|
def route_by_intent(state: AgentState) -> str:
|
|
"""Route to appropriate agent based on intent
|
|
|
|
This is the routing function used by conditional edges in the graph.
|
|
|
|
Args:
|
|
state: Current agent state
|
|
|
|
Returns:
|
|
Name of the next node to execute
|
|
"""
|
|
intent = state.get("intent")
|
|
requires_human = state.get("requires_human", False)
|
|
|
|
# Human handoff takes priority
|
|
if requires_human:
|
|
return "human_handoff"
|
|
|
|
# Route based on intent
|
|
routing_map = {
|
|
Intent.CUSTOMER_SERVICE.value: "customer_service_agent",
|
|
Intent.ORDER.value: "order_agent",
|
|
Intent.AFTERSALE.value: "aftersale_agent",
|
|
Intent.PRODUCT.value: "product_agent",
|
|
Intent.HUMAN_HANDOFF.value: "human_handoff",
|
|
Intent.UNKNOWN.value: "customer_service_agent" # Default to customer service
|
|
}
|
|
|
|
return routing_map.get(intent, "customer_service_agent")
|