Files
assistant/agent/agents/router.py
wangliang 6b6172d8f0 feat: 优化 FAQ 处理和系统稳定性
- 添加本地 FAQ 库快速路径(问候语等社交响应)
- 修复 Chatwoot 重启循环问题(PID 文件清理)
- 添加 LLM 响应缓存(Redis 缓存,提升性能)
- 添加智能推理模式(根据查询复杂度自动启用)
- 添加订单卡片消息功能(Chatwoot 富媒体)
- 增加 LLM 超时时间至 60 秒

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
2026-01-20 14:51:30 +08:00

205 lines
6.8 KiB
Python

"""
Router Agent - Intent recognition and routing
"""
import json
from typing import Any, Optional
from core.state import AgentState, Intent, ConversationState, set_intent, add_entity, set_language
from core.llm import get_llm_client, Message
from core.language_detector import get_cached_or_detect
from prompts import get_prompt
from utils.logger import get_logger
from utils.faq_library import get_faq_library
logger = get_logger(__name__)
def _strip_code_fences(text: str) -> str:
    """Remove surrounding markdown code fences (``` or ```json) from LLM output.

    Returns the inner content, stripped of leading/trailing whitespace so an
    effectively-empty response is detected correctly by the caller.
    """
    text = text.strip()
    if text.startswith("```"):
        # Take the content between the first pair of fences.
        text = text.split("```")[1]
        if text.startswith("json"):
            text = text[4:]  # drop the language tag
    return text.strip()


def _build_context_summary(state: AgentState) -> str:
    """Summarize order/product identifiers carried in the conversation context.

    Returns an empty string when no context is available.
    """
    parts = []
    ctx = state["context"]
    if ctx:
        if ctx.get("order_id"):
            parts.append(f"Current order: {ctx['order_id']}")
        if ctx.get("product_id"):
            parts.append(f"Current product: {ctx['product_id']}")
    return "\n".join(parts)


async def classify_intent(state: AgentState) -> AgentState:
    """Classify user intent and extract entities.

    This is the first node in the workflow. It first tries a local FAQ match
    (fast path, no LLM call), then detects the message language, and finally
    asks the LLM to classify the intent and extract entities.

    Args:
        state: Current agent state.

    Returns:
        Updated state with intent, confidence, entities, and (when the FAQ
        fast path hits) a pre-built ``faq_response``. On failure the intent
        falls back to UNKNOWN with zero confidence.
    """
    import re  # function-local import, as in the original module layout

    logger.info(
        "Classifying intent",
        conversation_id=state["conversation_id"],
        message=state["current_message"][:100]
    )
    state["state"] = ConversationState.CLASSIFYING.value
    state["step_count"] += 1

    # ========== FAST PATH: Check FAQ first BEFORE calling LLM ==========
    # Avoids slow LLM calls for common questions (greetings, etc.).
    # Strip HTML tags and collapse whitespace before matching.
    clean_message = re.sub(r'<[^>]+>', '', state["current_message"])
    clean_message = ' '.join(clean_message.split())

    faq_library = get_faq_library()
    faq_response = faq_library.find_match(clean_message)
    if faq_response:
        logger.info(
            "FAQ matched at router level, skipping LLM classification",
            conversation_id=state["conversation_id"],
            message=clean_message[:50]
        )
        # Set to customer service intent and store FAQ response
        state["intent"] = Intent.CUSTOMER_SERVICE.value
        state["intent_confidence"] = 1.0  # High confidence for FAQ matches
        state["faq_response"] = faq_response  # Store FAQ response for later use
        return state
    # ==============================================================

    # Detect language (cached per conversation when possible).
    detected_locale = get_cached_or_detect(state, state["current_message"])
    confidence = 0.85  # Default confidence for language detection
    state = set_language(state, detected_locale, confidence)
    logger.info(
        "Language detected",
        locale=detected_locale,
        confidence=confidence
    )

    # Build context from conversation history (current order/product, if any).
    context_summary = _build_context_summary(state)

    # Load prompt in detected language
    classification_prompt = get_prompt("router", detected_locale)

    # Build messages for LLM
    messages = [
        Message(role="system", content=classification_prompt),
    ]
    # Add recent conversation history for context
    for msg in state["messages"][-6:]:  # Last 3 turns
        messages.append(Message(role=msg["role"], content=msg["content"]))

    # Add current message with context
    user_content = f"User message: {state['current_message']}"
    if context_summary:
        user_content += f"\n\nCurrent context:\n{context_summary}"
    messages.append(Message(role="user", content=user_content))

    try:
        llm = get_llm_client()
        response = await llm.chat(messages, temperature=0.3)

        # Parse JSON response
        content = response.content.strip()

        # Log raw response for debugging
        logger.debug(
            "LLM response for intent classification",
            response_preview=content[:500] if content else "EMPTY",
            content_length=len(content) if content else 0
        )

        # Handle markdown code blocks; strip so a whitespace-only payload
        # is caught by the empty-response check below.
        content = _strip_code_fences(content)

        # Check for empty response
        if not content:
            logger.warning("LLM returned empty response for intent classification")
            state["intent"] = Intent.CUSTOMER_SERVICE.value  # Default to customer service
            state["intent_confidence"] = 0.5
            return state

        result = json.loads(content)

        # Extract intent; unrecognized values map to UNKNOWN.
        intent_str = result.get("intent", "unknown")
        try:
            intent = Intent(intent_str)
        except ValueError:
            intent = Intent.UNKNOWN

        confidence = float(result.get("confidence", 0.5))
        sub_intent = result.get("sub_intent")

        # Set intent in state
        state = set_intent(state, intent, confidence, sub_intent)

        # Extract entities (skip empty values)
        entities = result.get("entities", {})
        for entity_type, entity_value in entities.items():
            if entity_value:
                state = add_entity(state, entity_type, entity_value)

        logger.info(
            "Intent classified",
            intent=intent.value,
            confidence=confidence,
            entities=list(entities.keys())
        )

        # Check if human handoff is needed (explicit intent or low confidence)
        if intent == Intent.HUMAN_HANDOFF or confidence < 0.5:
            state["requires_human"] = True
            state["handoff_reason"] = result.get("reasoning", "Intent unclear")

        return state

    except json.JSONDecodeError as e:
        logger.error("Failed to parse intent response", error=str(e))
        state["intent"] = Intent.UNKNOWN.value
        state["intent_confidence"] = 0.0
        return state
    except Exception as e:
        logger.error("Intent classification failed", error=str(e))
        state["error"] = str(e)
        state["intent"] = Intent.UNKNOWN.value
        # Fix: keep error paths consistent — confidence was previously left
        # unset/stale on this branch, unlike the JSONDecodeError branch.
        state["intent_confidence"] = 0.0
        return state
def route_by_intent(state: AgentState) -> str:
    """Pick the next graph node based on the classified intent.

    Used as the conditional-edge routing function in the workflow graph.

    Args:
        state: Current agent state.

    Returns:
        Name of the next node to execute.
    """
    # Escalation to a human always wins over intent-based routing.
    if state.get("requires_human", False):
        return "human_handoff"

    # Intent value -> destination node; anything unmapped falls back to
    # the customer-service agent.
    destinations = {
        Intent.CUSTOMER_SERVICE.value: "customer_service_agent",
        Intent.ORDER.value: "order_agent",
        Intent.AFTERSALE.value: "aftersale_agent",
        Intent.PRODUCT.value: "product_agent",
        Intent.HUMAN_HANDOFF.value: "human_handoff",
        Intent.UNKNOWN.value: "customer_service_agent",  # Default to customer service
    }
    return destinations.get(state.get("intent"), "customer_service_agent")