feat: 优化 FAQ 处理和系统稳定性

- 添加本地 FAQ 库快速路径(问候语等社交响应)
- 修复 Chatwoot 重启循环问题(PID 文件清理)
- 添加 LLM 响应缓存(Redis 缓存,提升性能)
- 添加智能推理模式(根据查询复杂度自动启用)
- 添加订单卡片消息功能(Chatwoot 富媒体)
- 增加 LLM 超时时间至 60 秒

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
Author: wangliang
Date: 2026-01-20 14:51:30 +08:00
Commit: 6b6172d8f0 (parent: c4e97cf312)
8 changed files with 684 additions and 18 deletions

View File

@@ -8,6 +8,7 @@ from core.state import AgentState, ConversationState, add_tool_call, set_respons
from core.llm import get_llm_client, Message
from prompts import get_prompt
from utils.logger import get_logger
from utils.faq_library import get_faq_library
logger = get_logger(__name__)
@@ -36,6 +37,32 @@ async def customer_service_agent(state: AgentState) -> AgentState:
if state["tool_results"]:
return await _generate_response_from_results(state)
# ========== FAST PATH: Check if FAQ was already matched at router ==========
# Router already checked FAQ and stored response if found
if "faq_response" in state and state["faq_response"]:
logger.info(
"Using FAQ response from router",
conversation_id=state["conversation_id"],
response_length=len(state["faq_response"])
)
return set_response(state, state["faq_response"])
# =========================================================================
# ========== FAST PATH: Check local FAQ library first (backup) ==========
# This provides instant response for common questions without API calls
# This is a fallback in case FAQ wasn't matched at router level
faq_library = get_faq_library()
faq_response = faq_library.find_match(state["current_message"])
if faq_response:
logger.info(
"FAQ match found, returning instant response",
conversation_id=state["conversation_id"],
response_length=len(faq_response)
)
return set_response(state, faq_response)
# ============================================================
# Get detected language
locale = state.get("detected_language", "en")

View File

@@ -9,6 +9,7 @@ from core.llm import get_llm_client, Message
from core.language_detector import get_cached_or_detect
from prompts import get_prompt
from utils.logger import get_logger
from utils.faq_library import get_faq_library
logger = get_logger(__name__)
@@ -34,6 +35,28 @@ async def classify_intent(state: AgentState) -> AgentState:
state["state"] = ConversationState.CLASSIFYING.value
state["step_count"] += 1
# ========== FAST PATH: Check FAQ first BEFORE calling LLM ==========
# This avoids slow LLM calls for common questions
import re
clean_message = re.sub(r'<[^>]+>', '', state["current_message"])
clean_message = ' '.join(clean_message.split())
faq_library = get_faq_library()
faq_response = faq_library.find_match(clean_message)
if faq_response:
logger.info(
"FAQ matched at router level, skipping LLM classification",
conversation_id=state["conversation_id"],
message=clean_message[:50]
)
# Set to customer service intent and store FAQ response
state["intent"] = Intent.CUSTOMER_SERVICE.value
state["intent_confidence"] = 1.0 # High confidence for FAQ matches
state["faq_response"] = faq_response # Store FAQ response for later use
return state
# ==============================================================
# Detect language
detected_locale = get_cached_or_detect(state, state["current_message"])
confidence = 0.85 # Default confidence for language detection