feat: 优化 FAQ 处理和系统稳定性
- 添加本地 FAQ 库快速路径(问候语等社交响应)
- 修复 Chatwoot 重启循环问题(PID 文件清理)
- 添加 LLM 响应缓存(Redis 缓存,提升性能)
- 添加智能推理模式(根据查询复杂度自动启用)
- 添加订单卡片消息功能(Chatwoot 富媒体)
- 增加 LLM 超时时间至 60 秒

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
121
agent/utils/faq_library.py
Normal file
121
agent/utils/faq_library.py
Normal file
@@ -0,0 +1,121 @@
|
||||
"""
|
||||
Local FAQ Library for instant responses
|
||||
Common questions can be answered immediately without API calls
|
||||
"""
|
||||
import re
|
||||
from typing import Optional, Dict
|
||||
from .logger import get_logger
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
class FAQLibrary:
    """Local FAQ library for instant common question responses."""

    def __init__(self):
        """Initialize FAQ library with ONLY common greetings and social responses.

        Note: Business-related FAQs (register, order, payment, shipment, return, etc.)
        should be handled by Strapi MCP to ensure accuracy and consistency.
        This library only contains instant social responses for better UX.
        """
        # Keyword -> canned response. Keys are matched case-insensitively,
        # first exactly, then as a fuzzy (contains-keyword) match.
        self.faqs = {
            # ========== 问候类 Greetings ==========
            "你好": "你好!我是您的B2B客户服务助手,很高兴为您服务。我可以帮您处理订单查询、产品咨询、售后问题等。请问有什么可以帮到您的吗?",
            "您好": "您好!我是您的B2B客户服务助手,很高兴为您服务。我可以帮您处理订单查询、产品咨询、售后问题等。请问有什么可以帮到您的吗?",
            "hi": "Hello! I'm your B2B customer service assistant. How can I help you today?",
            "hello": "Hello! I'm here to assist you. How can I help you today?",
            "hey": "Hey there! How can I help you today?",

            # ========== 感谢类 Gratitude ==========
            "谢谢": "不客气!如果还有其他问题,随时可以问我。祝您购物愉快!",
            "感谢": "感谢您的支持!如有任何问题,随时联系我们。",
            "thank you": "You're welcome! If you have any other questions, feel free to ask. Have a great day!",
            "thanks": "You're welcome! Let me know if you need anything else.",

            # ========== 再见类 Farewell ==========
            "再见": "再见!如有需要,随时联系。祝您生活愉快!",
            "bye": "Goodbye! Feel free to reach out anytime. Have a great day!",
            "goodbye": "Goodbye! Have a wonderful day!",

            # ========== 社交礼貌类 Social Politeness ==========
            "早上好": "早上好!很高兴为您服务。请问有什么可以帮到您的吗?",
            "下午好": "下午好!很高兴为您服务。请问有什么可以帮到您的吗?",
            "晚上好": "晚上好!很高兴为您服务。请问有什么可以帮到您的吗?",
            "good morning": "Good morning! How can I assist you today?",
            "good afternoon": "Good afternoon! How can I assist you today?",
            "good evening": "Good evening! How can I assist you today?",
        }

        # Compile regex patterns for fuzzy matching
        self._compile_patterns()

    @staticmethod
    def _build_pattern(keyword: str) -> re.Pattern[str]:
        """Compile one keyword into its case-insensitive matching pattern.

        ASCII keywords are anchored with ``\\b`` word boundaries so short
        greetings like "hi" cannot fire inside unrelated words such as
        "shipping" or "this".  CJK keywords are left as plain substrings
        because ``\\b`` does not separate consecutive CJK word characters,
        which would break matches like "你好" inside "你好吗".
        """
        escaped = re.escape(keyword)
        if keyword.isascii():
            # Fix: the original compiled bare substrings, so the fuzzy path
            # matched "hi" inside "shipping" and answered the wrong question.
            escaped = rf"\b{escaped}\b"
        return re.compile(escaped, re.IGNORECASE)

    def _compile_patterns(self):
        """(Re)compile regex patterns for fuzzy FAQ matching from self.faqs."""
        self.patterns = [
            (self._build_pattern(keyword), response)
            for keyword, response in self.faqs.items()
        ]

    def find_match(self, query: str) -> Optional[str]:
        """Find matching FAQ response.

        Args:
            query: User query text (may contain HTML markup)

        Returns:
            Matching FAQ response or None if no match found
        """
        # Remove HTML tags and collapse runs of whitespace
        clean_query = re.sub(r'<[^>]+>', '', query)
        clean_query = ' '.join(clean_query.split())

        # Try exact (case-insensitive) match first.  Single pass over the
        # dict -- the original scanned it twice (membership test + lookup).
        lowered = clean_query.lower()
        for key, response in self.faqs.items():
            if key.lower() == lowered:
                logger.info("FAQ exact match", key=key, query=clean_query[:50])
                return response

        # Try fuzzy match (query contains a keyword); first pattern wins
        for pattern, response in self.patterns:
            if pattern.search(clean_query):
                logger.info("FAQ fuzzy match", pattern=pattern.pattern, query=clean_query[:50])
                return response

        # No match found
        logger.debug("No FAQ match found", query=clean_query[:50])
        return None

    def add_faq(self, keyword: str, response: str) -> None:
        """Add or update FAQ entry.

        Args:
            keyword: Question keyword
            response: Answer text
        """
        self.faqs[keyword] = response
        # Fix: rebuild the pattern list instead of appending.  The original
        # appended a second pattern on update, so fuzzy matching kept
        # returning the stale response (earlier pattern matched first).
        self._compile_patterns()
        logger.info("FAQ added", keyword=keyword)

    def get_all_keywords(self) -> list[str]:
        """Get all FAQ keywords.

        Returns:
            List of FAQ keywords, in registration order
        """
        return list(self.faqs.keys())
|
||||
|
||||
|
||||
# Process-wide singleton; created lazily on first call to get_faq_library().
faq_library: Optional[FAQLibrary] = None


def get_faq_library() -> FAQLibrary:
    """Return the shared FAQLibrary, building it on first use."""
    global faq_library
    if faq_library is not None:
        return faq_library
    faq_library = FAQLibrary()
    return faq_library
|
||||
194
agent/utils/response_cache.py
Normal file
194
agent/utils/response_cache.py
Normal file
@@ -0,0 +1,194 @@
|
||||
"""
|
||||
LLM Response Cache for FAQ and common queries
|
||||
"""
|
||||
import hashlib
|
||||
import json
|
||||
from typing import Any, Optional
|
||||
from datetime import timedelta
|
||||
|
||||
from .cache import CacheManager
|
||||
from .logger import get_logger
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
class ResponseCache:
    """Cache LLM responses for common queries"""

    def __init__(
        self,
        cache_manager: Optional[CacheManager] = None,
        default_ttl: int = 3600  # 1 hour default
    ):
        """Initialize response cache.

        Args:
            cache_manager: Cache manager instance; None disables caching
            default_ttl: Default TTL in seconds for cached responses
        """
        self.cache = cache_manager
        self.default_ttl = default_ttl

    def _generate_key(
        self,
        model: str,
        messages: list[dict[str, str]],
        temperature: float = 0.7,
        **kwargs: Any
    ) -> str:
        """Build a deterministic cache key for one LLM request.

        Args:
            model: Model name
            messages: List of messages
            temperature: Temperature parameter
            **kwargs: Additional parameters (None values are ignored)

        Returns:
            Cache key string of the form ``llm_response:<model>:<hash>``
        """
        # Normalized request payload; None-valued extras are dropped so the
        # same logical request always hashes to the same key.
        payload: dict[str, Any] = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
        }
        for extra_name, extra_value in kwargs.items():
            if extra_value is not None:
                payload[extra_name] = extra_value

        # sort_keys makes serialization order-independent; a truncated
        # SHA-256 keeps the key short while staying collision-resistant.
        serialized = json.dumps(payload, sort_keys=True, ensure_ascii=False)
        digest = hashlib.sha256(serialized.encode()).hexdigest()[:16]

        return f"llm_response:{model}:{digest}"

    async def get(
        self,
        model: str,
        messages: list[dict[str, str]],
        temperature: float = 0.7,
        **kwargs: Any
    ) -> Optional[str]:
        """Get cached response if available.

        Args:
            model: Model name
            messages: List of messages
            temperature: Temperature parameter
            **kwargs: Additional parameters

        Returns:
            Cached response content or None
        """
        if not self.cache:
            return None

        key = self._generate_key(model, messages, temperature, **kwargs)
        cached = await self.cache.get(key)

        # Guard clause: nothing stored for this key
        if not cached:
            logger.debug("Cache miss", model=model, key=key)
            return None

        logger.info(
            "Cache hit",
            model=model,
            key=key,
            response_length=len(cached)
        )
        try:
            # Stored value is a JSON envelope (see set()); unwrap it
            return json.loads(cached).get("response")
        except json.JSONDecodeError:
            logger.warning("Invalid cached data", key=key)
            return None

    async def set(
        self,
        model: str,
        messages: list[dict[str, str]],
        response: str,
        temperature: float = 0.7,
        ttl: Optional[int] = None,
        **kwargs: Any
    ) -> None:
        """Cache LLM response.

        Args:
            model: Model name
            messages: List of messages
            response: Response content to cache
            temperature: Temperature parameter
            ttl: Time-to-live in seconds (falls back to default_ttl)
            **kwargs: Additional parameters
        """
        if not self.cache:
            return

        key = self._generate_key(model, messages, temperature, **kwargs)
        effective_ttl = ttl or self.default_ttl

        # Store response wrapped in a small metadata envelope
        envelope = {
            "response": response,
            "model": model,
            "response_length": len(response),
            "temperature": temperature
        }
        await self.cache.set(
            key,
            json.dumps(envelope, ensure_ascii=False),
            ttl=effective_ttl
        )

        logger.info(
            "Response cached",
            model=model,
            key=key,
            response_length=len(response),
            ttl=effective_ttl
        )

    async def invalidate(self, pattern: str = "llm_response:*") -> int:
        """Invalidate cached responses matching pattern.

        Args:
            pattern: Redis key pattern to match

        Returns:
            Number of keys deleted (currently always 0 -- see note below)
        """
        if not self.cache:
            return 0

        # NOTE: a real implementation needs a scan/delete operation on the
        # cache manager; until then this only records the request.
        logger.info("Cache invalidation requested", pattern=pattern)
        return 0

    def get_cache_stats(self) -> dict[str, Any]:
        """Get cache statistics.

        Returns:
            Dictionary with cache stats (enabled flag and default TTL)
        """
        return {
            "enabled": self.cache is not None,
            "default_ttl": self.default_ttl
        }
|
||||
|
||||
|
||||
# Process-wide singleton; created lazily on first call to get_response_cache().
response_cache: Optional[ResponseCache] = None


def get_response_cache() -> ResponseCache:
    """Return the shared ResponseCache, building it on first use."""
    global response_cache
    if response_cache is not None:
        return response_cache

    # Imported here to avoid a circular import at module load time
    from .cache import get_cache_manager

    response_cache = ResponseCache(
        cache_manager=get_cache_manager(),
        default_ttl=3600  # 1 hour
    )
    return response_cache
|
||||
Reference in New Issue
Block a user