feat: 初始化 B2B AI Shopping Assistant 项目

- 配置 Docker Compose 多服务编排
- 实现 Chatwoot + Agent 集成
- 配置 Strapi MCP 知识库
- 支持 7 种语言的 FAQ 系统
- 实现 LangGraph AI 工作流

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
wl
2026-01-14 19:25:22 +08:00
commit 3ad6eee0d9
59 changed files with 8078 additions and 0 deletions

31
agent/Dockerfile Normal file
View File

@@ -0,0 +1,31 @@
FROM python:3.11-slim

WORKDIR /app

# Flush stdout/stderr immediately so container log collection sees output in real time
ENV PYTHONUNBUFFERED=1

# Install system dependencies
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first for better caching
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Create logs directory
RUN mkdir -p /app/logs

# Expose port
EXPOSE 8000

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

# Run the application.
# NOTE: removed `--reload` — it is a development-only flag (spawns a file
# watcher and restarts workers on source changes) and must not run in a
# production image.
CMD ["python", "-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]

0
agent/__init__.py Normal file
View File

15
agent/agents/__init__.py Normal file
View File

@@ -0,0 +1,15 @@
"""Agents package"""
from .router import classify_intent, route_by_intent
from .customer_service import customer_service_agent
from .order import order_agent
from .aftersale import aftersale_agent
from .product import product_agent
__all__ = [
"classify_intent",
"route_by_intent",
"customer_service_agent",
"order_agent",
"aftersale_agent",
"product_agent",
]

251
agent/agents/aftersale.py Normal file
View File

@@ -0,0 +1,251 @@
"""
Aftersale Agent - Handles returns, exchanges, and complaints
"""
import json
from typing import Any
from core.state import AgentState, ConversationState, add_tool_call, set_response, update_context
from core.llm import get_llm_client, Message
from utils.logger import get_logger
logger = get_logger(__name__)
# System prompt for the aftersale agent (kept in Chinese — the language used
# with end users). It documents the agent's duties, the five aftersale MCP
# tools with their argument schemas, the JSON action protocol the model must
# answer with (call_tool / ask_info / respond), and guided return/exchange
# flows. The parsing code in `aftersale_agent` depends on the action names
# defined here.
AFTERSALE_AGENT_PROMPT = """你是一个专业的 B2B 售后服务助手。
你的职责是帮助用户处理售后问题,包括:
- 退货申请
- 换货申请
- 投诉处理
- 工单创建
- 售后进度查询
## 可用工具
1. **apply_return** - 退货申请
- order_id: 订单号
- items: 退货商品列表 [{item_id, quantity, reason}]
- description: 问题描述
- images: 图片URL列表可选
2. **apply_exchange** - 换货申请
- order_id: 订单号
- items: 换货商品列表 [{item_id, reason}]
- description: 问题描述
3. **create_complaint** - 创建投诉
- type: 投诉类型product_quality/service/logistics/other
- title: 投诉标题
- description: 详细描述
- related_order_id: 关联订单号(可选)
- attachments: 附件URL列表可选
4. **create_ticket** - 创建工单
- category: 工单类别
- priority: 优先级low/medium/high/urgent
- title: 工单标题
- description: 详细描述
5. **query_aftersale_status** - 查询售后状态
- aftersale_id: 售后单号(可选,不填查询全部)
## 工具调用格式
当需要使用工具时,请返回 JSON 格式:
```json
{
"action": "call_tool",
"tool_name": "工具名称",
"arguments": {
"参数名": "参数值"
}
}
```
当需要向用户询问更多信息时:
```json
{
"action": "ask_info",
"question": "需要询问的问题",
"required_fields": ["需要收集的字段列表"]
}
```
当可以直接回答时:
```json
{
"action": "respond",
"response": "回复内容"
}
```
## 售后流程引导
退货流程:
1. 确认订单号和退货商品
2. 了解退货原因
3. 收集问题描述和图片(质量问题时)
4. 提交退货申请
5. 告知用户后续流程
换货流程:
1. 确认订单号和换货商品
2. 了解换货原因
3. 确认是否有库存
4. 提交换货申请
## 注意事项
- 售后申请需要完整信息才能提交
- 对用户的问题要表示理解和歉意
- 复杂投诉建议转人工处理
- 金额较大的退款需要特别确认
"""
async def aftersale_agent(state: AgentState) -> AgentState:
    """Aftersale agent node.

    Handles returns, exchanges, complaints and aftersale queries. The LLM is
    asked to answer with one of the JSON protocol actions defined in
    AFTERSALE_AGENT_PROMPT (``call_tool`` / ``ask_info`` / ``respond``;
    ``handoff`` is also accepted defensively) and this node executes the
    chosen action.

    Args:
        state: Current agent state

    Returns:
        Updated state with tool calls or response
    """
    logger.info(
        "Aftersale agent processing",
        conversation_id=state["conversation_id"],
        sub_intent=state.get("sub_intent")
    )
    state["current_agent"] = "aftersale"
    state["agent_history"].append("aftersale")
    state["state"] = ConversationState.PROCESSING.value
    # Check if we have tool results to process (second pass after MCP calls)
    if state["tool_results"]:
        return await _generate_aftersale_response(state)
    # Build messages for LLM
    messages = [
        Message(role="system", content=AFTERSALE_AGENT_PROMPT),
    ]
    # Add conversation history
    for msg in state["messages"][-8:]:  # More history for aftersale context
        messages.append(Message(role=msg["role"], content=msg["content"]))
    # Build context info (user identity plus anything extracted so far)
    context_info = f"用户ID: {state['user_id']}\n账户ID: {state['account_id']}\n"
    if state["entities"]:
        context_info += f"已提取的信息: {json.dumps(state['entities'], ensure_ascii=False)}\n"
    if state["context"]:
        context_info += f"会话上下文: {json.dumps(state['context'], ensure_ascii=False)}\n"
    user_content = f"{context_info}\n用户消息: {state['current_message']}"
    messages.append(Message(role="user", content=user_content))
    try:
        llm = get_llm_client()
        # Lower temperature: aftersale operations should be conservative.
        response = await llm.chat(messages, temperature=0.5)
        # Parse response; the model may wrap the JSON in a markdown fence.
        content = response.content.strip()
        if content.startswith("```"):
            content = content.split("```")[1]
            if content.startswith("json"):
                content = content[4:]
        result = json.loads(content)
        action = result.get("action")
        if action == "call_tool":
            arguments = result.get("arguments", {})
            arguments["user_id"] = state["user_id"]
            # Use entity if available (avoids re-asking for the order number)
            if "order_id" not in arguments and state["entities"].get("order_id"):
                arguments["order_id"] = state["entities"]["order_id"]
            state = add_tool_call(
                state,
                tool_name=result["tool_name"],
                arguments=arguments,
                server="aftersale"
            )
            state["state"] = ConversationState.TOOL_CALLING.value
        elif action == "ask_info":
            state = set_response(state, result["question"])
            state["state"] = ConversationState.AWAITING_INFO.value
            # Store required fields in context for next iteration
            if result.get("required_fields"):
                state = update_context(state, {"required_fields": result["required_fields"]})
        elif action == "respond":
            state = set_response(state, result["response"])
            state["state"] = ConversationState.GENERATING.value
        elif action == "handoff":
            state["requires_human"] = True
            state["handoff_reason"] = result.get("reason", "Complex aftersale issue")
        return state
    except json.JSONDecodeError:
        # LLM answered in plain text instead of the JSON protocol; use the
        # raw text as the user-facing reply.
        state = set_response(state, response.content)
        return state
    except Exception as e:
        logger.error("Aftersale agent failed", error=str(e))
        state["error"] = str(e)
        return state
async def _generate_aftersale_response(state: AgentState) -> AgentState:
    """Generate the user-facing reply from aftersale tool results.

    Summarises every tool result (success or failure) into a prompt and asks
    the LLM for a plain-text reply.
    """
    tool_context = []
    for result in state["tool_results"]:
        if result["success"]:
            data = result["data"]
            tool_context.append(f"工具 {result['tool_name']} 返回:\n{json.dumps(data, ensure_ascii=False, indent=2)}")
            # Extract aftersale_id for context so follow-up turns can refer
            # to "this ticket" without repeating the number.
            if isinstance(data, dict) and data.get("aftersale_id"):
                state = update_context(state, {"aftersale_id": data["aftersale_id"]})
        else:
            tool_context.append(f"工具 {result['tool_name']} 执行失败: {result['error']}")
    prompt = f"""基于以下售后系统返回的信息,生成对用户的回复。
用户问题: {state["current_message"]}
系统返回信息:
{chr(10).join(tool_context)}
请生成一个体贴、专业的回复:
- 如果是申请提交成功,告知用户售后单号和后续流程
- 如果是状态查询,清晰说明当前进度
- 如果申请失败,说明原因并提供解决方案
- 对用户的问题表示理解
只返回回复内容,不要返回 JSON。"""
    messages = [
        Message(role="system", content="你是一个专业的售后客服助手,请根据系统返回的信息回答用户的售后问题。"),
        Message(role="user", content=prompt)
    ]
    try:
        llm = get_llm_client()
        response = await llm.chat(messages, temperature=0.7)
        state = set_response(state, response.content)
        return state
    except Exception as e:
        logger.error("Aftersale response generation failed", error=str(e))
        # Fall back to a generic apology so the user is never left hanging.
        state = set_response(state, "抱歉,处理售后请求时遇到问题。请稍后重试或联系人工客服。")
        return state

View File

@@ -0,0 +1,187 @@
"""
Customer Service Agent - Handles FAQ and general inquiries
"""
import json
from typing import Any
from core.state import AgentState, ConversationState, add_tool_call, set_response
from core.llm import get_llm_client, Message
from utils.logger import get_logger
logger = get_logger(__name__)
# System prompt for the customer-service agent (kept in Chinese — the
# language used with end users). Defines its scope (FAQ, company info,
# policies), the three Strapi MCP tools, and the JSON action protocol
# (call_tool / respond / handoff) that `customer_service_agent` parses.
CUSTOMER_SERVICE_PROMPT = """你是一个专业的 B2B 购物网站客服助手。
你的职责是回答用户的一般性问题,包括:
- 常见问题解答 (FAQ)
- 公司信息查询
- 政策咨询(退换货政策、隐私政策等)
- 产品使用指南
- 其他一般性咨询
## 可用工具
你可以使用以下工具获取信息:
1. **query_faq** - 搜索 FAQ 常见问题
- query: 搜索关键词
- category: 分类(可选)
2. **get_company_info** - 获取公司信息
- section: 信息类别about_us, contact, etc.
3. **get_policy** - 获取政策文档
- policy_type: 政策类型return_policy, privacy_policy, etc.
## 工具调用格式
当需要使用工具时,请返回 JSON 格式:
```json
{
"action": "call_tool",
"tool_name": "工具名称",
"arguments": {
"参数名": "参数值"
}
}
```
当可以直接回答时,请返回:
```json
{
"action": "respond",
"response": "回复内容"
}
```
当需要转人工时,请返回:
```json
{
"action": "handoff",
"reason": "转人工原因"
}
```
## 注意事项
- 保持专业、友好的语气
- 如果不确定答案,建议用户联系人工客服
- 不要编造信息,只使用工具返回的数据
"""
async def customer_service_agent(state: AgentState) -> AgentState:
    """Customer service agent node.

    Handles FAQ, company info, and general inquiries using Strapi MCP tools.
    The LLM answers with one of the JSON protocol actions defined in
    CUSTOMER_SERVICE_PROMPT (call_tool / respond / handoff) and this node
    executes the chosen action.

    Args:
        state: Current agent state

    Returns:
        Updated state with tool calls or response
    """
    logger.info(
        "Customer service agent processing",
        conversation_id=state["conversation_id"]
    )
    state["current_agent"] = "customer_service"
    state["agent_history"].append("customer_service")
    state["state"] = ConversationState.PROCESSING.value
    # Check if we have tool results to process (second pass after MCP calls)
    if state["tool_results"]:
        return await _generate_response_from_results(state)
    # Build messages for LLM
    messages = [
        Message(role="system", content=CUSTOMER_SERVICE_PROMPT),
    ]
    # Add conversation history
    for msg in state["messages"][-6:]:
        messages.append(Message(role=msg["role"], content=msg["content"]))
    # Add current message
    messages.append(Message(role="user", content=state["current_message"]))
    try:
        llm = get_llm_client()
        response = await llm.chat(messages, temperature=0.7)
        # Parse response; the model may wrap the JSON in a markdown fence.
        content = response.content.strip()
        if content.startswith("```"):
            content = content.split("```")[1]
            if content.startswith("json"):
                content = content[4:]
        result = json.loads(content)
        action = result.get("action")
        if action == "call_tool":
            # Add tool call to state; all customer-service tools live on the
            # Strapi MCP server.
            state = add_tool_call(
                state,
                tool_name=result["tool_name"],
                arguments=result.get("arguments", {}),
                server="strapi"
            )
            state["state"] = ConversationState.TOOL_CALLING.value
        elif action == "respond":
            state = set_response(state, result["response"])
            state["state"] = ConversationState.GENERATING.value
        elif action == "handoff":
            state["requires_human"] = True
            state["handoff_reason"] = result.get("reason", "User request")
        return state
    except json.JSONDecodeError:
        # LLM returned plain text, use as response
        state = set_response(state, response.content)
        return state
    except Exception as e:
        logger.error("Customer service agent failed", error=str(e))
        state["error"] = str(e)
        return state
async def _generate_response_from_results(state: AgentState) -> AgentState:
    """Generate the user-facing reply from customer-service tool results."""
    # Build context from tool results (successes and failures alike).
    tool_context = []
    for result in state["tool_results"]:
        if result["success"]:
            tool_context.append(f"工具 {result['tool_name']} 返回:\n{json.dumps(result['data'], ensure_ascii=False, indent=2)}")
        else:
            tool_context.append(f"工具 {result['tool_name']} 执行失败: {result['error']}")
    prompt = f"""基于以下工具返回的信息,生成对用户的回复。
用户问题: {state["current_message"]}
工具返回信息:
{chr(10).join(tool_context)}
请生成一个友好、专业的回复。如果工具没有返回有用信息,请诚实告知用户并建议其他方式获取帮助。
只返回回复内容,不要返回 JSON。"""
    messages = [
        Message(role="system", content="你是一个专业的 B2B 客服助手,请根据工具返回的信息回答用户问题。"),
        Message(role="user", content=prompt)
    ]
    try:
        llm = get_llm_client()
        response = await llm.chat(messages, temperature=0.7)
        state = set_response(state, response.content)
        return state
    except Exception as e:
        logger.error("Response generation failed", error=str(e))
        # Fall back to a generic apology so the user always gets a reply.
        state = set_response(state, "抱歉,处理您的请求时遇到问题。请稍后重试或联系人工客服。")
        return state

231
agent/agents/order.py Normal file
View File

@@ -0,0 +1,231 @@
"""
Order Agent - Handles order-related queries and operations
"""
import json
from typing import Any
from core.state import AgentState, ConversationState, add_tool_call, set_response, update_context
from core.llm import get_llm_client, Message
from utils.logger import get_logger
logger = get_logger(__name__)
# System prompt for the order agent (kept in Chinese — the language used with
# end users). Defines the five order MCP tools with their arguments and the
# JSON action protocol (call_tool / ask_info / respond) that `order_agent`
# parses, plus safety notes for sensitive operations (modify/cancel).
ORDER_AGENT_PROMPT = """你是一个专业的 B2B 订单服务助手。
你的职责是帮助用户处理订单相关的问题,包括:
- 订单查询
- 物流跟踪
- 订单修改
- 订单取消
- 发票获取
## 可用工具
1. **query_order** - 查询订单
- order_id: 订单号(可选,不填则查询最近订单)
- date_start: 开始日期(可选)
- date_end: 结束日期(可选)
- status: 订单状态(可选)
2. **track_logistics** - 物流跟踪
- order_id: 订单号
- tracking_number: 物流单号(可选)
3. **modify_order** - 修改订单
- order_id: 订单号
- modifications: 修改内容address/items/quantity 等)
4. **cancel_order** - 取消订单
- order_id: 订单号
- reason: 取消原因
5. **get_invoice** - 获取发票
- order_id: 订单号
- invoice_type: 发票类型normal/vat
## 工具调用格式
当需要使用工具时,请返回 JSON 格式:
```json
{
"action": "call_tool",
"tool_name": "工具名称",
"arguments": {
"参数名": "参数值"
}
}
```
当需要向用户询问更多信息时:
```json
{
"action": "ask_info",
"question": "需要询问的问题"
}
```
当可以直接回答时:
```json
{
"action": "respond",
"response": "回复内容"
}
```
## 重要提示
- 订单修改和取消是敏感操作,需要确认订单号
- 如果用户没有提供订单号,先查询他的最近订单
- 物流查询需要订单号或物流单号
- 对于批量操作或大金额订单,建议转人工处理
"""
async def order_agent(state: AgentState) -> AgentState:
    """Order agent node.

    Handles order queries, tracking, modifications, and cancellations. The
    LLM answers with one of the JSON protocol actions defined in
    ORDER_AGENT_PROMPT (``call_tool`` / ``ask_info`` / ``respond``;
    ``handoff`` is also accepted defensively) and this node executes the
    chosen action.

    Args:
        state: Current agent state

    Returns:
        Updated state with tool calls or response
    """
    logger.info(
        "Order agent processing",
        conversation_id=state["conversation_id"],
        sub_intent=state.get("sub_intent")
    )
    state["current_agent"] = "order"
    state["agent_history"].append("order")
    state["state"] = ConversationState.PROCESSING.value
    # Check if we have tool results to process (second pass after MCP calls)
    if state["tool_results"]:
        return await _generate_order_response(state)
    # Build messages for LLM
    messages = [
        Message(role="system", content=ORDER_AGENT_PROMPT),
    ]
    # Add conversation history
    for msg in state["messages"][-6:]:
        messages.append(Message(role=msg["role"], content=msg["content"]))
    # Build context info (user identity plus anything extracted so far)
    context_info = f"用户ID: {state['user_id']}\n账户ID: {state['account_id']}\n"
    # Add entities if available
    if state["entities"]:
        context_info += f"已提取的信息: {json.dumps(state['entities'], ensure_ascii=False)}\n"
    # Add existing context
    if state["context"].get("order_id"):
        context_info += f"当前讨论的订单号: {state['context']['order_id']}\n"
    user_content = f"{context_info}\n用户消息: {state['current_message']}"
    messages.append(Message(role="user", content=user_content))
    try:
        llm = get_llm_client()
        # Lower temperature: order operations should be conservative.
        response = await llm.chat(messages, temperature=0.5)
        # Parse response; the model may wrap the JSON in a markdown fence.
        content = response.content.strip()
        if content.startswith("```"):
            content = content.split("```")[1]
            if content.startswith("json"):
                content = content[4:]
        result = json.loads(content)
        action = result.get("action")
        if action == "call_tool":
            # Inject user context into arguments so the MCP server can scope
            # the query to this customer.
            arguments = result.get("arguments", {})
            arguments["user_id"] = state["user_id"]
            arguments["account_id"] = state["account_id"]
            # Use entity if available (avoids re-asking for the order number)
            if "order_id" not in arguments and state["entities"].get("order_id"):
                arguments["order_id"] = state["entities"]["order_id"]
            state = add_tool_call(
                state,
                tool_name=result["tool_name"],
                arguments=arguments,
                server="order"
            )
            state["state"] = ConversationState.TOOL_CALLING.value
        elif action == "ask_info":
            state = set_response(state, result["question"])
            state["state"] = ConversationState.AWAITING_INFO.value
        elif action == "respond":
            state = set_response(state, result["response"])
            state["state"] = ConversationState.GENERATING.value
        elif action == "handoff":
            state["requires_human"] = True
            state["handoff_reason"] = result.get("reason", "Complex order operation")
        return state
    except json.JSONDecodeError:
        # LLM answered in plain text instead of the JSON protocol.
        state = set_response(state, response.content)
        return state
    except Exception as e:
        logger.error("Order agent failed", error=str(e))
        state["error"] = str(e)
        return state
async def _generate_order_response(state: AgentState) -> AgentState:
    """Generate the user-facing reply from order tool results."""
    # Build context from tool results
    tool_context = []
    for result in state["tool_results"]:
        if result["success"]:
            data = result["data"]
            tool_context.append(f"工具 {result['tool_name']} 返回:\n{json.dumps(data, ensure_ascii=False, indent=2)}")
            # Extract order_id for context so follow-up turns can refer to
            # "this order" without repeating the number; a list result uses
            # the first order returned.
            if isinstance(data, dict):
                if data.get("order_id"):
                    state = update_context(state, {"order_id": data["order_id"]})
                elif data.get("orders") and len(data["orders"]) > 0:
                    state = update_context(state, {"order_id": data["orders"][0].get("order_id")})
        else:
            tool_context.append(f"工具 {result['tool_name']} 执行失败: {result['error']}")
    prompt = f"""基于以下订单系统返回的信息,生成对用户的回复。
用户问题: {state["current_message"]}
系统返回信息:
{chr(10).join(tool_context)}
请生成一个清晰、友好的回复,包含订单的关键信息(订单号、状态、金额、物流等)。
如果是物流信息,请按时间线整理展示。
只返回回复内容,不要返回 JSON。"""
    messages = [
        Message(role="system", content="你是一个专业的订单客服助手,请根据系统返回的信息回答用户的订单问题。"),
        Message(role="user", content=prompt)
    ]
    try:
        llm = get_llm_client()
        response = await llm.chat(messages, temperature=0.7)
        state = set_response(state, response.content)
        return state
    except Exception as e:
        logger.error("Order response generation failed", error=str(e))
        # Fall back to a generic apology so the user always gets a reply.
        state = set_response(state, "抱歉,处理订单信息时遇到问题。请稍后重试或联系人工客服。")
        return state

256
agent/agents/product.py Normal file
View File

@@ -0,0 +1,256 @@
"""
Product Agent - Handles product search, recommendations, and quotes
"""
import json
from typing import Any
from core.state import AgentState, ConversationState, add_tool_call, set_response, update_context
from core.llm import get_llm_client, Message
from utils.logger import get_logger
logger = get_logger(__name__)
# System prompt for the product agent (kept in Chinese — the language used
# with end users). Defines the five product MCP tools with their arguments,
# the JSON action protocol (call_tool / ask_info / respond) that
# `product_agent` parses, and B2B quoting/recommendation guidance.
PRODUCT_AGENT_PROMPT = """你是一个专业的 B2B 商品顾问助手。
你的职责是帮助用户找到合适的商品,包括:
- 商品搜索
- 智能推荐
- B2B 询价
- 库存查询
- 商品详情
## 可用工具
1. **search_products** - 搜索商品
- query: 搜索关键词
- filters: 过滤条件category, price_range, brand 等)
- sort: 排序方式price_asc/price_desc/sales/latest
- page: 页码
- page_size: 每页数量
2. **get_product_detail** - 获取商品详情
- product_id: 商品ID
3. **recommend_products** - 智能推荐
- context: 推荐上下文(可包含当前查询、浏览历史等)
- limit: 推荐数量
4. **get_quote** - B2B 询价
- product_id: 商品ID
- quantity: 采购数量
- delivery_address: 收货地址(可选,用于计算运费)
5. **check_inventory** - 库存查询
- product_ids: 商品ID列表
- warehouse: 仓库(可选)
## 工具调用格式
当需要使用工具时,请返回 JSON 格式:
```json
{
"action": "call_tool",
"tool_name": "工具名称",
"arguments": {
"参数名": "参数值"
}
}
```
当需要向用户询问更多信息时:
```json
{
"action": "ask_info",
"question": "需要询问的问题"
}
```
当可以直接回答时:
```json
{
"action": "respond",
"response": "回复内容"
}
```
## B2B 询价特点
- 大批量采购通常有阶梯价格
- 可能需要考虑运费
- 企业客户可能有专属折扣
- 报价通常有有效期
## 商品推荐策略
- 根据用户采购历史推荐
- 根据当前查询语义推荐
- 根据企业行业特点推荐
- 根据季节性和热门商品推荐
## 注意事项
- 帮助用户准确描述需求
- 如果搜索结果太多,建议用户缩小范围
- 询价时确认数量,因为会影响价格
- 库存紧张时及时告知用户
"""
async def product_agent(state: AgentState) -> AgentState:
    """Product agent node.

    Handles product search, recommendations, quotes and inventory queries.
    The LLM answers with one of the JSON protocol actions defined in
    PRODUCT_AGENT_PROMPT and this node executes the chosen action.

    Args:
        state: Current agent state

    Returns:
        Updated state with tool calls or response
    """
    logger.info(
        "Product agent processing",
        conversation_id=state["conversation_id"],
        sub_intent=state.get("sub_intent")
    )
    state["current_agent"] = "product"
    state["agent_history"].append("product")
    state["state"] = ConversationState.PROCESSING.value
    # Check if we have tool results to process (second pass after MCP calls)
    if state["tool_results"]:
        return await _generate_product_response(state)
    # Build messages for LLM
    messages = [
        Message(role="system", content=PRODUCT_AGENT_PROMPT),
    ]
    # Add conversation history
    for msg in state["messages"][-6:]:
        messages.append(Message(role=msg["role"], content=msg["content"]))
    # Build context info (user identity plus anything extracted so far)
    context_info = f"用户ID: {state['user_id']}\n账户ID: {state['account_id']}\n"
    if state["entities"]:
        context_info += f"已提取的信息: {json.dumps(state['entities'], ensure_ascii=False)}\n"
    if state["context"].get("product_id"):
        context_info += f"当前讨论的商品ID: {state['context']['product_id']}\n"
    if state["context"].get("recent_searches"):
        context_info += f"最近搜索: {state['context']['recent_searches']}\n"
    user_content = f"{context_info}\n用户消息: {state['current_message']}"
    messages.append(Message(role="user", content=user_content))
    try:
        llm = get_llm_client()
        response = await llm.chat(messages, temperature=0.7)
        # Parse response; the model may wrap the JSON in a markdown fence.
        content = response.content.strip()
        if content.startswith("```"):
            content = content.split("```")[1]
            if content.startswith("json"):
                content = content[4:]
        result = json.loads(content)
        action = result.get("action")
        if action == "call_tool":
            arguments = result.get("arguments", {})
            # Inject context for recommendation
            if result["tool_name"] == "recommend_products":
                arguments["user_id"] = state["user_id"]
                arguments["account_id"] = state["account_id"]
            # Inject context for quote
            if result["tool_name"] == "get_quote":
                arguments["account_id"] = state["account_id"]
            # Use entity if available (avoids re-asking the user)
            if "product_id" not in arguments and state["entities"].get("product_id"):
                arguments["product_id"] = state["entities"]["product_id"]
            if "quantity" not in arguments and state["entities"].get("quantity"):
                arguments["quantity"] = state["entities"]["quantity"]
            state = add_tool_call(
                state,
                tool_name=result["tool_name"],
                arguments=arguments,
                server="product"
            )
            state["state"] = ConversationState.TOOL_CALLING.value
        elif action == "ask_info":
            state = set_response(state, result["question"])
            state["state"] = ConversationState.AWAITING_INFO.value
        elif action == "respond":
            state = set_response(state, result["response"])
            state["state"] = ConversationState.GENERATING.value
        elif action == "handoff":
            # BUGFIX: the "handoff" action previously fell through silently,
            # leaving the state without any response. Mirror the handling in
            # the order/aftersale/customer-service agents.
            state["requires_human"] = True
            state["handoff_reason"] = result.get("reason", "Complex product inquiry")
        else:
            # Unknown action: degrade gracefully by using the raw LLM text
            # instead of leaving the user without a reply.
            state = set_response(state, response.content)
            state["state"] = ConversationState.GENERATING.value
        return state
    except json.JSONDecodeError:
        # LLM answered in plain text instead of the JSON protocol.
        state = set_response(state, response.content)
        return state
    except Exception as e:
        logger.error("Product agent failed", error=str(e))
        state["error"] = str(e)
        return state
async def _generate_product_response(state: AgentState) -> AgentState:
    """Generate the user-facing reply from product tool results."""
    tool_context = []
    for result in state["tool_results"]:
        if result["success"]:
            data = result["data"]
            tool_context.append(f"工具 {result['tool_name']} 返回:\n{json.dumps(data, ensure_ascii=False, indent=2)}")
            # Extract product context so follow-up turns can refer back to
            # the items just discussed.
            if isinstance(data, dict):
                if data.get("product_id"):
                    state = update_context(state, {"product_id": data["product_id"]})
                if data.get("products"):
                    # Store recent search results (first five IDs only)
                    product_ids = [p.get("product_id") for p in data["products"][:5]]
                    state = update_context(state, {"recent_product_ids": product_ids})
        else:
            tool_context.append(f"工具 {result['tool_name']} 执行失败: {result['error']}")
    prompt = f"""基于以下商品系统返回的信息,生成对用户的回复。
用户问题: {state["current_message"]}
系统返回信息:
{chr(10).join(tool_context)}
请生成一个清晰、有帮助的回复:
- 如果是搜索结果,展示商品名称、价格、规格等关键信息
- 如果是询价结果,清晰说明单价、总价、折扣、有效期等
- 如果是推荐商品,简要说明推荐理由
- 如果是库存查询,告知可用数量和发货时间
- 结果较多时可以总结关键信息
只返回回复内容,不要返回 JSON。"""
    messages = [
        Message(role="system", content="你是一个专业的商品顾问,请根据系统返回的信息回答用户的商品问题。"),
        Message(role="user", content=prompt)
    ]
    try:
        llm = get_llm_client()
        response = await llm.chat(messages, temperature=0.7)
        state = set_response(state, response.content)
        return state
    except Exception as e:
        logger.error("Product response generation failed", error=str(e))
        # Fall back to a generic apology so the user always gets a reply.
        state = set_response(state, "抱歉,处理商品信息时遇到问题。请稍后重试或联系人工客服。")
        return state

220
agent/agents/router.py Normal file
View File

@@ -0,0 +1,220 @@
"""
Router Agent - Intent recognition and routing
"""
import json
from typing import Any, Optional
from core.state import AgentState, Intent, ConversationState, set_intent, add_entity
from core.llm import get_llm_client, Message
from utils.logger import get_logger
logger = get_logger(__name__)
# Intent classification prompt
CLASSIFICATION_PROMPT = """你是一个 B2B 购物网站的智能助手路由器。
你的任务是分析用户消息,识别用户意图并提取关键实体。
## 可用意图分类
1. **customer_service** - 通用咨询
- FAQ 问答
- 产品使用问题
- 公司信息查询
- 政策咨询(退换货政策、隐私政策等)
2. **order** - 订单相关
- 订单查询("我的订单在哪""查一下订单"
- 物流跟踪("快递到哪了""什么时候到货"
- 订单修改("改一下收货地址""修改订单数量"
- 订单取消("取消订单""不想要了"
- 发票查询("开发票""要发票"
3. **aftersale** - 售后服务
- 退货申请("退货""不满意想退"
- 换货申请("换货""换一个"
- 投诉("投诉""服务态度差"
- 工单/问题反馈
4. **product** - 商品相关
- 商品搜索("有没有xx""找一下xx"
- 商品推荐("推荐""有什么好的"
- 询价("多少钱""批发价""大量购买价格"
- 库存查询("有货吗""还有多少"
5. **human_handoff** - 需要转人工
- 用户明确要求转人工
- 复杂问题 AI 无法处理
- 敏感问题需要人工处理
## 实体提取
请从消息中提取以下实体(如果存在):
- order_id: 订单号(如 ORD123456
- product_id: 商品ID
- product_name: 商品名称
- quantity: 数量
- date_reference: 时间引用(今天、昨天、上周、具体日期等)
- tracking_number: 物流单号
- phone: 电话号码
- address: 地址信息
## 输出格式
请以 JSON 格式返回,包含以下字段:
```json
{
"intent": "意图分类",
"confidence": 0.95,
"sub_intent": "子意图(可选)",
"entities": {
"entity_type": "entity_value"
},
"reasoning": "简短的推理说明"
}
```
## 注意事项
- 如果意图不明确,置信度应该较低
- 如果无法确定意图,返回 "unknown"
- 实体提取要准确,没有的字段不要填写
"""
async def classify_intent(state: AgentState) -> AgentState:
    """Classify user intent and extract entities.

    This is the first node in the workflow that analyzes the user's message
    and determines which agent should handle it. On an explicit handoff
    intent or a confidence below 0.5, the conversation is flagged for a
    human instead.

    Args:
        state: Current agent state

    Returns:
        Updated state with intent and entities
    """
    logger.info(
        "Classifying intent",
        conversation_id=state["conversation_id"],
        message=state["current_message"][:100]
    )
    state["state"] = ConversationState.CLASSIFYING.value
    state["step_count"] += 1
    # Build context from conversation history so follow-up messages like
    # "cancel it" can be resolved against the order/product under discussion.
    context_summary = ""
    if state["context"]:
        context_parts = []
        if state["context"].get("order_id"):
            context_parts.append(f"当前讨论的订单: {state['context']['order_id']}")
        if state["context"].get("product_id"):
            context_parts.append(f"当前讨论的商品: {state['context']['product_id']}")
        if context_parts:
            context_summary = "\n".join(context_parts)
    # Build messages for LLM
    messages = [
        Message(role="system", content=CLASSIFICATION_PROMPT),
    ]
    # Add recent conversation history for context
    for msg in state["messages"][-6:]:  # Last 3 turns
        messages.append(Message(role=msg["role"], content=msg["content"]))
    # Add current message with context
    user_content = f"用户消息: {state['current_message']}"
    if context_summary:
        user_content += f"\n\n当前上下文:\n{context_summary}"
    messages.append(Message(role="user", content=user_content))
    try:
        llm = get_llm_client()
        # Low temperature: classification should be near-deterministic.
        response = await llm.chat(messages, temperature=0.3)
        # Parse JSON response
        content = response.content.strip()
        # Handle markdown code blocks
        if content.startswith("```"):
            content = content.split("```")[1]
            if content.startswith("json"):
                content = content[4:]
        result = json.loads(content)
        # Extract intent; labels outside the Intent enum map to UNKNOWN.
        intent_str = result.get("intent", "unknown")
        try:
            intent = Intent(intent_str)
        except ValueError:
            intent = Intent.UNKNOWN
        confidence = float(result.get("confidence", 0.5))
        sub_intent = result.get("sub_intent")
        # Set intent in state
        state = set_intent(state, intent, confidence, sub_intent)
        # Extract entities (empty/falsy values are skipped)
        entities = result.get("entities", {})
        for entity_type, entity_value in entities.items():
            if entity_value:
                state = add_entity(state, entity_type, entity_value)
        logger.info(
            "Intent classified",
            intent=intent.value,
            confidence=confidence,
            entities=list(entities.keys())
        )
        # Check if human handoff is needed
        if intent == Intent.HUMAN_HANDOFF or confidence < 0.5:
            state["requires_human"] = True
            state["handoff_reason"] = result.get("reasoning", "Intent unclear")
        return state
    except json.JSONDecodeError as e:
        # Malformed classifier output: fall back to UNKNOWN with zero
        # confidence so routing still proceeds.
        logger.error("Failed to parse intent response", error=str(e))
        state["intent"] = Intent.UNKNOWN.value
        state["intent_confidence"] = 0.0
        return state
    except Exception as e:
        logger.error("Intent classification failed", error=str(e))
        state["error"] = str(e)
        state["intent"] = Intent.UNKNOWN.value
        return state
def route_by_intent(state: AgentState) -> str:
    """Select the next graph node for the classified intent.

    Used as the conditional-edge function in the LangGraph workflow.

    Args:
        state: Current agent state

    Returns:
        Name of the next node to execute
    """
    # A pending human handoff overrides whatever intent was classified.
    if state.get("requires_human", False):
        return "human_handoff"

    intent = state.get("intent")
    if intent == Intent.ORDER.value:
        return "order_agent"
    if intent == Intent.AFTERSALE.value:
        return "aftersale_agent"
    if intent == Intent.PRODUCT.value:
        return "product_agent"
    if intent == Intent.HUMAN_HANDOFF.value:
        return "human_handoff"
    # customer_service, unknown, and any unexpected label all fall back to
    # the customer-service agent.
    return "customer_service_agent"

60
agent/config.py Normal file
View File

@@ -0,0 +1,60 @@
"""
Configuration management for B2B Shopping AI Assistant
"""
from typing import Optional
from pydantic_settings import BaseSettings
from pydantic import Field
class Settings(BaseSettings):
    """Application settings loaded from environment variables.

    Values are read from the process environment (case-insensitive) and/or a
    local ``.env`` file. Fields declared with ``Field(...)`` are required and
    make startup fail fast when the corresponding variable is missing.
    """
    # ============ AI Model ============
    zhipu_api_key: str = Field(..., description="ZhipuAI API Key")
    zhipu_model: str = Field(default="glm-4", description="ZhipuAI Model name")
    # ============ Redis ============
    redis_host: str = Field(default="localhost", description="Redis host")
    redis_port: int = Field(default=6379, description="Redis port")
    redis_password: Optional[str] = Field(default=None, description="Redis password")
    redis_db: int = Field(default=0, description="Redis database number")
    # ============ Chatwoot ============
    chatwoot_api_url: str = Field(..., description="Chatwoot API URL")
    chatwoot_api_token: str = Field(..., description="Chatwoot API Token")
    chatwoot_webhook_secret: Optional[str] = Field(default=None, description="Chatwoot Webhook Secret")
    # ============ Strapi CMS ============
    strapi_api_url: str = Field(..., description="Strapi API URL")
    strapi_api_token: str = Field(..., description="Strapi API Token")
    # ============ Hyperf API ============
    hyperf_api_url: str = Field(..., description="Hyperf API URL")
    hyperf_api_token: str = Field(..., description="Hyperf API Token")
    # ============ MCP Servers ============
    # Defaults match the docker-compose port layout (8001-8004).
    strapi_mcp_url: str = Field(default="http://localhost:8001", description="Strapi MCP URL")
    order_mcp_url: str = Field(default="http://localhost:8002", description="Order MCP URL")
    aftersale_mcp_url: str = Field(default="http://localhost:8003", description="Aftersale MCP URL")
    product_mcp_url: str = Field(default="http://localhost:8004", description="Product MCP URL")
    # ============ Application Config ============
    log_level: str = Field(default="INFO", description="Log level")
    max_conversation_steps: int = Field(default=10, description="Max steps in conversation")
    conversation_timeout: int = Field(default=3600, description="Conversation timeout in seconds")

    class Config:
        # pydantic-settings configuration: read a UTF-8 .env file and match
        # environment variable names case-insensitively.
        env_file = ".env"
        env_file_encoding = "utf-8"
        case_sensitive = False

# Global settings instance (constructed at import time; raises if required
# environment variables are missing).
settings = Settings()
def get_redis_url() -> str:
    """Build the Redis connection URL from the global settings.

    The ``:password@`` authentication segment is included only when a
    non-blank password is configured.
    """
    auth = ""
    if settings.redis_password and settings.redis_password.strip():
        auth = f":{settings.redis_password}@"
    return f"redis://{auth}{settings.redis_host}:{settings.redis_port}/{settings.redis_db}"

18
agent/core/__init__.py Normal file
View File

@@ -0,0 +1,18 @@
"""Agent core package"""
from .state import AgentState, Intent, ConversationState, create_initial_state
from .llm import ZhipuLLMClient, get_llm_client, Message, LLMResponse
from .graph import create_agent_graph, get_agent_graph, process_message
__all__ = [
"AgentState",
"Intent",
"ConversationState",
"create_initial_state",
"ZhipuLLMClient",
"get_llm_client",
"Message",
"LLMResponse",
"create_agent_graph",
"get_agent_graph",
"process_message",
]

404
agent/core/graph.py Normal file
View File

@@ -0,0 +1,404 @@
"""
LangGraph workflow definition for B2B Shopping AI Assistant
"""
from typing import Literal
import httpx
from langgraph.graph import StateGraph, END
from .state import AgentState, ConversationState, mark_finished, add_tool_result, set_response
from agents.router import classify_intent, route_by_intent
from agents.customer_service import customer_service_agent
from agents.order import order_agent
from agents.aftersale import aftersale_agent
from agents.product import product_agent
from config import settings
from utils.logger import get_logger
logger = get_logger(__name__)
# ============ Node Functions ============
async def receive_message(state: AgentState) -> AgentState:
    """Entry node: record the incoming user message.

    Appends the current message to the conversation history and resets the
    workflow state to INITIAL before intent classification runs.
    """
    incoming = state["current_message"]
    logger.info(
        "Receiving message",
        conversation_id=state["conversation_id"],
        message_length=len(incoming)
    )
    # Record this turn in the running history.
    user_turn = {"role": "user", "content": incoming}
    state["messages"].append(user_turn)
    state["state"] = ConversationState.INITIAL.value
    return state
async def call_mcp_tools(state: AgentState) -> AgentState:
    """Execute pending MCP tool calls.

    Dispatches every entry in ``state["tool_calls"]`` to its MCP server
    (selected by the call's ``server`` field), records one result per call
    via ``add_tool_result``, and clears the pending queue. Failures are
    captured as unsuccessful results instead of raised, so a single bad tool
    call cannot abort the workflow.
    """
    if not state["tool_calls"]:
        logger.debug("No tool calls to execute")
        return state
    logger.info(
        "Executing MCP tools",
        tool_count=len(state["tool_calls"])
    )
    # MCP server URL mapping (configured via settings / environment).
    mcp_servers = {
        "strapi": settings.strapi_mcp_url,
        "order": settings.order_mcp_url,
        "aftersale": settings.aftersale_mcp_url,
        "product": settings.product_mcp_url
    }
    async with httpx.AsyncClient(timeout=30.0) as client:
        for tool_call in state["tool_calls"]:
            server = tool_call["server"]
            tool_name = tool_call["tool_name"]
            arguments = tool_call["arguments"]
            server_url = mcp_servers.get(server)
            if not server_url:
                # Agent referenced a server we don't know about: record the
                # failure and continue with the remaining calls.
                state = add_tool_result(
                    state,
                    tool_name=tool_name,
                    success=False,
                    data=None,
                    error=f"Unknown MCP server: {server}"
                )
                continue
            try:
                # Call MCP tool endpoint
                response = await client.post(
                    f"{server_url}/tools/{tool_name}",
                    json=arguments
                )
                response.raise_for_status()
                result = response.json()
                state = add_tool_result(
                    state,
                    tool_name=tool_name,
                    success=True,
                    data=result
                )
                logger.debug(
                    "Tool executed successfully",
                    tool=tool_name,
                    server=server
                )
            except httpx.HTTPStatusError as e:
                # Non-2xx from the MCP server: record the status code.
                logger.error(
                    "Tool HTTP error",
                    tool=tool_name,
                    status=e.response.status_code
                )
                state = add_tool_result(
                    state,
                    tool_name=tool_name,
                    success=False,
                    data=None,
                    error=f"HTTP {e.response.status_code}"
                )
            except Exception as e:
                # Connection errors, timeouts, bad JSON, etc.
                logger.error("Tool execution failed", tool=tool_name, error=str(e))
                state = add_tool_result(
                    state,
                    tool_name=tool_name,
                    success=False,
                    data=None,
                    error=str(e)
                )
    # Clear pending tool calls; results stay in state["tool_results"] for
    # the agent's second pass.
    state["tool_calls"] = []
    return state
async def human_handoff(state: AgentState) -> AgentState:
    """Prepare the conversation for takeover by a human agent.

    Flips the state machine to HUMAN_REVIEW and sets a Chinese-language
    handoff notice that echoes the recorded handoff reason to the user.
    """
    logger.info(
        "Human handoff requested",
        conversation_id=state["conversation_id"],
        reason=state.get("handoff_reason")
    )
    state["state"] = ConversationState.HUMAN_REVIEW.value
    # Generate handoff message (fall back to a generic reason when none is set)
    reason = state.get("handoff_reason", "您的问题需要人工客服协助")
    notice = f"正在为您转接人工客服,请稍候。\n转接原因:{reason}\n\n人工客服将尽快为您服务。"
    return set_response(state, notice)
async def send_response(state: AgentState) -> AgentState:
    """Final workflow node: record the assistant reply and close processing.

    Appends the generated response (if any) to the conversation history,
    then marks the state as finished/COMPLETED.
    """
    reply = state.get("response", "")
    logger.info(
        "Sending response",
        conversation_id=state["conversation_id"],
        response_length=len(reply)
    )
    if reply:
        state["messages"].append({"role": "assistant", "content": reply})
    return mark_finished(state)
async def handle_error(state: AgentState) -> AgentState:
    """Terminal error node: log the failure and reply with a generic apology."""
    logger.error(
        "Workflow error",
        conversation_id=state["conversation_id"],
        error=state.get("error")
    )
    apology = "抱歉,处理您的请求时遇到了问题。请稍后重试,或联系人工客服获取帮助。"
    state = set_response(state, apology)
    return mark_finished(state)
# ============ Routing Functions ============
def should_call_tools(state: AgentState) -> Literal["call_tools", "send_response", "back_to_agent"]:
    """Route after an agent node: run pending tools, otherwise emit the response.

    In the original branching, every non-tool path (response ready, awaiting
    info, or fallthrough) returned "send_response", so the logic collapses to
    a single check.  "back_to_agent" is kept in the Literal for compatibility
    with the graph's conditional-edge mapping but is never returned.
    """
    return "call_tools" if state.get("tool_calls") else "send_response"
def after_tools(state: AgentState) -> str:
    """Pick the agent node that should consume the tool results.

    Routes back to whichever specialist agent queued the tool calls; an
    unknown or missing agent falls back to the customer-service agent.
    """
    known_owners = ("customer_service", "order", "aftersale", "product")
    owner = state.get("current_agent")
    if owner in known_owners:
        return f"{owner}_agent"
    return "customer_service_agent"
def check_completion(state: AgentState) -> Literal["continue", "end", "error"]:
    """Decide whether the workflow keeps running, ends, or errors out.

    Priority order: error state first, then explicit completion, then the
    step budget (a warning is logged when the budget is exhausted).
    """
    if state.get("error"):
        return "error"
    if state.get("finished"):
        return "end"
    steps_taken = state.get("step_count", 0)
    step_budget = state.get("max_steps", 10)
    if steps_taken >= step_budget:
        logger.warning("Max steps reached", conversation_id=state["conversation_id"])
        return "end"
    return "continue"
# ============ Graph Construction ============
def create_agent_graph() -> StateGraph:
    """Create the main agent workflow graph

    Wiring: receive -> classify -> (one specialist agent | human_handoff);
    each agent can loop through call_tools while it queues tool calls, then
    exits via send_response -> END.  NOTE(review): handle_error is registered
    and wired to END, but no edge currently routes INTO it — confirm whether
    an error edge was intended from the agents or call_tools.

    Returns:
        Compiled LangGraph workflow
    """
    # Create graph with AgentState
    graph = StateGraph(AgentState)
    # Add nodes
    graph.add_node("receive", receive_message)
    graph.add_node("classify", classify_intent)
    graph.add_node("customer_service_agent", customer_service_agent)
    graph.add_node("order_agent", order_agent)
    graph.add_node("aftersale_agent", aftersale_agent)
    graph.add_node("product_agent", product_agent)
    graph.add_node("call_tools", call_mcp_tools)
    graph.add_node("human_handoff", human_handoff)
    graph.add_node("send_response", send_response)
    graph.add_node("handle_error", handle_error)
    # Set entry point
    graph.set_entry_point("receive")
    # Add edges
    graph.add_edge("receive", "classify")
    # Conditional routing based on intent (mapping keys are route_by_intent outputs)
    graph.add_conditional_edges(
        "classify",
        route_by_intent,
        {
            "customer_service_agent": "customer_service_agent",
            "order_agent": "order_agent",
            "aftersale_agent": "aftersale_agent",
            "product_agent": "product_agent",
            "human_handoff": "human_handoff"
        }
    )
    # After each agent, check if tools need to be called
    # ("back_to_agent" is mapped for completeness; should_call_tools never returns it)
    for agent_node in ["customer_service_agent", "order_agent", "aftersale_agent", "product_agent"]:
        graph.add_conditional_edges(
            agent_node,
            should_call_tools,
            {
                "call_tools": "call_tools",
                "send_response": "send_response",
                "back_to_agent": agent_node
            }
        )
    # After tool execution, route back to appropriate agent
    graph.add_conditional_edges(
        "call_tools",
        after_tools,
        {
            "customer_service_agent": "customer_service_agent",
            "order_agent": "order_agent",
            "aftersale_agent": "aftersale_agent",
            "product_agent": "product_agent"
        }
    )
    # Human handoff leads to send response
    graph.add_edge("human_handoff", "send_response")
    # Error handling
    graph.add_edge("handle_error", END)
    # Final node
    graph.add_edge("send_response", END)
    return graph.compile()
# Global compiled graph
_compiled_graph = None
def get_agent_graph():
    """Return the lazily-built, process-wide compiled workflow graph."""
    global _compiled_graph
    compiled = _compiled_graph
    if compiled is None:
        compiled = _compiled_graph = create_agent_graph()
    return compiled
async def process_message(
    conversation_id: str,
    user_id: str,
    account_id: str,
    message: str,
    history: list[dict] | None = None,
    context: dict | None = None
) -> AgentState:
    """Process a user message through the agent workflow

    Fix: `history` and `context` default to None, so their annotations are
    now explicitly optional (`| None`) instead of the deprecated implicit
    Optional.  Behavior and call signature are unchanged.

    Args:
        conversation_id: Chatwoot conversation ID
        user_id: User identifier
        account_id: B2B account identifier
        message: User's message
        history: Previous conversation history (None -> start empty)
        context: Existing conversation context (None -> start empty)

    Returns:
        Final agent state with response.  On workflow failure the initial
        state is returned with ``error`` set, a generic apology in
        ``response`` and ``finished=True`` so callers always get a reply.
    """
    # Local import kept from the original; create_initial_state is only needed here.
    from .state import create_initial_state
    # Create initial state
    initial_state = create_initial_state(
        conversation_id=conversation_id,
        user_id=user_id,
        account_id=account_id,
        current_message=message,
        messages=history,
        context=context
    )
    # Get compiled graph (built once per process)
    graph = get_agent_graph()
    # Run the workflow
    logger.info(
        "Starting workflow",
        conversation_id=conversation_id,
        message=message[:100]  # truncated to keep log lines bounded
    )
    try:
        final_state = await graph.ainvoke(initial_state)
        logger.info(
            "Workflow completed",
            conversation_id=conversation_id,
            intent=final_state.get("intent"),
            steps=final_state.get("step_count")
        )
        return final_state
    except Exception as e:
        # Degrade gracefully: surface the error on the state instead of raising.
        logger.error("Workflow failed", error=str(e))
        initial_state["error"] = str(e)
        initial_state["response"] = "抱歉,处理您的请求时遇到了问题。请稍后重试。"
        initial_state["finished"] = True
        return initial_state

195
agent/core/llm.py Normal file
View File

@@ -0,0 +1,195 @@
"""
ZhipuAI LLM Client for B2B Shopping AI Assistant
"""
import asyncio
from dataclasses import dataclass
from typing import Any, AsyncGenerator, Optional

from zhipuai import ZhipuAI

from config import settings
from utils.logger import get_logger
logger = get_logger(__name__)
@dataclass
class Message:
    """Chat message structure passed to the ZhipuAI chat API."""
    role: str  # "system", "user", "assistant"
    content: str  # Message text
@dataclass
class LLMResponse:
    """LLM response structure"""
    content: str  # Generated text ("" when the model only returned tool calls)
    finish_reason: str  # Finish reason string as reported by the API — values TODO confirm
    usage: dict[str, int]  # prompt_tokens / completion_tokens / total_tokens
class ZhipuLLMClient:
    """ZhipuAI LLM Client wrapper.

    Fix: the underlying ``zhipuai`` SDK is synchronous, but ``chat`` and
    ``chat_with_tools`` are ``async``; the original bodies called the SDK
    directly and blocked the event loop for the whole network round-trip.
    The calls are now off-loaded with ``asyncio.to_thread`` so other tasks
    keep running.  Signatures, return values and logging are unchanged.
    """
    def __init__(
        self,
        api_key: Optional[str] = None,
        model: Optional[str] = None
    ):
        """Initialize ZhipuAI client

        Args:
            api_key: ZhipuAI API key, defaults to settings
            model: Model name, defaults to settings
        """
        self.api_key = api_key or settings.zhipu_api_key
        self.model = model or settings.zhipu_model
        self._client = ZhipuAI(api_key=self.api_key)
        logger.info("ZhipuAI client initialized", model=self.model)

    async def chat(
        self,
        messages: list[Message],
        temperature: float = 0.7,
        max_tokens: int = 2048,
        top_p: float = 0.9,
        **kwargs: Any
    ) -> LLMResponse:
        """Send chat completion request

        Args:
            messages: List of chat messages
            temperature: Sampling temperature
            max_tokens: Maximum tokens to generate
            top_p: Top-p sampling parameter
            **kwargs: Additional parameters forwarded to the SDK

        Returns:
            LLM response with content and metadata

        Raises:
            Exception: any SDK/network error is logged and re-raised.
        """
        formatted_messages = [
            {"role": msg.role, "content": msg.content}
            for msg in messages
        ]
        logger.debug(
            "Sending chat request",
            model=self.model,
            message_count=len(messages),
            temperature=temperature
        )
        try:
            # The SDK call blocks on network I/O; run it in a worker thread
            # so the event loop stays responsive.
            response = await asyncio.to_thread(
                self._client.chat.completions.create,
                model=self.model,
                messages=formatted_messages,
                temperature=temperature,
                max_tokens=max_tokens,
                top_p=top_p,
                **kwargs
            )
            choice = response.choices[0]
            result = LLMResponse(
                content=choice.message.content,
                finish_reason=choice.finish_reason,
                usage={
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens
                }
            )
            logger.debug(
                "Chat response received",
                finish_reason=result.finish_reason,
                total_tokens=result.usage["total_tokens"]
            )
            return result
        except Exception as e:
            logger.error("Chat request failed", error=str(e))
            raise

    async def chat_with_tools(
        self,
        messages: list[Message],
        tools: list[dict[str, Any]],
        temperature: float = 0.7,
        **kwargs: Any
    ) -> tuple[LLMResponse, Optional[list[dict[str, Any]]]]:
        """Send chat completion request with tool calling

        Args:
            messages: List of chat messages
            tools: List of tool definitions
            temperature: Sampling temperature
            **kwargs: Additional parameters forwarded to the SDK

        Returns:
            Tuple of (LLM response, tool calls if any); tool calls are
            normalized to plain dicts with id/type/function keys.
        """
        formatted_messages = [
            {"role": msg.role, "content": msg.content}
            for msg in messages
        ]
        logger.debug(
            "Sending chat request with tools",
            model=self.model,
            tool_count=len(tools)
        )
        try:
            # Blocking SDK call — run in a worker thread (see chat()).
            response = await asyncio.to_thread(
                self._client.chat.completions.create,
                model=self.model,
                messages=formatted_messages,
                tools=tools,
                temperature=temperature,
                **kwargs
            )
            choice = response.choices[0]
            result = LLMResponse(
                content=choice.message.content or "",
                finish_reason=choice.finish_reason,
                usage={
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens
                }
            )
            # Extract tool calls if present
            tool_calls = None
            if hasattr(choice.message, 'tool_calls') and choice.message.tool_calls:
                tool_calls = [
                    {
                        "id": tc.id,
                        "type": tc.type,
                        "function": {
                            "name": tc.function.name,
                            "arguments": tc.function.arguments
                        }
                    }
                    for tc in choice.message.tool_calls
                ]
                logger.debug("Tool calls received", tool_count=len(tool_calls))
            return result, tool_calls
        except Exception as e:
            logger.error("Chat with tools request failed", error=str(e))
            raise
# Global LLM client instance
llm_client: Optional[ZhipuLLMClient] = None
def get_llm_client() -> ZhipuLLMClient:
    """Return the shared ZhipuAI client, constructing it on first access."""
    global llm_client
    client = llm_client
    if client is None:
        client = llm_client = ZhipuLLMClient()
    return client

272
agent/core/state.py Normal file
View File

@@ -0,0 +1,272 @@
"""
Agent state definitions for LangGraph workflow
"""
from typing import Any, Optional, Literal
from typing_extensions import TypedDict, Annotated
from dataclasses import dataclass, field
from enum import Enum
class Intent(str, Enum):
    """User intent categories.

    Inherits from ``str`` so members compare and serialize as plain strings;
    the workflow stores ``intent.value`` in ``AgentState["intent"]``.
    """
    CUSTOMER_SERVICE = "customer_service"  # FAQ, general inquiries
    ORDER = "order"  # Order query, tracking, modify, cancel
    AFTERSALE = "aftersale"  # Return, exchange, complaint
    PRODUCT = "product"  # Search, recommend, quote
    HUMAN_HANDOFF = "human_handoff"  # Transfer to human agent
    UNKNOWN = "unknown"  # Cannot determine intent
class ConversationState(str, Enum):
    """Conversation state machine.

    String-valued enum stored in ``AgentState["state"]``.  INITIAL is set on
    message receipt, HUMAN_REVIEW on handoff, COMPLETED by ``mark_finished``
    and ERROR by ``set_error``; the remaining states name intermediate phases.
    """
    INITIAL = "initial"
    CLASSIFYING = "classifying"
    PROCESSING = "processing"
    AWAITING_INFO = "awaiting_info"  # Waiting for the user to supply more details
    TOOL_CALLING = "tool_calling"
    GENERATING = "generating"
    HUMAN_REVIEW = "human_review"
    COMPLETED = "completed"
    ERROR = "error"
@dataclass
class Entity:
    """Extracted entity from user message.

    NOTE(review): AgentState keeps entities as a plain ``{type: value}`` dict
    (see ``add_entity``); this dataclass is not used by the visible state
    helpers — confirm callers before relying on or removing it.
    """
    type: str  # Entity type (order_id, product_id, date, etc.)
    value: Any  # Entity value
    confidence: float  # Extraction confidence (0-1)
@dataclass
class ToolCall:
    """MCP tool call record.

    NOTE(review): mirrors the plain-dict shape appended by ``add_tool_call``;
    the visible helpers use dicts, not this class — confirm callers.
    """
    tool_name: str  # Tool endpoint name on the MCP server
    arguments: dict[str, Any]  # JSON-serializable arguments for the tool
    server: str  # MCP server name (strapi, order, aftersale, product)
@dataclass
class ToolResult:
    """MCP tool execution result.

    NOTE(review): mirrors the plain-dict shape appended by ``add_tool_result``;
    the visible helpers use dicts, not this class — confirm callers.
    """
    tool_name: str  # Tool that produced this result
    success: bool  # Whether the call completed without error
    data: Any  # Parsed response payload (None on failure)
    error: Optional[str] = None  # Error description when success is False
class AgentState(TypedDict):
    """Main agent state for LangGraph workflow

    This state is passed through all nodes in the workflow graph.  At runtime
    it is a plain dict; the helper functions in this module mutate it in
    place and return it to allow chaining.
    """
    # ============ Session Information ============
    conversation_id: str  # Chatwoot conversation ID
    user_id: str  # User identifier
    account_id: str  # B2B account identifier
    # ============ Message Content ============
    messages: list[dict[str, Any]]  # Conversation history [{role, content}]
    current_message: str  # Current user message being processed
    # ============ Intent Recognition ============
    intent: Optional[str]  # Recognized intent (Intent enum value)
    intent_confidence: float  # Intent confidence score (0-1)
    sub_intent: Optional[str]  # Sub-intent for more specific routing
    # ============ Entity Extraction ============
    entities: dict[str, Any]  # Extracted entities {type: value}
    # ============ Agent Routing ============
    current_agent: Optional[str]  # Current processing agent name
    agent_history: list[str]  # History of agents involved
    # ============ Tool Calling ============
    tool_calls: list[dict[str, Any]]  # Pending tool calls (see add_tool_call)
    tool_results: list[dict[str, Any]]  # Tool execution results (see add_tool_result)
    # ============ Response Generation ============
    response: Optional[str]  # Generated response text
    response_type: str  # Response type (text, rich, action)
    # ============ Human Handoff ============
    requires_human: bool  # Whether human intervention is needed
    handoff_reason: Optional[str]  # Reason for human handoff
    # ============ Conversation Context ============
    context: dict[str, Any]  # Accumulated context (order details, etc.)
    # ============ State Control ============
    state: str  # Current conversation state (ConversationState value)
    step_count: int  # Number of steps taken
    max_steps: int  # Maximum allowed steps (default 10)
    error: Optional[str]  # Error message if any
    finished: bool  # Whether processing is complete
def create_initial_state(
    conversation_id: str,
    user_id: str,
    account_id: str,
    current_message: str,
    messages: Optional[list[dict[str, Any]]] = None,
    context: Optional[dict[str, Any]] = None
) -> AgentState:
    """Build a fresh AgentState for one incoming message.

    Args:
        conversation_id: Chatwoot conversation ID
        user_id: User identifier
        account_id: B2B account identifier
        current_message: User's message to process
        messages: Previous conversation history
        context: Existing conversation context

    Returns:
        Initialized AgentState with empty routing/tool/response fields and a
        step budget of 10.
    """
    fresh: AgentState = {
        # Session
        "conversation_id": conversation_id,
        "user_id": user_id,
        "account_id": account_id,
        # Messages
        "messages": messages or [],
        "current_message": current_message,
        # Intent
        "intent": None,
        "intent_confidence": 0.0,
        "sub_intent": None,
        # Entities
        "entities": {},
        # Routing
        "current_agent": None,
        "agent_history": [],
        # Tools
        "tool_calls": [],
        "tool_results": [],
        # Response
        "response": None,
        "response_type": "text",
        # Human handoff
        "requires_human": False,
        "handoff_reason": None,
        # Context
        "context": context or {},
        # Control
        "state": ConversationState.INITIAL.value,
        "step_count": 0,
        "max_steps": 10,
        "error": None,
        "finished": False,
    }
    return fresh
# ============ State Update Helpers ============
def add_message(state: AgentState, role: str, content: str) -> AgentState:
    """Append one chat turn to the conversation history and return the state."""
    turn = {"role": role, "content": content}
    state["messages"].append(turn)
    return state
def set_intent(
    state: AgentState,
    intent: Intent,
    confidence: float,
    sub_intent: Optional[str] = None
) -> AgentState:
    """Record the classified intent, its confidence, and optional sub-intent."""
    state.update(
        intent=intent.value,
        intent_confidence=confidence,
        sub_intent=sub_intent
    )
    return state
def add_entity(state: AgentState, entity_type: str, value: Any) -> AgentState:
    """Store one extracted entity under its type key in the state."""
    state["entities"].update({entity_type: value})
    return state
def add_tool_call(
    state: AgentState,
    tool_name: str,
    arguments: dict[str, Any],
    server: str
) -> AgentState:
    """Queue a pending MCP tool invocation on the state."""
    pending = {
        "tool_name": tool_name,
        "arguments": arguments,
        "server": server,
    }
    state["tool_calls"].append(pending)
    return state
def add_tool_result(
    state: AgentState,
    tool_name: str,
    success: bool,
    data: Any,
    error: Optional[str] = None
) -> AgentState:
    """Append the outcome of one MCP tool execution to the state."""
    record = {
        "tool_name": tool_name,
        "success": success,
        "data": data,
        "error": error,
    }
    state["tool_results"].append(record)
    return state
def set_response(
    state: AgentState,
    response: str,
    response_type: str = "text"
) -> AgentState:
    """Attach the generated reply (and its type) to the state."""
    state.update(response=response, response_type=response_type)
    return state
def request_human_handoff(
    state: AgentState,
    reason: str
) -> AgentState:
    """Flag the conversation for transfer to a human agent."""
    state.update(requires_human=True, handoff_reason=reason)
    return state
def update_context(state: AgentState, updates: dict[str, Any]) -> AgentState:
    """Merge *updates* into the accumulated conversation context."""
    ctx = state["context"]
    for key, value in updates.items():
        ctx[key] = value
    return state
def set_error(state: AgentState, error: str) -> AgentState:
    """Record an error message and flip the state machine to ERROR."""
    state.update(error=error, state=ConversationState.ERROR.value)
    return state
def mark_finished(state: AgentState) -> AgentState:
    """Mark the turn as complete (finished flag + COMPLETED state)."""
    state.update(finished=True, state=ConversationState.COMPLETED.value)
    return state

View File

@@ -0,0 +1,14 @@
"""Agent integrations package"""
from .chatwoot import ChatwootClient, get_chatwoot_client, MessageType, ConversationStatus
from .hyperf_client import HyperfClient, get_hyperf_client, APIResponse, APIError
__all__ = [
"ChatwootClient",
"get_chatwoot_client",
"MessageType",
"ConversationStatus",
"HyperfClient",
"get_hyperf_client",
"APIResponse",
"APIError",
]

View File

@@ -0,0 +1,354 @@
"""
Chatwoot API Client for B2B Shopping AI Assistant
"""
from typing import Any, Optional
from dataclasses import dataclass
from enum import Enum
import httpx
from config import settings
from utils.logger import get_logger
logger = get_logger(__name__)
class MessageType(str, Enum):
    """Chatwoot message direction values sent in the messages API payload."""
    INCOMING = "incoming"  # Message from the contact side
    OUTGOING = "outgoing"  # Message from the agent/bot side
class ConversationStatus(str, Enum):
    """Chatwoot conversation status values used by toggle_status."""
    OPEN = "open"
    RESOLVED = "resolved"
    PENDING = "pending"
    SNOOZED = "snoozed"
@dataclass
class ChatwootMessage:
    """Chatwoot message structure (subset of the API message payload)."""
    id: int  # Chatwoot message ID
    content: str  # Message text
    message_type: str  # MessageType value ("incoming"/"outgoing")
    conversation_id: int  # Owning conversation
    sender_type: Optional[str] = None  # Sender kind as reported by Chatwoot
    sender_id: Optional[int] = None  # Sender ID, when provided
    private: bool = False  # True for internal notes not shown to the contact
@dataclass
class ChatwootContact:
    """Chatwoot contact structure (subset of the API contact payload)."""
    id: int  # Chatwoot contact ID
    name: Optional[str] = None
    email: Optional[str] = None
    phone_number: Optional[str] = None
    custom_attributes: Optional[dict[str, Any]] = None  # Free-form per-contact attributes
class ChatwootClient:
    """Chatwoot API Client.

    Thin async wrapper over the Chatwoot account-scoped REST API
    (``/api/v1/accounts/{account_id}/...``) using a lazily-created shared
    ``httpx.AsyncClient``.  All methods raise ``httpx.HTTPStatusError`` on
    non-2xx responses via ``raise_for_status``.
    """
    def __init__(
        self,
        api_url: Optional[str] = None,
        api_token: Optional[str] = None,
        account_id: int = 1
    ):
        """Initialize Chatwoot client

        Args:
            api_url: Chatwoot API URL, defaults to settings
            api_token: API access token, defaults to settings
            account_id: Chatwoot account ID
        """
        self.api_url = (api_url or settings.chatwoot_api_url).rstrip("/")
        self.api_token = api_token or settings.chatwoot_api_token
        self.account_id = account_id
        self._client: Optional[httpx.AsyncClient] = None  # created on first request
        logger.info("Chatwoot client initialized", api_url=self.api_url)

    async def _get_client(self) -> httpx.AsyncClient:
        """Get or create HTTP client (account-scoped base URL, auth header set)."""
        if self._client is None:
            self._client = httpx.AsyncClient(
                base_url=f"{self.api_url}/api/v1/accounts/{self.account_id}",
                headers={
                    # Chatwoot authenticates with this custom header, not Bearer auth.
                    "api_access_token": self.api_token,
                    "Content-Type": "application/json"
                },
                timeout=30.0
            )
        return self._client

    async def close(self) -> None:
        """Close HTTP client (safe to call multiple times)."""
        if self._client:
            await self._client.aclose()
            self._client = None

    # ============ Messages ============
    async def send_message(
        self,
        conversation_id: int,
        content: str,
        message_type: MessageType = MessageType.OUTGOING,
        private: bool = False
    ) -> dict[str, Any]:
        """Send a message to a conversation

        Args:
            conversation_id: Conversation ID
            content: Message content
            message_type: Message type (incoming/outgoing)
            private: Whether message is private (internal note)

        Returns:
            Created message data
        """
        client = await self._get_client()
        payload = {
            "content": content,
            "message_type": message_type.value,
            "private": private
        }
        response = await client.post(
            f"/conversations/{conversation_id}/messages",
            json=payload
        )
        response.raise_for_status()
        data = response.json()
        logger.debug(
            "Message sent",
            conversation_id=conversation_id,
            message_id=data.get("id")
        )
        return data

    async def send_rich_message(
        self,
        conversation_id: int,
        content: str,
        content_type: str,
        content_attributes: dict[str, Any]
    ) -> dict[str, Any]:
        """Send a rich message (cards, buttons, etc.)

        Args:
            conversation_id: Conversation ID
            content: Fallback text content
            content_type: Rich content type (cards, input_select, etc.)
            content_attributes: Rich content attributes

        Returns:
            Created message data
        """
        client = await self._get_client()
        payload = {
            "content": content,
            "message_type": MessageType.OUTGOING.value,
            "content_type": content_type,
            "content_attributes": content_attributes
        }
        response = await client.post(
            f"/conversations/{conversation_id}/messages",
            json=payload
        )
        response.raise_for_status()
        return response.json()

    # ============ Conversations ============
    async def get_conversation(self, conversation_id: int) -> dict[str, Any]:
        """Get conversation details

        Args:
            conversation_id: Conversation ID

        Returns:
            Conversation data
        """
        client = await self._get_client()
        response = await client.get(f"/conversations/{conversation_id}")
        response.raise_for_status()
        return response.json()

    async def update_conversation_status(
        self,
        conversation_id: int,
        status: ConversationStatus
    ) -> dict[str, Any]:
        """Update conversation status

        Args:
            conversation_id: Conversation ID
            status: New status

        Returns:
            Updated conversation data
        """
        client = await self._get_client()
        response = await client.post(
            f"/conversations/{conversation_id}/toggle_status",
            json={"status": status.value}
        )
        response.raise_for_status()
        logger.info(
            "Conversation status updated",
            conversation_id=conversation_id,
            status=status.value
        )
        return response.json()

    async def add_labels(
        self,
        conversation_id: int,
        labels: list[str]
    ) -> dict[str, Any]:
        """Add labels to a conversation

        Args:
            conversation_id: Conversation ID
            labels: List of label names

        Returns:
            Updated labels
        """
        client = await self._get_client()
        response = await client.post(
            f"/conversations/{conversation_id}/labels",
            json={"labels": labels}
        )
        response.raise_for_status()
        return response.json()

    async def assign_agent(
        self,
        conversation_id: int,
        agent_id: int
    ) -> dict[str, Any]:
        """Assign an agent to a conversation

        Args:
            conversation_id: Conversation ID
            agent_id: Agent user ID

        Returns:
            Assignment result
        """
        client = await self._get_client()
        response = await client.post(
            f"/conversations/{conversation_id}/assignments",
            json={"assignee_id": agent_id}
        )
        response.raise_for_status()
        logger.info(
            "Agent assigned",
            conversation_id=conversation_id,
            agent_id=agent_id
        )
        return response.json()

    # ============ Contacts ============
    async def get_contact(self, contact_id: int) -> dict[str, Any]:
        """Get contact details

        Args:
            contact_id: Contact ID

        Returns:
            Contact data
        """
        client = await self._get_client()
        response = await client.get(f"/contacts/{contact_id}")
        response.raise_for_status()
        return response.json()

    async def update_contact(
        self,
        contact_id: int,
        attributes: dict[str, Any]
    ) -> dict[str, Any]:
        """Update contact attributes

        Args:
            contact_id: Contact ID
            attributes: Attributes to update (sent as the raw PUT body)

        Returns:
            Updated contact data
        """
        client = await self._get_client()
        response = await client.put(
            f"/contacts/{contact_id}",
            json=attributes
        )
        response.raise_for_status()
        return response.json()

    # ============ Messages History ============
    async def get_messages(
        self,
        conversation_id: int,
        before: Optional[int] = None
    ) -> list[dict[str, Any]]:
        """Get conversation messages

        Args:
            conversation_id: Conversation ID
            before: Get messages before this message ID

        Returns:
            List of messages (the "payload" field of the response, [] if absent)
        """
        client = await self._get_client()
        params = {}
        # NOTE(review): truthiness check skips before=0; harmless if Chatwoot
        # message IDs start at 1 — confirm.
        if before:
            params["before"] = before
        response = await client.get(
            f"/conversations/{conversation_id}/messages",
            params=params
        )
        response.raise_for_status()
        data = response.json()
        return data.get("payload", [])
# Global Chatwoot client instance
chatwoot_client: Optional[ChatwootClient] = None
def get_chatwoot_client() -> ChatwootClient:
    """Return the shared ChatwootClient, creating it on first use."""
    global chatwoot_client
    client = chatwoot_client
    if client is None:
        client = chatwoot_client = ChatwootClient()
    return client

View File

@@ -0,0 +1,538 @@
"""
Hyperf PHP API Client for B2B Shopping AI Assistant
"""
from typing import Any, Optional
from dataclasses import dataclass
from enum import Enum
import httpx
from config import settings
from utils.logger import get_logger
logger = get_logger(__name__)
class APIError(Exception):
    """Error raised for non-success responses from the Hyperf API.

    Carries the backend status ``code``, a human-readable ``message`` and the
    response ``data`` payload (if any); the exception text is "[code] message".
    """

    def __init__(self, code: int, message: str, data: Optional[Any] = None):
        super().__init__(f"[{code}] {message}")
        self.code = code
        self.message = message
        self.data = data
@dataclass
class APIResponse:
    """Standardized API response"""
    code: int  # Backend status code; 0 means success (see `success`)
    message: str  # Human-readable status message
    data: Any  # Response payload; shape depends on the endpoint
    meta: Optional[dict[str, Any]] = None  # Extra metadata when present — presumably pagination; confirm with backend
    @property
    def success(self) -> bool:
        """True when the backend reported code 0."""
        return self.code == 0
class HyperfClient:
"""Hyperf PHP API Client"""
def __init__(
self,
api_url: Optional[str] = None,
api_token: Optional[str] = None
):
"""Initialize Hyperf client
Args:
api_url: Hyperf API base URL, defaults to settings
api_token: API access token, defaults to settings
"""
self.api_url = (api_url or settings.hyperf_api_url).rstrip("/")
self.api_token = api_token or settings.hyperf_api_token
self._client: Optional[httpx.AsyncClient] = None
logger.info("Hyperf client initialized", api_url=self.api_url)
async def _get_client(self) -> httpx.AsyncClient:
"""Get or create HTTP client"""
if self._client is None:
self._client = httpx.AsyncClient(
base_url=f"{self.api_url}/api/v1",
headers={
"Authorization": f"Bearer {self.api_token}",
"Content-Type": "application/json",
"Accept": "application/json"
},
timeout=30.0
)
return self._client
async def close(self) -> None:
"""Close HTTP client"""
if self._client:
await self._client.aclose()
self._client = None
async def _request(
self,
method: str,
endpoint: str,
params: Optional[dict[str, Any]] = None,
json: Optional[dict[str, Any]] = None,
headers: Optional[dict[str, str]] = None
) -> APIResponse:
"""Make API request
Args:
method: HTTP method
endpoint: API endpoint
params: Query parameters
json: JSON body
headers: Additional headers
Returns:
Parsed API response
Raises:
APIError: If API returns error
"""
client = await self._get_client()
# Merge headers
request_headers = {}
if headers:
request_headers.update(headers)
logger.debug(
"API request",
method=method,
endpoint=endpoint
)
try:
response = await client.request(
method=method,
url=endpoint,
params=params,
json=json,
headers=request_headers
)
response.raise_for_status()
data = response.json()
result = APIResponse(
code=data.get("code", 0),
message=data.get("message", "success"),
data=data.get("data"),
meta=data.get("meta")
)
if not result.success:
raise APIError(result.code, result.message, result.data)
logger.debug(
"API response",
endpoint=endpoint,
code=result.code
)
return result
except httpx.HTTPStatusError as e:
logger.error(
"HTTP error",
endpoint=endpoint,
status_code=e.response.status_code
)
raise APIError(
e.response.status_code,
f"HTTP error: {e.response.status_code}"
)
except Exception as e:
logger.error("API request failed", endpoint=endpoint, error=str(e))
raise
async def get(
self,
endpoint: str,
params: Optional[dict[str, Any]] = None,
**kwargs: Any
) -> APIResponse:
"""GET request"""
return await self._request("GET", endpoint, params=params, **kwargs)
async def post(
self,
endpoint: str,
json: Optional[dict[str, Any]] = None,
**kwargs: Any
) -> APIResponse:
"""POST request"""
return await self._request("POST", endpoint, json=json, **kwargs)
async def put(
self,
endpoint: str,
json: Optional[dict[str, Any]] = None,
**kwargs: Any
) -> APIResponse:
"""PUT request"""
return await self._request("PUT", endpoint, json=json, **kwargs)
async def delete(
self,
endpoint: str,
**kwargs: Any
) -> APIResponse:
"""DELETE request"""
return await self._request("DELETE", endpoint, **kwargs)
# ============ Order APIs ============
async def query_orders(
self,
user_id: str,
account_id: str,
order_id: Optional[str] = None,
status: Optional[str] = None,
date_start: Optional[str] = None,
date_end: Optional[str] = None,
page: int = 1,
page_size: int = 20
) -> APIResponse:
"""Query orders
Args:
user_id: User ID
account_id: Account ID
order_id: Optional specific order ID
status: Optional order status filter
date_start: Optional start date (YYYY-MM-DD)
date_end: Optional end date (YYYY-MM-DD)
page: Page number
page_size: Items per page
Returns:
Orders list response
"""
payload = {
"user_id": user_id,
"account_id": account_id,
"page": page,
"page_size": page_size
}
if order_id:
payload["order_id"] = order_id
if status:
payload["status"] = status
if date_start:
payload["date_range"] = {"start": date_start}
if date_end:
payload.setdefault("date_range", {})["end"] = date_end
return await self.post("/orders/query", json=payload)
async def get_logistics(
self,
order_id: str,
tracking_number: Optional[str] = None
) -> APIResponse:
"""Get order logistics information
Args:
order_id: Order ID
tracking_number: Optional tracking number
Returns:
Logistics tracking response
"""
params = {}
if tracking_number:
params["tracking_number"] = tracking_number
return await self.get(f"/orders/{order_id}/logistics", params=params)
async def modify_order(
self,
order_id: str,
user_id: str,
modifications: dict[str, Any]
) -> APIResponse:
"""Modify order
Args:
order_id: Order ID
user_id: User ID for permission check
modifications: Changes to apply
Returns:
Modified order response
"""
return await self.put(
f"/orders/{order_id}/modify",
json={
"user_id": user_id,
"modifications": modifications
}
)
async def cancel_order(
self,
order_id: str,
user_id: str,
reason: str
) -> APIResponse:
"""Cancel order
Args:
order_id: Order ID
user_id: User ID for permission check
reason: Cancellation reason
Returns:
Cancellation result with refund info
"""
return await self.post(
f"/orders/{order_id}/cancel",
json={
"user_id": user_id,
"reason": reason
}
)
# ============ Product APIs ============
async def search_products(
self,
query: str,
filters: Optional[dict[str, Any]] = None,
sort: str = "relevance",
page: int = 1,
page_size: int = 20
) -> APIResponse:
"""Search products
Args:
query: Search query
filters: Optional filters (category, price_range, brand, etc.)
sort: Sort order
page: Page number
page_size: Items per page
Returns:
Products list response
"""
payload = {
"query": query,
"sort": sort,
"page": page,
"page_size": page_size
}
if filters:
payload["filters"] = filters
return await self.post("/products/search", json=payload)
async def get_product(self, product_id: str) -> APIResponse:
"""Get product details
Args:
product_id: Product ID
Returns:
Product details response
"""
return await self.get(f"/products/{product_id}")
async def get_recommendations(
self,
user_id: str,
account_id: str,
context: Optional[dict[str, Any]] = None,
limit: int = 10
) -> APIResponse:
"""Get product recommendations
Args:
user_id: User ID
account_id: Account ID
context: Optional context (recent views, current query)
limit: Number of recommendations
Returns:
Recommendations response
"""
payload = {
"user_id": user_id,
"account_id": account_id,
"limit": limit
}
if context:
payload["context"] = context
return await self.post("/products/recommend", json=payload)
async def get_quote(
    self,
    product_id: str,
    quantity: int,
    account_id: str,
    delivery_address: Optional[dict[str, str]] = None
) -> APIResponse:
    """Request a B2B price quote for a product.

    Args:
        product_id: Product identifier.
        quantity: Requested quantity.
        account_id: Account identifier, selects the pricing tier.
        delivery_address: Optional delivery address for shipping costs.

    Returns:
        Quote response with pricing details.
    """
    body: dict[str, Any] = {
        "product_id": product_id,
        "quantity": quantity,
        "account_id": account_id,
    }
    if delivery_address:
        body = {**body, "delivery_address": delivery_address}
    return await self.post("/products/quote", json=body)
# ============ Aftersale APIs ============
async def apply_return(
    self,
    order_id: str,
    user_id: str,
    items: list[dict[str, Any]],
    description: str,
    images: Optional[list[str]] = None
) -> APIResponse:
    """Submit a return application for items of an order.

    Args:
        order_id: Order identifier.
        user_id: Applying user.
        items: Items to return, each with quantity and reason.
        description: Free-text description of the issue.
        images: Optional evidence image URLs.

    Returns:
        Return application response.
    """
    body: dict[str, Any] = {
        "order_id": order_id,
        "user_id": user_id,
        "items": items,
        "description": description,
    }
    if images:
        body = {**body, "images": images}
    return await self.post("/aftersales/return", json=body)
async def apply_exchange(
    self,
    order_id: str,
    user_id: str,
    items: list[dict[str, Any]],
    description: str
) -> APIResponse:
    """Submit an exchange application for items of an order.

    Args:
        order_id: Order identifier.
        user_id: Applying user.
        items: Items to exchange, each with a reason.
        description: Free-text description of the issue.

    Returns:
        Exchange application response.
    """
    body = {
        "order_id": order_id,
        "user_id": user_id,
        "items": items,
        "description": description,
    }
    return await self.post("/aftersales/exchange", json=body)
async def create_complaint(
    self,
    user_id: str,
    complaint_type: str,
    title: str,
    description: str,
    related_order_id: Optional[str] = None,
    attachments: Optional[list[str]] = None
) -> APIResponse:
    """File a customer complaint.

    Args:
        user_id: Complaining user.
        complaint_type: Category of the complaint.
        title: Short complaint title.
        description: Detailed description.
        related_order_id: Optional order the complaint refers to.
        attachments: Optional attachment URLs.

    Returns:
        Complaint creation response.
    """
    body: dict[str, Any] = {
        "user_id": user_id,
        "type": complaint_type,
        "title": title,
        "description": description,
    }
    # Only include optional fields when they carry a truthy value
    optional_fields = {
        "related_order_id": related_order_id,
        "attachments": attachments,
    }
    body.update({k: v for k, v in optional_fields.items() if v})
    return await self.post("/aftersales/complaint", json=body)
async def query_aftersales(
    self,
    user_id: str,
    aftersale_id: Optional[str] = None
) -> APIResponse:
    """Query aftersale records for a user.

    Args:
        user_id: User identifier.
        aftersale_id: Optional specific record to look up.

    Returns:
        Aftersale records response.
    """
    extra = {"aftersale_id": aftersale_id} if aftersale_id else {}
    return await self.get("/aftersales/query", params={"user_id": user_id, **extra})
# Global Hyperf client instance (lazy module-level singleton)
hyperf_client: Optional[HyperfClient] = None


def get_hyperf_client() -> HyperfClient:
    """Get or create the process-wide Hyperf client instance.

    Returns:
        The shared HyperfClient singleton.
    """
    global hyperf_client
    if hyperf_client is None:
        hyperf_client = HyperfClient()
    return hyperf_client

205
agent/main.py Normal file
View File

@@ -0,0 +1,205 @@
"""
B2B Shopping AI Assistant - Main Application Entry
"""
from contextlib import asynccontextmanager
from typing import Any
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from config import settings
from webhooks.chatwoot_webhook import router as webhook_router
from utils.logger import setup_logging, get_logger
from utils.cache import get_cache_manager
from integrations.chatwoot import get_chatwoot_client
# Setup logging
setup_logging(settings.log_level)
logger = get_logger(__name__)
# ============ Lifespan Management ============
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan manager.

    Startup: opens the shared Redis cache connection.
    Shutdown: closes Redis and the shared Chatwoot HTTP client.
    """
    # Startup
    logger.info("Starting B2B Shopping AI Assistant")
    # Initialize cache connection
    cache = get_cache_manager()
    await cache.connect()
    logger.info("Redis cache connected")
    yield
    # Shutdown
    logger.info("Shutting down B2B Shopping AI Assistant")
    # Close connections (same singletons opened/used during the app's life)
    await cache.disconnect()
    chatwoot = get_chatwoot_client()
    await chatwoot.close()
    logger.info("Connections closed")
# ============ Application Setup ============
app = FastAPI(
    title="B2B Shopping AI Assistant",
    description="AI-powered customer service assistant with LangGraph and MCP",
    version="1.0.0",
    lifespan=lifespan
)

# CORS middleware
# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers per the CORS spec and is unsafe in production —
# replace with an explicit origin allow-list before deploying.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure appropriately for production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# ============ Exception Handlers ============
@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """Last-resort handler: log unhandled errors and return a generic 500.

    Args:
        request: The request that triggered the exception.
        exc: The unhandled exception.

    Returns:
        A generic 500 JSON response (no internal details leaked to clients).
    """
    # Fix: use logger.exception so the full traceback is recorded;
    # the previous logger.error(str(exc)) lost the stack trace entirely.
    logger.exception(
        "Unhandled exception",
        path=request.url.path,
        error=str(exc)
    )
    return JSONResponse(
        status_code=500,
        content={"detail": "Internal server error"}
    )
# ============ Include Routers ============
# Mounts the Chatwoot webhook endpoints (prefix /webhooks)
app.include_router(webhook_router)
# ============ API Endpoints ============
@app.get("/health")
async def health_check():
    """Liveness probe endpoint (used by the Docker HEALTHCHECK)."""
    status_report = {
        "status": "healthy",
        "service": "b2b-ai-assistant",
        "version": "1.0.0",
    }
    return status_report
@app.get("/")
async def root():
    """Service identity endpoint at the API root."""
    info = {
        "name": "B2B Shopping AI Assistant",
        "version": "1.0.0",
        "status": "running",
    }
    return info
class QueryRequest(BaseModel):
    """Direct query request model (testing entry point, bypasses Chatwoot)."""
    conversation_id: str  # unique conversation identifier (Redis cache key)
    user_id: str  # end-user identifier
    account_id: str  # B2B account identifier
    message: str  # raw user message text
class QueryResponse(BaseModel):
    """Query response model returned by /api/agent/query."""
    response: str  # assistant reply text
    intent: str | None = None  # classified intent, when available
    requires_human: bool = False  # True when the conversation should escalate
    context: dict[str, Any] = {}  # updated conversation context
@app.post("/api/agent/query", response_model=QueryResponse)
async def agent_query(request: QueryRequest):
    """Direct agent query endpoint.

    Allows direct testing of the agent workflow without Chatwoot integration.
    Loads cached context/history, runs the LangGraph pipeline, then persists
    the new turn and context back to Redis.
    """
    # Imported lazily so importing main.py doesn't pull in the whole graph
    from core.graph import process_message
    logger.info(
        "Direct query received",
        conversation_id=request.conversation_id,
        user_id=request.user_id
    )
    # Load context from cache
    cache = get_cache_manager()
    context = await cache.get_context(request.conversation_id)
    history = await cache.get_messages(request.conversation_id)
    # Process message through the agent workflow
    final_state = await process_message(
        conversation_id=request.conversation_id,
        user_id=request.user_id,
        account_id=request.account_id,
        message=request.message,
        history=history,
        context=context
    )
    # Update cache: record the user turn, and the assistant turn if produced
    await cache.add_message(request.conversation_id, "user", request.message)
    if final_state.get("response"):
        await cache.add_message(
            request.conversation_id,
            "assistant",
            final_state["response"]
        )
    # Save context (carrying the last classified intent forward)
    new_context = final_state.get("context", {})
    new_context["last_intent"] = final_state.get("intent")
    await cache.set_context(request.conversation_id, new_context)
    return QueryResponse(
        response=final_state.get("response", ""),
        intent=final_state.get("intent"),
        requires_human=final_state.get("requires_human", False),
        context=final_state.get("context", {})
    )
@app.get("/api/config")
async def get_config():
    """Expose a sanitized view of runtime configuration (no secrets)."""
    mcp_endpoints = {
        "strapi": settings.strapi_mcp_url,
        "order": settings.order_mcp_url,
        "aftersale": settings.aftersale_mcp_url,
        "product": settings.product_mcp_url,
    }
    return {
        "zhipu_model": settings.zhipu_model,
        "max_conversation_steps": settings.max_conversation_steps,
        "conversation_timeout": settings.conversation_timeout,
        "mcp_servers": mcp_endpoints,
    }
# ============ Run Application ============
if __name__ == "__main__":
    import uvicorn

    # Local development entry point; in Docker the service is started via
    # the uvicorn CLI in the Dockerfile CMD, so this branch only runs when
    # the module is executed directly.
    uvicorn.run(
        "main:app",
        host="0.0.0.0",
        port=8000,
        reload=True
    )

39
agent/requirements.txt Normal file
View File

@@ -0,0 +1,39 @@
# Web Framework
fastapi>=0.109.0
uvicorn[standard]>=0.27.0
# LangGraph & LangChain
langgraph>=0.0.40
langchain>=0.1.0
langchain-core>=0.1.0
# AI Model SDK
zhipuai>=2.0.0
# Async utilities
sniffio>=1.3.0
anyio>=4.0.0
# HTTP Client
httpx>=0.26.0
aiohttp>=3.9.0
# Data Validation
pydantic>=2.5.0
pydantic-settings>=2.1.0
# Redis
redis>=5.0.0
# Environment & Config
python-dotenv>=1.0.0
# Logging
structlog>=24.1.0
# Testing
pytest>=7.4.0
pytest-asyncio>=0.23.0
pytest-cov>=4.1.0
# MCP Client
mcp>=1.0.0

0
agent/tests/__init__.py Normal file
View File

11
agent/utils/__init__.py Normal file
View File

@@ -0,0 +1,11 @@
"""Agent utilities package"""
from .logger import get_logger, setup_logging, logger
from .cache import CacheManager, get_cache_manager
__all__ = [
"get_logger",
"setup_logging",
"logger",
"CacheManager",
"get_cache_manager",
]

245
agent/utils/cache.py Normal file
View File

@@ -0,0 +1,245 @@
"""
Redis cache management for conversation context
"""
import json
from typing import Any, Optional
from datetime import timedelta
import redis.asyncio as redis
from config import settings, get_redis_url
from .logger import get_logger
logger = get_logger(__name__)
class CacheManager:
    """Redis cache manager for conversation context and message history.

    Holds a single lazily-created async Redis client; every operation
    transparently connects on first use via _ensure_connected().
    """

    def __init__(self, redis_url: Optional[str] = None):
        """Initialize cache manager.

        Args:
            redis_url: Redis connection URL, defaults to settings
        """
        self._redis_url: str = redis_url or get_redis_url()
        # Created lazily in connect(); None means "not connected yet"
        self._client: Optional[redis.Redis] = None

    async def connect(self) -> None:
        """Connect to Redis (no-op if a client already exists)."""
        if self._client is None:
            # decode_responses=True: the client returns str instead of bytes
            self._client = redis.from_url(
                self._redis_url,
                encoding="utf-8",
                decode_responses=True
            )
            logger.info("Connected to Redis")

    async def disconnect(self) -> None:
        """Disconnect from Redis and drop the client reference."""
        # NOTE(review): redis-py asyncio deprecates close() in favor of
        # aclose() in newer 5.x releases — confirm against pinned version.
        if self._client:
            await self._client.close()
            self._client = None
            logger.info("Disconnected from Redis")

    async def _ensure_connected(self) -> redis.Redis:
        """Ensure Redis connection is established and return the client."""
        if self._client is None:
            await self.connect()
        return self._client

    # ============ Conversation Context ============
    def _context_key(self, conversation_id: str) -> str:
        """Generate Redis key for conversation context."""
        return f"conversation:{conversation_id}"

    async def get_context(self, conversation_id: str) -> Optional[dict[str, Any]]:
        """Get conversation context.

        Args:
            conversation_id: Unique conversation identifier

        Returns:
            Context dictionary or None if not found
        """
        client = await self._ensure_connected()
        key = self._context_key(conversation_id)
        data = await client.get(key)
        if data:
            logger.debug("Context retrieved", conversation_id=conversation_id)
            return json.loads(data)
        return None

    async def set_context(
        self,
        conversation_id: str,
        context: dict[str, Any],
        ttl: Optional[int] = None
    ) -> None:
        """Set conversation context with an expiry.

        Args:
            conversation_id: Unique conversation identifier
            context: Context dictionary (must be JSON-serializable)
            ttl: Time-to-live in seconds, defaults to settings
        """
        client = await self._ensure_connected()
        key = self._context_key(conversation_id)
        ttl = ttl or settings.conversation_timeout
        # ensure_ascii=False keeps CJK text readable in Redis
        await client.setex(
            key,
            timedelta(seconds=ttl),
            json.dumps(context, ensure_ascii=False)
        )
        logger.debug("Context saved", conversation_id=conversation_id, ttl=ttl)

    async def update_context(
        self,
        conversation_id: str,
        updates: dict[str, Any]
    ) -> dict[str, Any]:
        """Merge new values into the conversation context.

        Note: read-modify-write, not atomic across concurrent writers.

        Args:
            conversation_id: Unique conversation identifier
            updates: Dictionary of updates to merge

        Returns:
            Updated context dictionary
        """
        context = await self.get_context(conversation_id) or {}
        context.update(updates)
        await self.set_context(conversation_id, context)
        return context

    async def delete_context(self, conversation_id: str) -> bool:
        """Delete conversation context.

        Args:
            conversation_id: Unique conversation identifier

        Returns:
            True if deleted, False if not found
        """
        client = await self._ensure_connected()
        key = self._context_key(conversation_id)
        result = await client.delete(key)
        if result:
            logger.debug("Context deleted", conversation_id=conversation_id)
        return bool(result)

    # ============ Message History ============
    def _messages_key(self, conversation_id: str) -> str:
        """Generate Redis key for message history."""
        return f"messages:{conversation_id}"

    async def add_message(
        self,
        conversation_id: str,
        role: str,
        content: str,
        max_messages: int = 20
    ) -> None:
        """Append a message to the conversation history list.

        Args:
            conversation_id: Unique conversation identifier
            role: Message role (user/assistant/system)
            content: Message content
            max_messages: Maximum messages to keep (oldest trimmed first)
        """
        client = await self._ensure_connected()
        key = self._messages_key(conversation_id)
        message = json.dumps({
            "role": role,
            "content": content
        }, ensure_ascii=False)
        # Append, trim to the newest max_messages, and refresh the TTL
        await client.rpush(key, message)
        await client.ltrim(key, -max_messages, -1)
        await client.expire(key, settings.conversation_timeout)
        logger.debug(
            "Message added",
            conversation_id=conversation_id,
            role=role
        )

    async def get_messages(
        self,
        conversation_id: str,
        limit: int = 20
    ) -> list[dict[str, str]]:
        """Get conversation message history (newest `limit` entries).

        Args:
            conversation_id: Unique conversation identifier
            limit: Maximum messages to retrieve

        Returns:
            List of message dictionaries
        """
        client = await self._ensure_connected()
        key = self._messages_key(conversation_id)
        messages = await client.lrange(key, -limit, -1)
        return [json.loads(m) for m in messages]

    async def clear_messages(self, conversation_id: str) -> bool:
        """Clear conversation message history.

        Args:
            conversation_id: Unique conversation identifier

        Returns:
            True if cleared, False if not found
        """
        client = await self._ensure_connected()
        key = self._messages_key(conversation_id)
        result = await client.delete(key)
        return bool(result)

    # ============ Generic Cache Operations ============
    async def get(self, key: str) -> Optional[str]:
        """Get a raw string value from the cache."""
        client = await self._ensure_connected()
        return await client.get(key)

    async def set(
        self,
        key: str,
        value: str,
        ttl: Optional[int] = None
    ) -> None:
        """Set a raw string value, with expiry when ttl is given."""
        client = await self._ensure_connected()
        if ttl:
            await client.setex(key, timedelta(seconds=ttl), value)
        else:
            await client.set(key, value)

    async def delete(self, key: str) -> bool:
        """Delete a key from the cache; True if it existed."""
        client = await self._ensure_connected()
        return bool(await client.delete(key))
# Global cache manager instance (module-level lazy singleton)
cache_manager: Optional[CacheManager] = None


def get_cache_manager() -> CacheManager:
    """Return the shared CacheManager, creating it on first use."""
    global cache_manager
    cache_manager = cache_manager or CacheManager()
    return cache_manager

56
agent/utils/logger.py Normal file
View File

@@ -0,0 +1,56 @@
"""
Logging utilities for B2B Shopping AI Assistant
"""
import logging
import sys
from typing import Optional
import structlog
from structlog.types import Processor
def setup_logging(level: str = "INFO") -> None:
    """Configure stdlib logging and structlog for structured output.

    Args:
        level: Minimum log level name (e.g. "INFO", "DEBUG").
    """
    # Configure standard library logging; structlog renders through it
    logging.basicConfig(
        format="%(message)s",
        stream=sys.stdout,
        level=getattr(logging, level.upper()),
    )
    # Define processors for structlog
    shared_processors: list[Processor] = [
        structlog.contextvars.merge_contextvars,  # include contextvars-bound fields
        structlog.processors.add_log_level,
        structlog.processors.TimeStamper(fmt="iso"),
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.UnicodeDecoder(),
    ]
    # Configure structlog; final rendering is delegated to the stdlib formatter
    structlog.configure(
        processors=shared_processors + [
            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
        ],
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )
def get_logger(name: Optional[str] = None) -> structlog.stdlib.BoundLogger:
    """Get a structured logger instance.

    Args:
        name: Logger name, defaults to module name

    Returns:
        Configured structlog logger
    """
    return structlog.get_logger(name)


# Create default logger for modules that don't request a named one
logger = get_logger("agent")

View File

View File

@@ -0,0 +1,355 @@
"""
Chatwoot Webhook Handler
"""
import hmac
import hashlib
from typing import Any, Optional
from fastapi import APIRouter, Request, HTTPException, BackgroundTasks
from pydantic import BaseModel
from config import settings
from core.graph import process_message
from integrations.chatwoot import get_chatwoot_client, ConversationStatus
from utils.cache import get_cache_manager
from utils.logger import get_logger
logger = get_logger(__name__)
router = APIRouter(prefix="/webhooks", tags=["webhooks"])
# ============ Webhook Payload Models ============
class WebhookSender(BaseModel):
    """Sender of a webhook message (a contact or a Chatwoot user/agent)."""
    id: Optional[int] = None
    name: Optional[str] = None
    email: Optional[str] = None
    type: Optional[str] = None  # "contact" or "user"
class WebhookMessage(BaseModel):
    """Webhook message content."""
    id: int
    content: Optional[str] = None  # text body; may be absent — TODO confirm for attachment-only messages
    message_type: str  # "incoming" or "outgoing"
    content_type: Optional[str] = None
    private: bool = False  # private notes are agent-only
    sender: Optional[WebhookSender] = None
class WebhookConversation(BaseModel):
    """Webhook conversation information."""
    id: int
    inbox_id: int
    status: str  # e.g. "open" / "resolved" (values observed in handlers below)
    account_id: Optional[int] = None  # Chatwoot may not always include this
    contact_inbox: Optional[dict] = None
    messages: Optional[list] = None
    additional_attributes: Optional[dict] = None
    can_reply: Optional[bool] = None
    channel: Optional[str] = None
class WebhookContact(BaseModel):
    """Webhook contact information."""
    id: int
    name: Optional[str] = None
    email: Optional[str] = None
    phone_number: Optional[str] = None
    custom_attributes: Optional[dict] = None  # merged into conversation context on creation
class ChatwootWebhookPayload(BaseModel):
    """Top-level Chatwoot webhook payload.

    Most fields are optional because Chatwoot sends different subsets per
    event type (message_created, conversation_created, ...).
    """
    event: str  # event name used for routing
    id: Optional[int] = None
    content: Optional[str] = None  # message text for message events
    message_type: Optional[str] = None  # "incoming" or "outgoing"
    content_type: Optional[str] = None
    private: Optional[bool] = False
    conversation: Optional[WebhookConversation] = None
    sender: Optional[WebhookSender] = None
    contact: Optional[WebhookContact] = None
    account: Optional[dict] = None  # top-level account object; source of account_id
# ============ Signature Verification ============
def verify_webhook_signature(
    payload: bytes,
    signature: str,
    secret: Optional[str] = None
) -> bool:
    """Verify a Chatwoot webhook HMAC-SHA256 signature.

    Implements the TODO that previously disabled verification entirely
    (an unconditional `return True` made all checks below it dead code).
    Verification is skipped only when no secret is configured.

    Args:
        payload: Raw request body bytes (must be the exact bytes received).
        signature: X-Chatwoot-Signature header value (hex digest).
        secret: Shared secret; defaults to settings.chatwoot_webhook_secret.

    Returns:
        True if the signature is valid, or if verification is disabled
        because no secret is configured.
    """
    if secret is None:
        secret = settings.chatwoot_webhook_secret
    if not secret:
        # No secret configured: verification is effectively disabled
        logger.warning("Webhook secret not configured, skipping verification")
        return True
    if not signature:
        # A secret is configured but the request is unsigned: reject.
        # (Previously this returned True, accepting unsigned requests.)
        logger.warning("No signature provided in request")
        return False
    expected = hmac.new(
        secret.encode(),
        payload,
        hashlib.sha256
    ).hexdigest()
    return hmac.compare_digest(expected, signature)
# ============ Message Processing ============
async def handle_incoming_message(payload: ChatwootWebhookPayload) -> None:
    """Process an incoming contact message end-to-end.

    Runs the agent workflow, replies via the Chatwoot API, performs human
    handoff when requested, and persists conversation state in Redis.

    Args:
        payload: Webhook payload for a message_created event.
    """
    conversation = payload.conversation
    if not conversation:
        logger.warning("No conversation in payload")
        return
    conversation_id = str(conversation.id)
    content = payload.content
    if not content:
        logger.debug("Empty message content, skipping")
        return
    # Get user/contact info
    contact = payload.contact or payload.sender
    user_id = str(contact.id) if contact else "unknown"
    # Chatwoot includes account info at the top level of the payload
    account_obj = payload.account
    account_id = str(account_obj.get("id")) if account_obj else "1"
    logger.info(
        "Processing incoming message",
        conversation_id=conversation_id,
        user_id=user_id,
        message_length=len(content)
    )
    # Load conversation context from cache
    cache = get_cache_manager()
    await cache.connect()
    context = await cache.get_context(conversation_id)
    history = await cache.get_messages(conversation_id)
    try:
        # Process message through agent workflow
        final_state = await process_message(
            conversation_id=conversation_id,
            user_id=user_id,
            account_id=account_id,
            message=content,
            history=history,
            context=context
        )
        response = final_state.get("response")
        if not response:
            response = "抱歉,我暂时无法处理您的请求。请稍后重试或联系人工客服。"
        # Use a dedicated client bound to this webhook's account_id
        from integrations.chatwoot import ChatwootClient
        chatwoot = ChatwootClient(account_id=int(account_id))
        try:
            await chatwoot.send_message(
                conversation_id=conversation.id,
                content=response
            )
            # Handle human handoff.
            # BUG FIX: the client was previously closed *before* this branch,
            # so handoff calls went through an already-closed HTTP client.
            if final_state.get("requires_human"):
                await chatwoot.update_conversation_status(
                    conversation_id=conversation.id,
                    status=ConversationStatus.OPEN
                )
                # Add label for routing
                await chatwoot.add_labels(
                    conversation_id=conversation.id,
                    labels=["needs_human", final_state.get("intent", "unknown")]
                )
        finally:
            await chatwoot.close()
        # Update cache with both sides of the exchange
        await cache.add_message(conversation_id, "user", content)
        await cache.add_message(conversation_id, "assistant", response)
        # Save context
        new_context = final_state.get("context", {})
        new_context["last_intent"] = final_state.get("intent")
        await cache.set_context(conversation_id, new_context)
        logger.info(
            "Message processed successfully",
            conversation_id=conversation_id,
            intent=final_state.get("intent"),
            requires_human=final_state.get("requires_human")
        )
    except Exception as e:
        logger.error(
            "Message processing failed",
            conversation_id=conversation_id,
            error=str(e)
        )
        # Best-effort fallback: apologize and transfer to a human agent.
        # Uses the shared client (owned by the app lifespan, not closed here).
        chatwoot = get_chatwoot_client()
        await chatwoot.send_message(
            conversation_id=conversation.id,
            content="抱歉,处理您的消息时遇到了问题。我们的客服团队将尽快为您服务。"
        )
        # Transfer to human
        await chatwoot.update_conversation_status(
            conversation_id=conversation.id,
            status=ConversationStatus.OPEN
        )
async def handle_conversation_created(payload: ChatwootWebhookPayload) -> None:
    """Handle a newly created conversation.

    Seeds the Redis context with contact information and sends the
    initial welcome message.

    Args:
        payload: Webhook payload
    """
    conversation = payload.conversation
    if not conversation:
        return
    conversation_id = str(conversation.id)
    logger.info(
        "New conversation created",
        conversation_id=conversation_id
    )
    # Initialize conversation context
    cache = get_cache_manager()
    await cache.connect()
    context = {
        "created": True,
        "inbox_id": conversation.inbox_id
    }
    # Add contact info to context
    contact = payload.contact
    if contact:
        context["contact_name"] = contact.name
        context["contact_email"] = contact.email
        if contact.custom_attributes:
            # NOTE(review): merges arbitrary custom attributes into context;
            # assumes keys don't collide with reserved context keys — confirm
            context.update(contact.custom_attributes)
    await cache.set_context(conversation_id, context)
    # Send welcome message
    chatwoot = get_chatwoot_client()
    await chatwoot.send_message(
        conversation_id=conversation.id,
        content="您好!我是 AI 智能助手,很高兴为您服务。请问有什么可以帮您的?\n\n您可以询问我关于订单、商品、售后等问题,我会尽力为您解答。"
    )
async def handle_conversation_status_changed(payload: ChatwootWebhookPayload) -> None:
    """React to a conversation status transition reported by Chatwoot.

    When a conversation is resolved, its cached context and message
    history are purged from Redis.

    Args:
        payload: Webhook payload
    """
    conversation = payload.conversation
    if conversation is None:
        return
    conversation_id = str(conversation.id)
    new_status = conversation.status
    logger.info(
        "Conversation status changed",
        conversation_id=conversation_id,
        status=new_status
    )
    if new_status != "resolved":
        return
    # Resolved: drop all cached state for this conversation
    cache = get_cache_manager()
    await cache.connect()
    await cache.delete_context(conversation_id)
    await cache.clear_messages(conversation_id)
# ============ Webhook Endpoint ============
@router.post("/chatwoot")
async def chatwoot_webhook(
    request: Request,
    background_tasks: BackgroundTasks
):
    """Chatwoot webhook endpoint.

    Verifies the request signature, filters out bot/private traffic, and
    schedules event handling as background tasks so the webhook responds
    quickly.
    """
    # Get raw body for signature verification (must be the exact bytes sent)
    body = await request.body()
    # Verify signature
    signature = request.headers.get("X-Chatwoot-Signature", "")
    if not verify_webhook_signature(body, signature):
        logger.warning("Invalid webhook signature")
        raise HTTPException(status_code=401, detail="Invalid signature")
    # Parse payload
    try:
        payload = ChatwootWebhookPayload.model_validate_json(body)
    except Exception as e:
        logger.error("Failed to parse webhook payload", error=str(e))
        raise HTTPException(status_code=400, detail="Invalid payload")
    event = payload.event
    logger.debug(f"Webhook received: {event}")
    # Filter out bot's own (outgoing) messages to avoid reply loops
    if payload.message_type == "outgoing":
        return {"status": "ignored", "reason": "outgoing message"}
    # Filter private (agent-only) notes
    if payload.private:
        return {"status": "ignored", "reason": "private message"}
    # Route by event type
    if event == "message_created":
        # Only process incoming messages from contacts
        if payload.message_type == "incoming":
            background_tasks.add_task(handle_incoming_message, payload)
    elif event == "conversation_created":
        background_tasks.add_task(handle_conversation_created, payload)
    elif event == "conversation_status_changed":
        background_tasks.add_task(handle_conversation_status_changed, payload)
    elif event == "conversation_updated":
        # Handle other conversation updates if needed
        pass
    return {"status": "accepted", "event": event}