feat: 初始化 B2B AI Shopping Assistant 项目
- 配置 Docker Compose 多服务编排
- 实现 Chatwoot + Agent 集成
- 配置 Strapi MCP 知识库
- 支持 7 种语言的 FAQ 系统
- 实现 LangGraph AI 工作流

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
18
agent/core/__init__.py
Normal file
18
agent/core/__init__.py
Normal file
@@ -0,0 +1,18 @@
|
||||
"""Agent core package"""
|
||||
from .state import AgentState, Intent, ConversationState, create_initial_state
|
||||
from .llm import ZhipuLLMClient, get_llm_client, Message, LLMResponse
|
||||
from .graph import create_agent_graph, get_agent_graph, process_message
|
||||
|
||||
# Public API of the agent.core package, grouped by submodule:
# state (workflow state), llm (ZhipuAI client), graph (LangGraph workflow).
__all__ = [
    "AgentState",
    "Intent",
    "ConversationState",
    "create_initial_state",
    "ZhipuLLMClient",
    "get_llm_client",
    "Message",
    "LLMResponse",
    "create_agent_graph",
    "get_agent_graph",
    "process_message",
]
|
||||
404
agent/core/graph.py
Normal file
404
agent/core/graph.py
Normal file
@@ -0,0 +1,404 @@
|
||||
"""
|
||||
LangGraph workflow definition for B2B Shopping AI Assistant
|
||||
"""
|
||||
from typing import Literal
|
||||
import httpx
|
||||
|
||||
from langgraph.graph import StateGraph, END
|
||||
|
||||
from .state import AgentState, ConversationState, mark_finished, add_tool_result, set_response
|
||||
from agents.router import classify_intent, route_by_intent
|
||||
from agents.customer_service import customer_service_agent
|
||||
from agents.order import order_agent
|
||||
from agents.aftersale import aftersale_agent
|
||||
from agents.product import product_agent
|
||||
from config import settings
|
||||
from utils.logger import get_logger
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
# ============ Node Functions ============
|
||||
|
||||
async def receive_message(state: AgentState) -> AgentState:
    """Entry node of the workflow: record the incoming user message.

    Appends the current message to the conversation history and resets
    the conversation state machine to INITIAL.
    """
    incoming = state["current_message"]
    logger.info(
        "Receiving message",
        conversation_id=state["conversation_id"],
        message_length=len(incoming),
    )

    # Keep the full transcript in state so downstream agents see context.
    state["messages"].append({"role": "user", "content": incoming})

    state["state"] = ConversationState.INITIAL.value
    return state
|
||||
|
||||
|
||||
async def call_mcp_tools(state: AgentState) -> AgentState:
    """Execute pending MCP tool calls

    Calls the appropriate MCP server based on the tool_calls in state.
    Each entry in state["tool_calls"] is a dict with keys "server",
    "tool_name" and "arguments"; every call's outcome (success or failure)
    is appended to state["tool_results"] via add_tool_result, and the
    pending queue is cleared before returning.
    """
    if not state["tool_calls"]:
        logger.debug("No tool calls to execute")
        return state

    logger.info(
        "Executing MCP tools",
        tool_count=len(state["tool_calls"])
    )

    # MCP server URL mapping (base URLs come from application settings)
    mcp_servers = {
        "strapi": settings.strapi_mcp_url,
        "order": settings.order_mcp_url,
        "aftersale": settings.aftersale_mcp_url,
        "product": settings.product_mcp_url
    }

    # One shared client for all calls in this batch; 30s cap per request.
    async with httpx.AsyncClient(timeout=30.0) as client:
        for tool_call in state["tool_calls"]:
            server = tool_call["server"]
            tool_name = tool_call["tool_name"]
            arguments = tool_call["arguments"]

            server_url = mcp_servers.get(server)
            if not server_url:
                # Unknown server: record a failure result, keep processing
                # the remaining calls instead of aborting the batch.
                state = add_tool_result(
                    state,
                    tool_name=tool_name,
                    success=False,
                    data=None,
                    error=f"Unknown MCP server: {server}"
                )
                continue

            try:
                # Call MCP tool endpoint
                response = await client.post(
                    f"{server_url}/tools/{tool_name}",
                    json=arguments
                )
                response.raise_for_status()

                result = response.json()
                state = add_tool_result(
                    state,
                    tool_name=tool_name,
                    success=True,
                    data=result
                )

                logger.debug(
                    "Tool executed successfully",
                    tool=tool_name,
                    server=server
                )

            except httpx.HTTPStatusError as e:
                # Non-2xx from the MCP server: record the status code.
                logger.error(
                    "Tool HTTP error",
                    tool=tool_name,
                    status=e.response.status_code
                )
                state = add_tool_result(
                    state,
                    tool_name=tool_name,
                    success=False,
                    data=None,
                    error=f"HTTP {e.response.status_code}"
                )

            except Exception as e:
                # Best-effort: any other failure (timeout, connection error,
                # bad JSON) becomes a failure record, not a raised exception.
                logger.error("Tool execution failed", tool=tool_name, error=str(e))
                state = add_tool_result(
                    state,
                    tool_name=tool_name,
                    success=False,
                    data=None,
                    error=str(e)
                )

    # Clear pending tool calls
    state["tool_calls"] = []

    return state
|
||||
|
||||
|
||||
async def human_handoff(state: AgentState) -> AgentState:
    """Prepare the conversation for takeover by a human agent.

    Moves the state machine to HUMAN_REVIEW and drafts the transfer
    notice that is shown to the user.
    """
    logger.info(
        "Human handoff requested",
        conversation_id=state["conversation_id"],
        reason=state.get("handoff_reason"),
    )

    state["state"] = ConversationState.HUMAN_REVIEW.value

    # The fallback text applies only when the key is absent entirely.
    reason = state.get("handoff_reason", "您的问题需要人工客服协助")
    notice = f"正在为您转接人工客服,请稍候。\n转接原因:{reason}\n\n人工客服将尽快为您服务。"
    return set_response(state, notice)
|
||||
|
||||
|
||||
async def send_response(state: AgentState) -> AgentState:
    """Terminal node: append the assistant reply to history and finish.

    Marks the workflow run as complete after logging the outgoing message.
    """
    reply = state.get("response", "")
    logger.info(
        "Sending response",
        conversation_id=state["conversation_id"],
        response_length=len(reply),
    )

    # Only record a transcript turn when a reply was actually produced.
    if reply:
        state["messages"].append({"role": "assistant", "content": reply})

    return mark_finished(state)
|
||||
|
||||
|
||||
async def handle_error(state: AgentState) -> AgentState:
    """Error node: log the failure and emit a generic apology reply."""
    logger.error(
        "Workflow error",
        conversation_id=state["conversation_id"],
        error=state.get("error"),
    )

    # The user sees a fixed fallback message rather than the raw error.
    state = set_response(
        state,
        "抱歉,处理您的请求时遇到了问题。请稍后重试,或联系人工客服获取帮助。"
    )
    return mark_finished(state)
|
||||
|
||||
|
||||
# ============ Routing Functions ============
|
||||
|
||||
def should_call_tools(state: AgentState) -> Literal["call_tools", "send_response", "back_to_agent"]:
    """Route an agent node's output to the next workflow node.

    Returns:
        "call_tools" when the agent queued MCP tool calls, otherwise
        "send_response" — the original's separate branches for "response
        ready", "awaiting info" and the fallthrough case all returned the
        same target, so they are collapsed into one here.

    Note:
        "back_to_agent" remains in the Literal because the graph's
        conditional-edge mapping declares it, but no branch currently
        produces it (that edge is unreachable).
    """
    # Pending tool calls take priority over any drafted response.
    if state.get("tool_calls"):
        return "call_tools"

    return "send_response"
|
||||
|
||||
|
||||
def after_tools(state: AgentState) -> str:
    """Pick the node that resumes processing after tool execution.

    Routes back to whichever agent queued the tool calls; an unknown or
    missing agent name falls back to the customer-service agent.
    """
    node_by_agent = {
        "customer_service": "customer_service_agent",
        "order": "order_agent",
        "aftersale": "aftersale_agent",
        "product": "product_agent",
    }
    return node_by_agent.get(state.get("current_agent"), "customer_service_agent")
|
||||
|
||||
|
||||
def check_completion(state: AgentState) -> Literal["continue", "end", "error"]:
    """Decide whether the workflow loops again, stops normally, or errors.

    Order matters: an error wins over a finished flag, which wins over
    the step budget.
    """
    if state.get("error"):
        return "error"

    if state.get("finished"):
        return "end"

    # Safety valve: stop runaway loops once the step budget is spent.
    steps = state.get("step_count", 0)
    budget = state.get("max_steps", 10)
    if steps >= budget:
        logger.warning("Max steps reached", conversation_id=state["conversation_id"])
        return "end"

    return "continue"
|
||||
|
||||
|
||||
# ============ Graph Construction ============
|
||||
|
||||
def create_agent_graph() -> StateGraph:
    """Create the main agent workflow graph

    Topology: receive -> classify -> (one of four domain agents |
    human_handoff); agents may loop through call_tools and back;
    human_handoff and the agents funnel into send_response -> END.

    Returns:
        Compiled LangGraph workflow
    """
    # Create graph with AgentState
    graph = StateGraph(AgentState)

    # Add nodes
    graph.add_node("receive", receive_message)
    graph.add_node("classify", classify_intent)
    graph.add_node("customer_service_agent", customer_service_agent)
    graph.add_node("order_agent", order_agent)
    graph.add_node("aftersale_agent", aftersale_agent)
    graph.add_node("product_agent", product_agent)
    graph.add_node("call_tools", call_mcp_tools)
    graph.add_node("human_handoff", human_handoff)
    graph.add_node("send_response", send_response)
    graph.add_node("handle_error", handle_error)

    # Set entry point
    graph.set_entry_point("receive")

    # Add edges
    graph.add_edge("receive", "classify")

    # Conditional routing based on intent
    graph.add_conditional_edges(
        "classify",
        route_by_intent,
        {
            "customer_service_agent": "customer_service_agent",
            "order_agent": "order_agent",
            "aftersale_agent": "aftersale_agent",
            "product_agent": "product_agent",
            "human_handoff": "human_handoff"
        }
    )

    # After each agent, check if tools need to be called
    for agent_node in ["customer_service_agent", "order_agent", "aftersale_agent", "product_agent"]:
        graph.add_conditional_edges(
            agent_node,
            should_call_tools,
            {
                "call_tools": "call_tools",
                "send_response": "send_response",
                # NOTE(review): should_call_tools never returns
                # "back_to_agent" in this file, so this edge looks
                # unreachable — confirm intent.
                "back_to_agent": agent_node
            }
        )

    # After tool execution, route back to appropriate agent
    graph.add_conditional_edges(
        "call_tools",
        after_tools,
        {
            "customer_service_agent": "customer_service_agent",
            "order_agent": "order_agent",
            "aftersale_agent": "aftersale_agent",
            "product_agent": "product_agent"
        }
    )

    # Human handoff leads to send response
    graph.add_edge("human_handoff", "send_response")

    # Error handling
    # NOTE(review): no edge in this function routes INTO handle_error, and
    # check_completion is never wired as a router here — confirm whether
    # that wiring was intended but is missing.
    graph.add_edge("handle_error", END)

    # Final node
    graph.add_edge("send_response", END)

    return graph.compile()
|
||||
|
||||
|
||||
# Global compiled graph (module-level singleton; built on first use)
_compiled_graph = None


def get_agent_graph():
    """Get or create the compiled agent graph

    Lazily compiles the workflow once via create_agent_graph() and caches
    it for all subsequent calls.
    """
    global _compiled_graph
    if _compiled_graph is None:
        _compiled_graph = create_agent_graph()
    return _compiled_graph
|
||||
|
||||
|
||||
async def process_message(
    conversation_id: str,
    user_id: str,
    account_id: str,
    message: str,
    history: list[dict] = None,
    context: dict = None
) -> AgentState:
    """Process a user message through the agent workflow

    Args:
        conversation_id: Chatwoot conversation ID
        user_id: User identifier
        account_id: B2B account identifier
        message: User's message
        history: Previous conversation history
        context: Existing conversation context

    Returns:
        Final agent state with response. This function never raises: on
        workflow failure the initial state is returned with "error" set,
        "finished" True and a generic fallback "response".
    """
    # Local import — presumably to avoid a circular import at module load
    # time; TODO confirm (.state is already imported at the top of this
    # module for other names).
    from .state import create_initial_state

    # Create initial state
    initial_state = create_initial_state(
        conversation_id=conversation_id,
        user_id=user_id,
        account_id=account_id,
        current_message=message,
        messages=history,
        context=context
    )

    # Get compiled graph
    graph = get_agent_graph()

    # Run the workflow; only the first 100 chars are logged to keep log
    # lines bounded.
    logger.info(
        "Starting workflow",
        conversation_id=conversation_id,
        message=message[:100]
    )

    try:
        final_state = await graph.ainvoke(initial_state)

        logger.info(
            "Workflow completed",
            conversation_id=conversation_id,
            intent=final_state.get("intent"),
            steps=final_state.get("step_count")
        )

        return final_state

    except Exception as e:
        # Degrade gracefully: hand back a user-facing apology instead of
        # propagating the exception to the caller (e.g. webhook layer).
        logger.error("Workflow failed", error=str(e))
        initial_state["error"] = str(e)
        initial_state["response"] = "抱歉,处理您的请求时遇到了问题。请稍后重试。"
        initial_state["finished"] = True
        return initial_state
|
||||
195
agent/core/llm.py
Normal file
195
agent/core/llm.py
Normal file
@@ -0,0 +1,195 @@
|
||||
"""
|
||||
ZhipuAI LLM Client for B2B Shopping AI Assistant
|
||||
"""
|
||||
from typing import Any, AsyncGenerator, Optional
|
||||
from dataclasses import dataclass
|
||||
|
||||
from zhipuai import ZhipuAI
|
||||
|
||||
from config import settings
|
||||
from utils.logger import get_logger
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
@dataclass
class Message:
    """Chat message structure

    Converted to plain {"role", "content"} dicts before being sent to the
    ZhipuAI SDK (see ZhipuLLMClient.chat / chat_with_tools).
    """
    role: str  # "system", "user", "assistant"
    content: str  # message text
|
||||
|
||||
|
||||
@dataclass
class LLMResponse:
    """LLM response structure"""
    content: str  # generated text ("" when the model only emitted tool calls)
    finish_reason: str  # finish reason as reported by the SDK
    usage: dict[str, int]  # prompt_tokens / completion_tokens / total_tokens
|
||||
|
||||
|
||||
class ZhipuLLMClient:
    """ZhipuAI LLM Client wrapper

    Thin wrapper over the zhipuai SDK that converts Message dataclasses to
    SDK payloads and normalizes responses into LLMResponse.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        model: Optional[str] = None
    ):
        """Initialize ZhipuAI client

        Args:
            api_key: ZhipuAI API key, defaults to settings
            model: Model name, defaults to settings
        """
        self.api_key = api_key or settings.zhipu_api_key
        self.model = model or settings.zhipu_model
        self._client = ZhipuAI(api_key=self.api_key)
        logger.info("ZhipuAI client initialized", model=self.model)

    async def chat(
        self,
        messages: list[Message],
        temperature: float = 0.7,
        max_tokens: int = 2048,
        top_p: float = 0.9,
        **kwargs: Any
    ) -> LLMResponse:
        """Send chat completion request

        Args:
            messages: List of chat messages
            temperature: Sampling temperature
            max_tokens: Maximum tokens to generate
            top_p: Top-p sampling parameter
            **kwargs: Additional parameters

        Returns:
            LLM response with content and metadata

        Raises:
            Exception: re-raised from the underlying SDK after logging.
        """
        # Convert Message dataclasses to the plain dicts the SDK expects.
        formatted_messages = [
            {"role": msg.role, "content": msg.content}
            for msg in messages
        ]

        logger.debug(
            "Sending chat request",
            model=self.model,
            message_count=len(messages),
            temperature=temperature
        )

        try:
            # NOTE(review): this SDK call is synchronous (not awaited), so
            # the async method blocks the event loop for the duration of
            # the request — confirm this is acceptable, or wrap in
            # asyncio.to_thread.
            response = self._client.chat.completions.create(
                model=self.model,
                messages=formatted_messages,
                temperature=temperature,
                max_tokens=max_tokens,
                top_p=top_p,
                **kwargs
            )

            choice = response.choices[0]
            result = LLMResponse(
                content=choice.message.content,
                finish_reason=choice.finish_reason,
                usage={
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens
                }
            )

            logger.debug(
                "Chat response received",
                finish_reason=result.finish_reason,
                total_tokens=result.usage["total_tokens"]
            )

            return result

        except Exception as e:
            logger.error("Chat request failed", error=str(e))
            raise

    async def chat_with_tools(
        self,
        messages: list[Message],
        tools: list[dict[str, Any]],
        temperature: float = 0.7,
        **kwargs: Any
    ) -> tuple[LLMResponse, Optional[list[dict[str, Any]]]]:
        """Send chat completion request with tool calling

        Args:
            messages: List of chat messages
            tools: List of tool definitions
            temperature: Sampling temperature
            **kwargs: Additional parameters

        Returns:
            Tuple of (LLM response, tool calls if any)

        Raises:
            Exception: re-raised from the underlying SDK after logging.
        """
        formatted_messages = [
            {"role": msg.role, "content": msg.content}
            for msg in messages
        ]

        logger.debug(
            "Sending chat request with tools",
            model=self.model,
            tool_count=len(tools)
        )

        try:
            # NOTE(review): synchronous SDK call inside an async method —
            # see chat() above.
            response = self._client.chat.completions.create(
                model=self.model,
                messages=formatted_messages,
                tools=tools,
                temperature=temperature,
                **kwargs
            )

            choice = response.choices[0]
            # content may be None when the model answers with tool calls
            # only; normalize to "".
            result = LLMResponse(
                content=choice.message.content or "",
                finish_reason=choice.finish_reason,
                usage={
                    "prompt_tokens": response.usage.prompt_tokens,
                    "completion_tokens": response.usage.completion_tokens,
                    "total_tokens": response.usage.total_tokens
                }
            )

            # Extract tool calls if present
            tool_calls = None
            if hasattr(choice.message, 'tool_calls') and choice.message.tool_calls:
                tool_calls = [
                    {
                        "id": tc.id,
                        "type": tc.type,
                        "function": {
                            "name": tc.function.name,
                            "arguments": tc.function.arguments
                        }
                    }
                    for tc in choice.message.tool_calls
                ]
                logger.debug("Tool calls received", tool_count=len(tool_calls))

            return result, tool_calls

        except Exception as e:
            logger.error("Chat with tools request failed", error=str(e))
            raise
|
||||
|
||||
|
||||
# Global LLM client instance (lazily created by get_llm_client)
llm_client: Optional[ZhipuLLMClient] = None


def get_llm_client() -> ZhipuLLMClient:
    """Get or create global LLM client instance

    Lazily instantiates a ZhipuLLMClient (with settings-derived defaults)
    on first call; subsequent calls return the same instance.
    """
    global llm_client
    if llm_client is None:
        llm_client = ZhipuLLMClient()
    return llm_client
|
||||
272
agent/core/state.py
Normal file
272
agent/core/state.py
Normal file
@@ -0,0 +1,272 @@
|
||||
"""
|
||||
Agent state definitions for LangGraph workflow
|
||||
"""
|
||||
from typing import Any, Optional, Literal
|
||||
from typing_extensions import TypedDict, Annotated
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class Intent(str, Enum):
    """User intent categories

    str-valued so members compare and serialize as plain strings (the
    workflow stores intent.value in AgentState["intent"]).
    """
    CUSTOMER_SERVICE = "customer_service"  # FAQ, general inquiries
    ORDER = "order"  # Order query, tracking, modify, cancel
    AFTERSALE = "aftersale"  # Return, exchange, complaint
    PRODUCT = "product"  # Search, recommend, quote
    HUMAN_HANDOFF = "human_handoff"  # Transfer to human agent
    UNKNOWN = "unknown"  # Cannot determine intent
|
||||
|
||||
|
||||
class ConversationState(str, Enum):
    """Conversation state machine

    str-valued; the current value is stored in AgentState["state"]
    (e.g. INITIAL on receive, HUMAN_REVIEW on handoff, COMPLETED when
    mark_finished runs, ERROR via set_error).
    """
    INITIAL = "initial"
    CLASSIFYING = "classifying"
    PROCESSING = "processing"
    AWAITING_INFO = "awaiting_info"
    TOOL_CALLING = "tool_calling"
    GENERATING = "generating"
    HUMAN_REVIEW = "human_review"
    COMPLETED = "completed"
    ERROR = "error"
|
||||
|
||||
|
||||
@dataclass
class Entity:
    """Extracted entity from user message

    NOTE(review): add_entity stores raw {type: value} pairs in
    AgentState["entities"] rather than Entity instances — confirm where
    this class is actually constructed.
    """
    type: str  # Entity type (order_id, product_id, date, etc.)
    value: Any  # Entity value
    confidence: float  # Extraction confidence (0-1)
|
||||
|
||||
|
||||
@dataclass
class ToolCall:
    """MCP tool call record

    NOTE(review): add_tool_call appends plain dicts with these same keys
    to AgentState["tool_calls"] rather than ToolCall instances — confirm
    where this class is actually used.
    """
    tool_name: str  # tool endpoint name on the MCP server
    arguments: dict[str, Any]  # JSON-serializable call arguments
    server: str  # MCP server name (strapi, order, aftersale, product)
|
||||
|
||||
|
||||
@dataclass
class ToolResult:
    """MCP tool execution result

    NOTE(review): add_tool_result appends plain dicts with these same keys
    to AgentState["tool_results"] rather than ToolResult instances —
    confirm where this class is actually used.
    """
    tool_name: str  # tool that was executed
    success: bool  # whether the call completed without error
    data: Any  # parsed response payload (None on failure)
    error: Optional[str] = None  # error description when success is False
|
||||
|
||||
|
||||
class AgentState(TypedDict):
    """Main agent state for LangGraph workflow

    This state is passed through all nodes in the workflow graph.
    At runtime it is a plain dict; see create_initial_state for the
    canonical initial values of every field.
    """

    # ============ Session Information ============
    conversation_id: str  # Chatwoot conversation ID
    user_id: str  # User identifier
    account_id: str  # B2B account identifier

    # ============ Message Content ============
    messages: list[dict[str, Any]]  # Conversation history [{role, content}]
    current_message: str  # Current user message being processed

    # ============ Intent Recognition ============
    intent: Optional[str]  # Recognized intent (Intent enum value)
    intent_confidence: float  # Intent confidence score (0-1)
    sub_intent: Optional[str]  # Sub-intent for more specific routing

    # ============ Entity Extraction ============
    entities: dict[str, Any]  # Extracted entities {type: value}

    # ============ Agent Routing ============
    current_agent: Optional[str]  # Current processing agent name
    agent_history: list[str]  # History of agents involved

    # ============ Tool Calling ============
    tool_calls: list[dict[str, Any]]  # Pending tool calls
    tool_results: list[dict[str, Any]]  # Tool execution results

    # ============ Response Generation ============
    response: Optional[str]  # Generated response text
    response_type: str  # Response type (text, rich, action)

    # ============ Human Handoff ============
    requires_human: bool  # Whether human intervention is needed
    handoff_reason: Optional[str]  # Reason for human handoff

    # ============ Conversation Context ============
    context: dict[str, Any]  # Accumulated context (order details, etc.)

    # ============ State Control ============
    state: str  # Current conversation state (ConversationState value)
    step_count: int  # Number of steps taken
    max_steps: int  # Maximum allowed steps before the workflow stops
    error: Optional[str]  # Error message if any
    finished: bool  # Whether processing is complete
|
||||
|
||||
|
||||
def create_initial_state(
    conversation_id: str,
    user_id: str,
    account_id: str,
    current_message: str,
    messages: Optional[list[dict[str, Any]]] = None,
    context: Optional[dict[str, Any]] = None
) -> AgentState:
    """Build a fresh AgentState for one incoming message.

    Args:
        conversation_id: Chatwoot conversation ID
        user_id: User identifier
        account_id: B2B account identifier
        current_message: User's message to process
        messages: Previous conversation history, if any
        context: Existing conversation context, if any

    Returns:
        Fully initialized AgentState ready to enter the workflow graph.
    """
    state: AgentState = {
        # Session
        "conversation_id": conversation_id,
        "user_id": user_id,
        "account_id": account_id,
        # Messages
        "messages": messages or [],
        "current_message": current_message,
        # Intent recognition (filled in by the classifier)
        "intent": None,
        "intent_confidence": 0.0,
        "sub_intent": None,
        # Entities
        "entities": {},
        # Agent routing
        "current_agent": None,
        "agent_history": [],
        # Tool calling
        "tool_calls": [],
        "tool_results": [],
        # Response
        "response": None,
        "response_type": "text",
        # Human handoff
        "requires_human": False,
        "handoff_reason": None,
        # Context
        "context": context or {},
        # Control
        "state": ConversationState.INITIAL.value,
        "step_count": 0,
        "max_steps": 10,
        "error": None,
        "finished": False,
    }
    return state
|
||||
|
||||
|
||||
# ============ State Update Helpers ============
|
||||
|
||||
def add_message(state: AgentState, role: str, content: str) -> AgentState:
    """Append one chat turn to the conversation history and return the state."""
    entry = {"role": role, "content": content}
    state["messages"].append(entry)
    return state
|
||||
|
||||
|
||||
def set_intent(
    state: AgentState,
    intent: Intent,
    confidence: float,
    sub_intent: Optional[str] = None
) -> AgentState:
    """Record the classified intent (and optional sub-intent) on the state."""
    state.update(
        intent=intent.value,
        intent_confidence=confidence,
        sub_intent=sub_intent,
    )
    return state
|
||||
|
||||
|
||||
def add_entity(state: AgentState, entity_type: str, value: Any) -> AgentState:
    """Store one extracted entity under its type key (overwriting duplicates)."""
    entities = state["entities"]
    entities[entity_type] = value
    return state
|
||||
|
||||
|
||||
def add_tool_call(
    state: AgentState,
    tool_name: str,
    arguments: dict[str, Any],
    server: str
) -> AgentState:
    """Queue an MCP tool invocation for the tool-execution node."""
    pending = {
        "tool_name": tool_name,
        "arguments": arguments,
        "server": server,
    }
    state["tool_calls"].append(pending)
    return state
|
||||
|
||||
|
||||
def add_tool_result(
    state: AgentState,
    tool_name: str,
    success: bool,
    data: Any,
    error: Optional[str] = None
) -> AgentState:
    """Record the outcome of one MCP tool execution on the state."""
    state["tool_results"].append(
        {"tool_name": tool_name, "success": success, "data": data, "error": error}
    )
    return state
|
||||
|
||||
|
||||
def set_response(
    state: AgentState,
    response: str,
    response_type: str = "text"
) -> AgentState:
    """Store the generated reply and its presentation type on the state."""
    state.update(response=response, response_type=response_type)
    return state
|
||||
|
||||
|
||||
def request_human_handoff(
    state: AgentState,
    reason: str
) -> AgentState:
    """Flag the conversation for takeover by a human agent, recording why."""
    state.update(requires_human=True, handoff_reason=reason)
    return state
|
||||
|
||||
|
||||
def update_context(state: AgentState, updates: dict[str, Any]) -> AgentState:
    """Merge *updates* into the accumulated conversation context."""
    for key, value in updates.items():
        state["context"][key] = value
    return state
|
||||
|
||||
|
||||
def set_error(state: AgentState, error: str) -> AgentState:
    """Record a failure message and move the state machine to ERROR."""
    state.update(error=error, state=ConversationState.ERROR.value)
    return state
|
||||
|
||||
|
||||
def mark_finished(state: AgentState) -> AgentState:
    """Flag the run as done and move the state machine to COMPLETED."""
    state.update(finished=True, state=ConversationState.COMPLETED.value)
    return state
|
||||
Reference in New Issue
Block a user