2026-01-14 19:25:22 +08:00
"""
Customer Service Agent - Handles FAQ and general inquiries
"""
import json
from typing import Any
from core . state import AgentState , ConversationState , add_tool_call , set_response
from core . llm import get_llm_client , Message
2026-01-16 16:28:47 +08:00
from prompts import get_prompt
2026-01-14 19:25:22 +08:00
from utils . logger import get_logger
logger = get_logger ( __name__ )
async def customer_service_agent(state: AgentState) -> AgentState:
    """Customer service agent node.

    Handles FAQ, company info, and general inquiries using Strapi MCP tools.

    Flow:
      1. If tool results are already present, delegate to
         ``_generate_response_from_results`` to produce the user-facing reply.
      2. Otherwise, try keyword-based auto-detection: either an FAQ category
         or a company-info request, enqueueing the matching tool call.
      3. Fall back to the LLM, which decides between calling a tool,
         responding directly, or handing off to a human.

    Args:
        state: Current agent state.

    Returns:
        Updated state with tool calls or response.
    """
    logger.info(
        "Customer service agent processing",
        conversation_id=state["conversation_id"]
    )

    state["current_agent"] = "customer_service"
    state["agent_history"].append("customer_service")
    state["state"] = ConversationState.PROCESSING.value

    # Check if we have tool results to process
    if state["tool_results"]:
        return await _generate_response_from_results(state)

    # Get detected language
    locale = state.get("detected_language", "en")

    # Auto-detect category and query FAQ
    message_lower = state["current_message"].lower()

    # Keyword map for FAQ category auto-detection
    category_keywords = {
        "register": ["register", "sign up", "account", "login", "password", "forgot"],
        "order": ["order", "place order", "cancel order", "modify order", "change order"],
        "payment": ["pay", "payment", "checkout", "voucher", "discount", "promo"],
        "shipment": ["ship", "shipping", "delivery", "courier", "transit", "logistics", "tracking"],
        "return": ["return", "refund", "exchange", "defective", "damaged"],
    }

    # Detect the first matching category (dict order defines priority)
    detected_category = None
    for category, keywords in category_keywords.items():
        if any(keyword in message_lower for keyword in keywords):
            detected_category = category
            break

    # Check whether an FAQ/knowledge-base query was already issued,
    # to avoid looping on the same tool call
    tool_calls = state.get("tool_calls", [])
    has_faq_query = any(
        tc.get("tool_name") in ["query_faq", "search_knowledge_base"]
        for tc in tool_calls
    )

    # If a category was detected and the FAQ has not been queried yet,
    # query it automatically without a round-trip through the LLM
    if detected_category and not has_faq_query:
        logger.info(
            f"Auto-querying FAQ for category: {detected_category}",
            conversation_id=state["conversation_id"]
        )
        state = add_tool_call(
            state,
            tool_name="query_faq",
            arguments={
                "category": detected_category,
                "locale": locale,
                "limit": 5
            },
            server="strapi"
        )
        state["state"] = ConversationState.TOOL_CALLING.value
        return state

    # If the user asks about opening hours or contact details,
    # auto-query company info
    if any(keyword in message_lower for keyword in ["opening hour", "contact", "address", "phone", "email"]) and not has_faq_query:
        logger.info(
            "Auto-querying company info",
            conversation_id=state["conversation_id"]
        )
        state = add_tool_call(
            state,
            tool_name="get_company_info",
            arguments={
                "section": "contact",
                "locale": locale
            },
            server="strapi"
        )
        state["state"] = ConversationState.TOOL_CALLING.value
        return state

    # Build messages for LLM
    # Load prompt in detected language
    system_prompt = get_prompt("customer_service", locale)

    messages = [
        Message(role="system", content=system_prompt),
    ]

    # Add recent conversation history (last 6 messages) for context
    for msg in state["messages"][-6:]:
        messages.append(Message(role=msg["role"], content=msg["content"]))

    # Add current message
    messages.append(Message(role="user", content=state["current_message"]))

    try:
        llm = get_llm_client()
        response = await llm.chat(messages, temperature=0.7)

        # Parse response; the LLM may wrap its JSON in a ``` code fence
        content = response.content.strip()
        if content.startswith("```"):
            content = content.split("```")[1]
            if content.startswith("json"):
                content = content[4:]
            # Strip leftover whitespace/newlines from fence removal
            content = content.strip()

        result = json.loads(content)
        action = result.get("action")

        if action == "call_tool":
            # Add tool call to state
            state = add_tool_call(
                state,
                tool_name=result["tool_name"],
                arguments=result.get("arguments", {}),
                server="strapi"
            )
            state["state"] = ConversationState.TOOL_CALLING.value
        elif action == "respond":
            state = set_response(state, result["response"])
            state["state"] = ConversationState.GENERATING.value
        elif action == "handoff":
            state["requires_human"] = True
            state["handoff_reason"] = result.get("reason", "User request")
        else:
            # Unknown action from LLM: surface it instead of failing silently
            logger.warning(
                f"Unknown action from LLM: {action}",
                conversation_id=state["conversation_id"]
            )

        return state

    except json.JSONDecodeError:
        # LLM returned plain text, use as response
        state = set_response(state, response.content)
        return state
    except Exception as e:
        logger.error(
            "Customer service agent failed",
            error=str(e),
            conversation_id=state["conversation_id"]
        )
        state["error"] = str(e)
        return state
async def _generate_response_from_results(state: AgentState) -> AgentState:
    """Compose the final user reply from accumulated tool results.

    Summarizes each tool result (success payload as pretty-printed JSON,
    failures as a one-line error note), feeds the summary plus the user's
    question to the LLM, and stores the generated reply on the state.

    Args:
        state: Current agent state containing ``tool_results``.

    Returns:
        State updated with the generated (or fallback) response.
    """
    # Summarize each tool result for the LLM prompt
    summaries = []
    for item in state["tool_results"]:
        if item["success"]:
            payload = json.dumps(item["data"], ensure_ascii=False, indent=2)
            summaries.append(f"Tool {item['tool_name']} returned:\n{payload}")
        else:
            summaries.append(f"Tool {item['tool_name']} failed: {item['error']}")

    joined_summaries = "\n".join(summaries)
    prompt = f"""Based on the following tool returned information, generate a response to the user.

User question: {state["current_message"]}
Tool returned information:
{joined_summaries}

Please generate a friendly and professional response. If the tool did not return useful information, honestly inform the user and suggest other ways to get help.
Return only the response content, do not return JSON."""

    messages = [
        Message(
            role="system",
            content="You are a professional B2B customer service assistant, please answer user questions based on tool returned information.",
        ),
        Message(role="user", content=prompt),
    ]

    try:
        client = get_llm_client()
        reply = await client.chat(messages, temperature=0.7)
        return set_response(state, reply.content)
    except Exception as exc:
        logger.error("Response generation failed", error=str(exc))
        # Fall back to a generic apology so the user always gets an answer
        return set_response(
            state,
            "Sorry, there was a problem processing your request. Please try again later or contact customer support.",
        )