# assistant/docker-compose.prod.yml
# NOTE(review): the top-level `version` key is obsolete in Compose v2 and
# ignored by current Docker; kept for compatibility with older binaries.
version: '3.8'

services:
  # ============ Infrastructure ============
  # Redis (cache & queue) - production configuration
  redis:
    image: redis:7-alpine
    container_name: ai_redis_prod
    # AOF persistence enabled; password comes from the environment with an
    # insecure hard-coded fallback - always set REDIS_PASSWORD in production.
    command: redis-server --appendonly yes --requirepass ${REDIS_PASSWORD:-prod_redis_password_2024}
    volumes:
      - redis_data_prod:/data
    networks:
      - ai_network_prod
    healthcheck:
      # NOTE(review): `-a <password>` exposes the password in the container's
      # process list and makes redis-cli print a warning; consider setting
      # REDISCLI_AUTH instead.
      test: ["CMD", "redis-cli", "-a", "${REDIS_PASSWORD:-prod_redis_password_2024}", "ping"]
      interval: 10s
      timeout: 3s
      retries: 5
    restart: always
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

# ============ AI Agent Layer ============
  # LangGraph agent main service - production
  agent:
    build:
      context: ./agent
      dockerfile: Dockerfile
      args:
        - ENVIRONMENT=production
    image: ai-agent:latest
    container_name: ai_agent_prod
    environment:
      # AI model
      ZHIPU_API_KEY: ${ZHIPU_API_KEY}
      ZHIPU_MODEL: ${ZHIPU_MODEL:-GLM-4-Flash-250414}
      ENABLE_REASONING_MODE: ${ENABLE_REASONING_MODE:-false}
      REASONING_MODE_FOR_COMPLEX: ${REASONING_MODE_FOR_COMPLEX:-true}
      # Redis (numeric values quoted so YAML keeps them as strings)
      REDIS_HOST: redis
      REDIS_PORT: "6379"
      REDIS_PASSWORD: ${REDIS_PASSWORD:-prod_redis_password_2024}
      REDIS_DB: "0"
      # Chatwoot (production)
      CHATWOOT_API_URL: ${CHATWOOT_API_URL}
      CHATWOOT_API_TOKEN: ${CHATWOOT_API_TOKEN}
      CHATWOOT_WEBHOOK_SECRET: ${CHATWOOT_WEBHOOK_SECRET}
      CHATWOOT_ACCOUNT_ID: ${CHATWOOT_ACCOUNT_ID:-1}
      # External APIs
      STRAPI_API_URL: ${STRAPI_API_URL}
      STRAPI_API_TOKEN: ${STRAPI_API_TOKEN}
      HYPERF_API_URL: ${HYPERF_API_URL}
      HYPERF_API_TOKEN: ${HYPERF_API_TOKEN}
      # Mall API
      MALL_API_URL: ${MALL_API_URL}
      MALL_TENANT_ID: ${MALL_TENANT_ID:-2}
      MALL_CURRENCY_CODE: ${MALL_CURRENCY_CODE:-EUR}
      MALL_LANGUAGE_ID: ${MALL_LANGUAGE_ID:-1}
      MALL_SOURCE: ${MALL_SOURCE:-www.gaia888.com}
      # Frontend URLs
      FRONTEND_URL: ${FRONTEND_URL:-https://www.gaia888.com}
      # MCP servers - internal service DNS names on ai_network_prod
      STRAPI_MCP_URL: "http://strapi_mcp:8001"
      ORDER_MCP_URL: "http://order_mcp:8002"
      AFTERSALE_MCP_URL: "http://aftersale_mcp:8003"
      PRODUCT_MCP_URL: "http://product_mcp:8004"
      # Config
      LOG_LEVEL: ${LOG_LEVEL:-WARNING}
      MAX_CONVERSATION_STEPS: ${MAX_CONVERSATION_STEPS:-10}
      CONVERSATION_TIMEOUT: ${CONVERSATION_TIMEOUT:-3600}
      # Production specific
      ENVIRONMENT: production
      SENTRY_DSN: ${SENTRY_DSN}
    ports:
      - "8000:8000"
    volumes:
      - agent_logs_prod:/app/logs
    depends_on:
      # Redis must pass its healthcheck before the agent starts; the MCP
      # servers only need to have been started.
      redis:
        condition: service_healthy
      strapi_mcp:
        condition: service_started
      order_mcp:
        condition: service_started
      aftersale_mcp:
        condition: service_started
      product_mcp:
        condition: service_started
    networks:
      - ai_network_prod
    restart: always
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 2G
        reservations:
          cpus: '0.5'
          memory: 512M

# ============ MCP Servers ============
  # Strapi MCP (FAQ / knowledge base) - production
  strapi_mcp:
    build:
      context: ./mcp_servers/strapi_mcp
      dockerfile: Dockerfile
    image: ai-strapi-mcp:latest
    container_name: ai_strapi_mcp_prod
    environment:
      STRAPI_API_URL: ${STRAPI_API_URL}
      STRAPI_API_TOKEN: ${STRAPI_API_TOKEN}
      LOG_LEVEL: ${LOG_LEVEL:-WARNING}
      ENVIRONMENT: production
    ports:
      - "8001:8001"
    volumes:
      # Shared helper code mounted read-only into every MCP server.
      - ./mcp_servers/shared:/app/shared:ro
    networks:
      - ai_network_prod
    restart: always
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8001/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 512M
        reservations:
          cpus: '0.25'
          memory: 256M

# Order MCP - production
  order_mcp:
    build:
      context: ./mcp_servers/order_mcp
      dockerfile: Dockerfile
    image: ai-order-mcp:latest
    container_name: ai_order_mcp_prod
    environment:
      HYPERF_API_URL: ${HYPERF_API_URL}
      HYPERF_API_TOKEN: ${HYPERF_API_TOKEN}
      MALL_API_URL: ${MALL_API_URL}
      MALL_TENANT_ID: ${MALL_TENANT_ID:-2}
      MALL_CURRENCY_CODE: ${MALL_CURRENCY_CODE:-EUR}
      MALL_LANGUAGE_ID: ${MALL_LANGUAGE_ID:-1}
      MALL_SOURCE: ${MALL_SOURCE:-www.gaia888.com}
      LOG_LEVEL: ${LOG_LEVEL:-WARNING}
      ENVIRONMENT: production
    ports:
      - "8002:8002"
    volumes:
      # Shared helper code mounted read-only into every MCP server.
      - ./mcp_servers/shared:/app/shared:ro
    networks:
      - ai_network_prod
    restart: always
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8002/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 512M
        reservations:
          cpus: '0.25'
          memory: 256M

# Aftersale MCP - production
  aftersale_mcp:
    build:
      context: ./mcp_servers/aftersale_mcp
      dockerfile: Dockerfile
    image: ai-aftersale-mcp:latest
    container_name: ai_aftersale_mcp_prod
    environment:
      HYPERF_API_URL: ${HYPERF_API_URL}
      HYPERF_API_TOKEN: ${HYPERF_API_TOKEN}
      LOG_LEVEL: ${LOG_LEVEL:-WARNING}
      ENVIRONMENT: production
    ports:
      - "8003:8003"
    volumes:
      # Shared helper code mounted read-only into every MCP server.
      - ./mcp_servers/shared:/app/shared:ro
    networks:
      - ai_network_prod
    restart: always
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8003/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 512M
        reservations:
          cpus: '0.25'
          memory: 256M

# Product MCP - production
  product_mcp:
    build:
      context: ./mcp_servers/product_mcp
      dockerfile: Dockerfile
    image: ai-product-mcp:latest
    container_name: ai_product_mcp_prod
    environment:
      HYPERF_API_URL: ${HYPERF_API_URL}
      HYPERF_API_TOKEN: ${HYPERF_API_TOKEN}
      LOG_LEVEL: ${LOG_LEVEL:-WARNING}
      ENVIRONMENT: production
    ports:
      - "8004:8004"
    volumes:
      # Shared helper code mounted read-only into every MCP server.
      - ./mcp_servers/shared:/app/shared:ro
    networks:
      - ai_network_prod
    restart: always
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8004/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 512M
        reservations:
          cpus: '0.25'
          memory: 256M

# ============ Monitoring (Optional) ============
  # Prometheus - metrics collection (enable with `--profile monitoring`)
  prometheus:
    image: prom/prometheus:latest
    container_name: ai_prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/usr/share/prometheus/console_libraries'
      - '--web.console.templates=/usr/share/prometheus/consoles'
    ports:
      - "9090:9090"
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    networks:
      - ai_network_prod
    restart: always
    profiles:
      - monitoring

# Grafana - monitoring dashboards (enable with `--profile monitoring`)
  grafana:
    image: grafana/grafana:latest
    container_name: ai_grafana
    # Mapping form used for consistency with the other services in this file.
    environment:
      # NOTE(review): fallback admin password is "admin" - set
      # GRAFANA_ADMIN_PASSWORD before exposing this service.
      GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD:-admin}
      GF_USERS_ALLOW_SIGN_UP: "false"
    ports:
      - "3001:3000"
    volumes:
      - grafana_data:/var/lib/grafana
      - ./monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
      - ./monitoring/grafana/datasources:/etc/grafana/provisioning/datasources:ro
    networks:
      - ai_network_prod
    restart: always
    profiles:
      - monitoring

networks:
  # Dedicated bridge network so production containers resolve each other by
  # service name while staying isolated from other compose projects.
  ai_network_prod:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16

# Named volumes persist data across container recreation.
volumes:
  redis_data_prod:
  agent_logs_prod:
  prometheus_data:
  grafana_data: