Initial commit: Add logistics and order_detail message types
Some checks failed
Lock Threads / action (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled
Publish Chatwoot EE docker images / build (linux/amd64, ubuntu-latest) (push) Has been cancelled
Publish Chatwoot EE docker images / build (linux/arm64, ubuntu-22.04-arm) (push) Has been cancelled
Publish Chatwoot EE docker images / merge (push) Has been cancelled
Publish Chatwoot CE docker images / build (linux/amd64, ubuntu-latest) (push) Has been cancelled
Publish Chatwoot CE docker images / build (linux/arm64, ubuntu-22.04-arm) (push) Has been cancelled
Publish Chatwoot CE docker images / merge (push) Has been cancelled
Run Chatwoot CE spec / lint-backend (push) Has been cancelled
Run Chatwoot CE spec / lint-frontend (push) Has been cancelled
Run Chatwoot CE spec / frontend-tests (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (0, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (1, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (10, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (11, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (12, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (13, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (14, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (15, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (2, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (3, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (4, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (5, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (6, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (7, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (8, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (9, 16) (push) Has been cancelled
Run Linux nightly installer / nightly (push) Has been cancelled

- Add Logistics component with progress tracking
- Add OrderDetail component for order information
- Support data-driven steps and actions
- Add blue color scale to widget SCSS
- Fix node overflow and progress bar rendering issues
- Add English translations for dashboard components

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
Liang XJ
2026-01-26 11:16:56 +08:00
commit 092fb2e083
7646 changed files with 975643 additions and 0 deletions

View File

@@ -0,0 +1,130 @@
require 'rails_helper'

RSpec.describe Internal::AccountAnalysis::AccountUpdaterService do
  let(:account) { create(:account) }
  let(:updater) { described_class.new(account) }
  # Stubbed Discord notifier: examples assert whether it was invoked, never on delivery.
  let(:notifier) do
    instance_double(Internal::AccountAnalysis::DiscordNotifierService, notify_flagged_account: true)
  end

  before do
    allow(Internal::AccountAnalysis::DiscordNotifierService).to receive(:new).and_return(notifier)
    allow(Rails.logger).to receive(:info)
  end

  # Shared assertions for every analysis outcome that should flag the account.
  shared_examples 'flags the account' do |summary|
    it 'flags the account and notifies Discord' do
      updater.update_with_analysis(analysis)

      expect(account.internal_attributes['security_flagged']).to be true
      expect(account.internal_attributes['security_flag_reason']).to eq("Threat detected: #{summary}")
      expect(notifier).to have_received(:notify_flagged_account).with(account)
    end
  end

  describe '#update_with_analysis' do
    context 'when error_message is provided' do
      it 'saves the error and notifies Discord' do
        updater.update_with_analysis({}, 'Analysis failed')

        expect(account.internal_attributes['security_flagged']).to be true
        expect(account.internal_attributes['security_flag_reason']).to eq('Error: Analysis failed')
        expect(notifier).to have_received(:notify_flagged_account).with(account)
      end
    end

    context 'when analysis is successful' do
      let(:analysis) do
        {
          'threat_level' => 'none',
          'threat_summary' => 'No threats detected',
          'recommendation' => 'allow'
        }
      end

      it 'saves the analysis results' do
        # Time.current is stubbed with a plain string so the persisted scan timestamp
        # can be compared with eq below — assumes the service stores it verbatim.
        allow(Time).to receive(:current).and_return('2023-01-01 12:00:00')

        updater.update_with_analysis(analysis)

        expect(account.internal_attributes['last_threat_scan_at']).to eq('2023-01-01 12:00:00')
        expect(account.internal_attributes['last_threat_scan_level']).to eq('none')
        expect(account.internal_attributes['last_threat_scan_summary']).to eq('No threats detected')
        expect(account.internal_attributes['last_threat_scan_recommendation']).to eq('allow')
      end

      it 'does not flag the account when threat level is none' do
        updater.update_with_analysis(analysis)

        expect(account.internal_attributes).not_to include('security_flagged')
        expect(notifier).not_to have_received(:notify_flagged_account)
      end
    end

    context 'when analysis detects high threat level' do
      let(:analysis) do
        {
          'threat_level' => 'high',
          'threat_summary' => 'Suspicious activity detected',
          'recommendation' => 'review',
          'illegal_activities_detected' => false
        }
      end

      # Kept explicit (not the shared example) because this outcome additionally
      # asserts the two log lines emitted while flagging.
      it 'flags the account and notifies Discord' do
        updater.update_with_analysis(analysis)

        expect(account.internal_attributes['security_flagged']).to be true
        expect(account.internal_attributes['security_flag_reason']).to eq('Threat detected: Suspicious activity detected')
        expect(notifier).to have_received(:notify_flagged_account).with(account)
        expect(Rails.logger).to have_received(:info).with("Flagging account #{account.id} due to threat level: high")
        expect(Rails.logger).to have_received(:info).with("Account #{account.id} has been flagged for security review")
      end
    end

    context 'when analysis detects medium threat level' do
      let(:analysis) do
        {
          'threat_level' => 'medium',
          'threat_summary' => 'Potential issues found',
          'recommendation' => 'review',
          'illegal_activities_detected' => false
        }
      end

      include_examples 'flags the account', 'Potential issues found'
    end

    context 'when analysis detects illegal activities' do
      let(:analysis) do
        {
          'threat_level' => 'low',
          'threat_summary' => 'Minor issues found',
          'recommendation' => 'review',
          'illegal_activities_detected' => true
        }
      end

      include_examples 'flags the account', 'Minor issues found'
    end

    context 'when analysis recommends blocking' do
      let(:analysis) do
        {
          'threat_level' => 'low',
          'threat_summary' => 'Minor issues found',
          'recommendation' => 'block',
          'illegal_activities_detected' => false
        }
      end

      include_examples 'flags the account', 'Minor issues found'
    end
  end
end

View File

@@ -0,0 +1,199 @@
require 'rails_helper'

RSpec.describe Internal::AccountAnalysis::ContentEvaluatorService do
  let(:service) { described_class.new }
  let(:content) { 'This is some test content' }
  # Default moderation verdict: nothing flagged.
  let(:mock_moderation_result) { moderation_double(flagged: false) }

  # Builds a verifying double of RubyLLM::Moderation with the given verdict.
  def moderation_double(flagged:, categories: [], scores: {})
    instance_double(
      RubyLLM::Moderation,
      flagged?: flagged,
      flagged_categories: categories,
      category_scores: scores
    )
  end

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')
    allow(RubyLLM).to receive(:moderate).and_return(mock_moderation_result)
  end

  describe '#evaluate' do
    context 'when content is safe' do
      it 'returns safe evaluation with approval recommendation' do
        result = service.evaluate(content)

        expect(result).to include(
          'threat_level' => 'safe',
          'threat_summary' => 'No threats detected',
          'detected_threats' => [],
          'illegal_activities_detected' => false,
          'recommendation' => 'approve'
        )
      end

      it 'logs the evaluation results' do
        expect(Rails.logger).to receive(:info).with('Moderation evaluation - Level: safe, Threats: ')

        service.evaluate(content)
      end
    end

    context 'when content is flagged' do
      let(:mock_moderation_result) do
        moderation_double(flagged: true, categories: %w[harassment hate],
                          scores: { 'harassment' => 0.6, 'hate' => 0.3 })
      end

      it 'returns flagged evaluation with review recommendation' do
        result = service.evaluate(content)

        expect(result).to include(
          'threat_level' => 'high',
          'threat_summary' => 'Content flagged for: harassment, hate',
          'detected_threats' => %w[harassment hate],
          'illegal_activities_detected' => false,
          'recommendation' => 'review'
        )
      end
    end

    context 'when content contains violence' do
      let(:mock_moderation_result) do
        moderation_double(flagged: true, categories: ['violence'], scores: { 'violence' => 0.9 })
      end

      it 'marks illegal activities detected for violence' do
        result = service.evaluate(content)

        expect(result['illegal_activities_detected']).to be true
        expect(result['threat_level']).to eq('critical')
      end
    end

    context 'when content contains self-harm' do
      let(:mock_moderation_result) do
        moderation_double(flagged: true, categories: ['self-harm'], scores: { 'self-harm' => 0.85 })
      end

      it 'marks illegal activities detected for self-harm' do
        result = service.evaluate(content)

        expect(result['illegal_activities_detected']).to be true
      end
    end

    context 'when content is blank' do
      let(:blank_content) { '' }

      it 'returns default evaluation without calling moderation API' do
        expect(RubyLLM).not_to receive(:moderate)

        result = service.evaluate(blank_content)

        expect(result).to include(
          'threat_level' => 'unknown',
          'threat_summary' => 'Failed to complete content evaluation',
          'detected_threats' => [],
          'illegal_activities_detected' => false,
          'recommendation' => 'review'
        )
      end
    end

    context 'when error occurs during evaluation' do
      before do
        allow(RubyLLM).to receive(:moderate).and_raise(StandardError.new('Test error'))
      end

      it 'logs error and returns default evaluation with error type' do
        expect(Rails.logger).to receive(:error).with('Error evaluating content: Test error')

        result = service.evaluate(content)

        expect(result).to include(
          'threat_level' => 'unknown',
          'threat_summary' => 'Failed to complete content evaluation',
          'detected_threats' => ['evaluation_failure'],
          'illegal_activities_detected' => false,
          'recommendation' => 'review'
        )
      end
    end

    context 'with threat level determination' do
      # Re-stubs moderation with a single flagged category at the given score.
      def stub_moderation_score(score)
        allow(RubyLLM).to receive(:moderate).and_return(
          moderation_double(flagged: true, categories: ['harassment'], scores: { 'harassment' => score })
        )
      end

      it 'returns critical for scores >= 0.8' do
        stub_moderation_score(0.85)

        expect(service.evaluate(content)['threat_level']).to eq('critical')
      end

      it 'returns high for scores between 0.5 and 0.8' do
        stub_moderation_score(0.65)

        expect(service.evaluate(content)['threat_level']).to eq('high')
      end

      it 'returns medium for scores between 0.2 and 0.5' do
        stub_moderation_score(0.35)

        expect(service.evaluate(content)['threat_level']).to eq('medium')
      end

      it 'returns low for scores below 0.2' do
        stub_moderation_score(0.15)

        expect(service.evaluate(content)['threat_level']).to eq('low')
      end
    end

    context 'with content truncation' do
      let(:long_content) { 'a' * 15_000 }

      it 'truncates content to 10000 characters before sending to moderation' do
        expect(RubyLLM).to receive(:moderate).with('a' * 10_000).and_return(mock_moderation_result)

        service.evaluate(long_content)
      end
    end
  end
end

View File

@@ -0,0 +1,73 @@
require 'rails_helper'

RSpec.describe Internal::AccountAnalysis::DiscordNotifierService do
  let(:service) { described_class.new }
  let(:webhook_url) { 'https://discord.com/api/webhooks/123456789/some-token' }
  # Account pre-populated with scan results that the notification message embeds.
  let(:account) do
    create(
      :account,
      internal_attributes: {
        'last_threat_scan_level' => 'high',
        'last_threat_scan_recommendation' => 'review',
        'illegal_activities_detected' => true,
        'last_threat_scan_summary' => 'Suspicious activity detected'
      }
    )
  end
  # Eagerly created (let!) so the notification can reference the account's user email.
  let!(:user) { create(:user, account: account) }

  before do
    allow(Rails.logger).to receive(:info)
    allow(Rails.logger).to receive(:error)
  end

  describe '#notify_flagged_account' do
    context 'when webhook URL is configured' do
      before do
        create(:installation_config, name: 'ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL', value: webhook_url)
        stub_request(:post, webhook_url).to_return(status: 200)
      end

      it 'sends notification to Discord webhook' do
        service.notify_flagged_account(account)

        # Message body must carry every piece of scan context for the reviewer.
        expect(WebMock).to have_requested(:post, webhook_url)
          .with(
            body: hash_including(
              content: include(
                "Account ID: #{account.id}",
                "User Email: #{user.email}",
                'Threat Level: high',
                '**System Recommendation:** review',
                '⚠️ Potential illegal activities detected',
                'Suspicious activity detected'
              )
            )
          )
      end
    end

    context 'when webhook URL is not configured' do
      it 'logs error and does not make HTTP request' do
        service.notify_flagged_account(account)

        expect(Rails.logger).to have_received(:error)
          .with('Cannot send Discord notification: No webhook URL configured')
        expect(WebMock).not_to have_requested(:post, webhook_url)
      end
    end

    context 'when HTTP request fails' do
      before do
        create(:installation_config, name: 'ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL', value: webhook_url)
        stub_request(:post, webhook_url).to_raise(StandardError.new('Connection failed'))
      end

      it 'catches exception and logs error' do
        # The notifier must swallow transport errors rather than propagate them.
        service.notify_flagged_account(account)

        expect(Rails.logger).to have_received(:error)
          .with('Error sending Discord notification: Connection failed')
      end
    end
  end
end

View File

@@ -0,0 +1,62 @@
# frozen_string_literal: true

require 'rails_helper'

RSpec.describe Internal::AccountAnalysis::ThreatAnalyserService do
  subject(:analyser) { described_class.new(account) }

  let(:account) { create(:account) }
  let(:user) { create(:user, email: 'test@example.com', account: account) }
  # All collaborators are doubled; this spec verifies orchestration only.
  let(:website_scraper) { instance_double(Internal::AccountAnalysis::WebsiteScraperService) }
  let(:content_evaluator) { instance_double(Internal::AccountAnalysis::ContentEvaluatorService) }
  let(:account_updater) { instance_double(Internal::AccountAnalysis::AccountUpdaterService) }
  let(:website_content) { 'This is the website content' }
  let(:threat_analysis) { { 'threat_level' => 'medium' } }

  before do
    user # force creation so the account has a user before the service runs
    allow(Internal::AccountAnalysis::WebsiteScraperService).to receive(:new).with('example.com').and_return(website_scraper)
    allow(Internal::AccountAnalysis::ContentEvaluatorService).to receive(:new).and_return(content_evaluator)
    allow(Internal::AccountAnalysis::AccountUpdaterService).to receive(:new).with(account).and_return(account_updater)
  end

  describe '#perform' do
    before do
      allow(website_scraper).to receive(:perform).and_return(website_content)
      allow(content_evaluator).to receive(:evaluate).and_return(threat_analysis)
      allow(account_updater).to receive(:update_with_analysis)
      allow(Rails.logger).to receive(:info)
    end

    it 'performs threat analysis and updates the account' do
      # Exact payload handed to the evaluator: domain header plus scraped content.
      expected_content = <<~MESSAGE
        Domain: example.com
        Content: This is the website content
      MESSAGE

      expect(website_scraper).to receive(:perform)
      expect(content_evaluator).to receive(:evaluate).with(expected_content)
      expect(account_updater).to receive(:update_with_analysis).with(threat_analysis)
      expect(Rails.logger).to receive(:info).with("Completed threat analysis: level=medium for account-id: #{account.id}")

      expect(analyser.perform).to eq(threat_analysis)
    end

    context 'when website content is blank' do
      before do
        allow(website_scraper).to receive(:perform).and_return(nil)
      end

      it 'logs info and updates account with error' do
        expect(Rails.logger).to receive(:info).with("Skipping threat analysis for account #{account.id}: No website content found")
        expect(account_updater).to receive(:update_with_analysis).with(nil, 'Scraping error: No content found')
        expect(content_evaluator).not_to receive(:evaluate)

        expect(analyser.perform).to be_nil
      end
    end
  end
end

View File

@@ -0,0 +1,45 @@
require 'rails_helper'

RSpec.describe Internal::AccountAnalysis::WebsiteScraperService do
  describe '#perform' do
    # The subject expression performs the scrape for whichever domain the
    # enclosing context defines.
    subject(:scrape) { described_class.new(domain).perform }

    let(:html_content) { '<html><body>This is sample website content</body></html>' }

    before do
      allow(Rails.logger).to receive(:info)
      allow(Rails.logger).to receive(:error)
    end

    context 'when domain is nil' do
      let(:domain) { nil }

      it 'returns nil' do
        expect(scrape).to be_nil
      end
    end

    context 'when domain is present' do
      let(:domain) { 'example.com' }

      before do
        # HTTParty.get is stubbed to return the markup directly; for this fixture
        # the "stripped and normalized" output presumably equals the input —
        # NOTE(review): confirm against the service's actual normalization.
        allow(HTTParty).to receive(:get).and_return(html_content)
      end

      it 'returns the stripped and normalized content' do
        expect(scrape).to eq(html_content)
      end
    end

    context 'when an error occurs' do
      let(:domain) { 'example.com' }

      before do
        allow(HTTParty).to receive(:get).and_raise(StandardError.new('Error'))
      end

      it 'returns nil' do
        # Scraper failures are swallowed and reported as "no content".
        expect(scrape).to be_nil
      end
    end
  end
end