Initial commit: Add logistics and order_detail message types
Some checks failed
Lock Threads / action (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled
Publish Chatwoot EE docker images / build (linux/amd64, ubuntu-latest) (push) Has been cancelled
Publish Chatwoot EE docker images / build (linux/arm64, ubuntu-22.04-arm) (push) Has been cancelled
Publish Chatwoot EE docker images / merge (push) Has been cancelled
Publish Chatwoot CE docker images / build (linux/amd64, ubuntu-latest) (push) Has been cancelled
Publish Chatwoot CE docker images / build (linux/arm64, ubuntu-22.04-arm) (push) Has been cancelled
Publish Chatwoot CE docker images / merge (push) Has been cancelled
Run Chatwoot CE spec / lint-backend (push) Has been cancelled
Run Chatwoot CE spec / lint-frontend (push) Has been cancelled
Run Chatwoot CE spec / frontend-tests (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (0, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (1, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (10, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (11, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (12, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (13, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (14, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (15, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (2, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (3, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (4, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (5, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (6, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (7, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (8, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (9, 16) (push) Has been cancelled
Run Linux nightly installer / nightly (push) Has been cancelled

- Add Logistics component with progress tracking
- Add OrderDetail component for order information
- Support data-driven steps and actions
- Add blue color scale to widget SCSS
- Fix node overflow and progress bar rendering issues
- Add English translations for dashboard components

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
Liang XJ
2026-01-26 11:16:56 +08:00
commit 092fb2e083
7646 changed files with 975643 additions and 0 deletions

View File

@@ -0,0 +1,54 @@
class Internal::AccountAnalysis::AccountUpdaterService
  # Persists threat-analysis results on an account and flags / notifies
  # when the analysis indicates a security risk.

  # Threat levels that indicate a clean scan. The LLM prompt schema uses
  # 'none' while ContentEvaluatorService reports 'safe'; both mean no
  # action is needed (previously only 'none' was recognised).
  SAFE_THREAT_LEVELS = %w[none safe].freeze

  # Threat levels that warrant flagging on their own. 'critical' is the
  # most severe level emitted by ContentEvaluatorService and was
  # previously omitted, so the worst results never triggered a flag.
  FLAGGABLE_THREAT_LEVELS = %w[critical high medium].freeze

  def initialize(account)
    @account = account
  end

  # analysis: hash with 'threat_level', 'threat_summary',
  # 'recommendation' and 'illegal_activities_detected' keys (see
  # ContentEvaluatorService / PromptsService for the schema).
  # When error_message is present the analysis is ignored and the
  # account is flagged with the error instead.
  def update_with_analysis(analysis, error_message = nil)
    if error_message
      save_error(error_message)
      notify_on_discord
      return
    end

    save_analysis_results(analysis)
    flag_account_if_needed(analysis)
  end

  private

  def save_error(error_message)
    @account.internal_attributes['security_flagged'] = true
    @account.internal_attributes['security_flag_reason'] = "Error: #{error_message}"
    # save! for consistency with save_analysis_results — a silent
    # validation failure here would drop the security flag.
    @account.save!
  end

  # Records the scan metadata regardless of outcome.
  def save_analysis_results(analysis)
    @account.internal_attributes['last_threat_scan_at'] = Time.current
    @account.internal_attributes['last_threat_scan_level'] = analysis['threat_level']
    @account.internal_attributes['last_threat_scan_summary'] = analysis['threat_summary']
    @account.internal_attributes['last_threat_scan_recommendation'] = analysis['recommendation']
    @account.save!
  end

  # Flags the account when the analysis crosses a severity threshold,
  # reports illegal activity, or recommends a block.
  def flag_account_if_needed(analysis)
    return if SAFE_THREAT_LEVELS.include?(analysis['threat_level'])

    if FLAGGABLE_THREAT_LEVELS.include?(analysis['threat_level']) ||
       analysis['illegal_activities_detected'] == true ||
       analysis['recommendation'] == 'block'
      @account.internal_attributes['security_flagged'] = true
      @account.internal_attributes['security_flag_reason'] = "Threat detected: #{analysis['threat_summary']}"
      @account.save!
      Rails.logger.info("Flagging account #{@account.id} due to threat level: #{analysis['threat_level']}")
      # Notify only when the account was actually flagged; previously the
      # "flagged" notification fired even for unflagged low-level results.
      notify_on_discord
    end
  end

  # Best-effort Discord alert for human review of the flagged account.
  def notify_on_discord
    Rails.logger.info("Account #{@account.id} has been flagged for security review")
    Internal::AccountAnalysis::DiscordNotifierService.new.notify_flagged_account(@account)
  end
end

View File

@@ -0,0 +1,77 @@
class Internal::AccountAnalysis::ContentEvaluatorService
  # Runs website content through the LLM moderation endpoint and maps
  # the result onto the internal threat-analysis schema.
  include Integrations::LlmInstrumentation

  def initialize
    Llm::Config.initialize!
  end

  # Moderates +content+ (truncated to 10k chars) and returns a hash with
  # 'threat_level', 'threat_summary', 'detected_threats',
  # 'illegal_activities_detected' and 'recommendation' keys.
  # Blank content and any error yield a conservative fallback evaluation.
  def evaluate(content)
    return default_evaluation if content.blank?

    truncated = content.to_s[0...10_000]
    moderation = instrument_moderation_call(instrumentation_params(content)) do
      RubyLLM.moderate(truncated)
    end
    build_evaluation(moderation)
  rescue StandardError => e
    handle_evaluation_error(e)
  end

  private

  # Span metadata for LLM instrumentation.
  def instrumentation_params(content)
    {
      span_name: 'llm.internal.content_moderation',
      model: 'text-moderation-latest',
      input: content,
      feature_name: 'content_evaluator'
    }
  end

  # Maps a moderation result onto the threat-analysis schema and logs it.
  def build_evaluation(moderation)
    flagged = moderation.flagged?
    categories = moderation.flagged_categories

    {
      'threat_level' => flagged ? determine_threat_level(moderation) : 'safe',
      'threat_summary' => flagged ? "Content flagged for: #{categories.join(', ')}" : 'No threats detected',
      'detected_threats' => categories,
      'illegal_activities_detected' => categories.any? { |c| c.include?('violence') || c.include?('self-harm') },
      'recommendation' => flagged ? 'review' : 'approve'
    }.tap { |evaluation| log_evaluation_results(evaluation) }
  end

  # Buckets the highest category score into a severity level.
  def determine_threat_level(moderation)
    top_score = moderation.category_scores.values.max || 0
    if top_score >= 0.8
      'critical'
    elsif top_score >= 0.5
      'high'
    elsif top_score >= 0.2
      'medium'
    else
      'low'
    end
  end

  # Conservative fallback used for blank input or evaluation failures.
  def default_evaluation(error_type = nil)
    {
      'threat_level' => 'unknown',
      'threat_summary' => 'Failed to complete content evaluation',
      'detected_threats' => Array(error_type),
      'illegal_activities_detected' => false,
      'recommendation' => 'review'
    }
  end

  def log_evaluation_results(evaluation)
    Rails.logger.info("Moderation evaluation - Level: #{evaluation['threat_level']}, Threats: #{evaluation['detected_threats'].join(', ')}")
  end

  def handle_evaluation_error(error)
    Rails.logger.error("Error evaluating content: #{error.message}")
    default_evaluation('evaluation_failure')
  end
end

View File

@@ -0,0 +1,47 @@
class Internal::AccountAnalysis::DiscordNotifierService
  # Posts a summary of a security-flagged account to a Discord webhook.
  # Delivery is best-effort: failures are logged and swallowed.
  def notify_flagged_account(account)
    if webhook_url.blank?
      Rails.logger.error('Cannot send Discord notification: No webhook URL configured')
      return
    end

    deliver(account)
    Rails.logger.info("Discord notification sent for flagged account #{account.id}")
  rescue StandardError => e
    Rails.logger.error("Error sending Discord notification: #{e.message}")
  end

  private

  # Sends the JSON payload to the configured webhook.
  def deliver(account)
    HTTParty.post(
      webhook_url,
      body: build_message(account).to_json,
      headers: { 'Content-Type' => 'application/json' }
    )
  end

  # Assembles the Discord payload from the analysis data previously
  # stored on the account by AccountUpdaterService.
  def build_message(account)
    analysis = account.internal_attributes
    first_user = account.users.order(id: :asc).first

    { content: message_text(account, first_user, analysis) }
  end

  def message_text(account, user, analysis)
    <<~MESSAGE
      ---
      An account has been flagged in our security system with the following details:
      🆔 **Account Details:**
      Account ID: #{account.id}
      User Email: #{user&.email || 'N/A'}
      Threat Level: #{analysis['last_threat_scan_level']}
      🔎 **System Recommendation:** #{analysis['last_threat_scan_recommendation']}
      #{analysis['illegal_activities_detected'] ? '⚠️ Potential illegal activities detected' : 'No illegal activities detected'}
      📝 **Findings:**
      #{analysis['last_threat_scan_summary']}
    MESSAGE
  end

  # Per-installation config; a blank value disables notifications.
  def webhook_url
    @webhook_url ||= InstallationConfig.find_by(name: 'ACCOUNT_SECURITY_NOTIFICATION_WEBHOOK_URL')&.value
  end
end

View File

@@ -0,0 +1,31 @@
module Internal
  module AccountAnalysis
    # Builds the prompt strings used by the account-analysis LLM calls.
    class PromptsService
      # Returns the website threat-analysis prompt with the scraped
      # +content+ interpolated into it. The expected response schema is
      # embedded in the prompt itself.
      def self.threat_analyser(content)
        <<~PROMPT
          Analyze the following website content for potential security threats, scams, or illegal activities.
          Focus on identifying:
          1. Phishing attempts
          2. Fraudulent business practices
          3. Malware distribution
          4. Illegal product/service offerings
          5. Money laundering indicators
          6. Identity theft schemes
          Always classify websites under construction or without content to be a medium.
          Website content:
          #{content}
          Provide your analysis in the following JSON format:
          {
          "threat_level": "none|low|medium|high",
          "threat_summary": "Brief summary of findings",
          "detected_threats": ["threat1", "threat2"],
          "illegal_activities_detected": true|false,
          "recommendation": "approve|review|block"
          }
        PROMPT
      end
    end
  end
end

View File

@@ -0,0 +1,43 @@
class Internal::AccountAnalysis::ThreatAnalyserService
  # Orchestrates a threat scan for an account: derives a domain from the
  # first user's email, scrapes that website, runs the content through
  # moderation and persists the outcome.
  def initialize(account)
    @account = account
    @user = account.users.order(id: :asc).first
    @domain = extract_domain_from_email(@user&.email)
  end

  # Returns the analysis hash, or nil when the scan was skipped
  # (no domain, or no scrapeable content).
  def perform
    return log_skip('No domain found') if @domain.blank?

    website_content = Internal::AccountAnalysis::WebsiteScraperService.new(@domain).perform
    if website_content.blank?
      log_skip('No website content found')
      Internal::AccountAnalysis::AccountUpdaterService.new(@account).update_with_analysis(nil, 'Scraping error: No content found')
      return
    end

    analyse_and_store(website_content)
  end

  private

  # Logs a skip reason and returns nil so perform can bail out in one line.
  def log_skip(reason)
    Rails.logger.info("Skipping threat analysis for account #{@account.id}: #{reason}")
    nil
  end

  # Evaluates the scraped content and persists the result on the account.
  def analyse_and_store(website_content)
    payload = <<~MESSAGE
      Domain: #{@domain}
      Content: #{website_content}
    MESSAGE

    threat_analysis = Internal::AccountAnalysis::ContentEvaluatorService.new.evaluate(payload)
    Rails.logger.info("Completed threat analysis: level=#{threat_analysis['threat_level']} for account-id: #{@account.id}")
    Internal::AccountAnalysis::AccountUpdaterService.new(@account).update_with_analysis(threat_analysis)
    threat_analysis
  end

  # Everything after the last '@'; nil for blank input or on error.
  def extract_domain_from_email(email)
    return nil if email.blank?

    email.split('@').last
  rescue StandardError => e
    Rails.logger.error("Error extracting domain from email #{email}: #{e.message}")
    nil
  end
end

View File

@@ -0,0 +1,32 @@
class Internal::AccountAnalysis::WebsiteScraperService
  # Fetches the homepage body for a domain so it can be fed into the
  # threat analyser.
  #
  # NOTE(review): the domain is user-derived (taken from a signup email
  # address), so this request can be pointed at arbitrary hosts —
  # confirm the SSRF exposure is acceptable for this internal tooling.

  # Seconds before giving up on a slow site; without a timeout the
  # HTTP call could hang a worker indefinitely (HTTParty's default
  # net/http timeout is 60s per phase).
  REQUEST_TIMEOUT_SECONDS = 15

  def initialize(domain)
    @domain = domain
  end

  # Returns the response body as a String, or nil when the domain is
  # blank or the request fails. Non-2xx responses still return their
  # body, matching the original behavior.
  def perform
    return nil if @domain.blank?

    Rails.logger.info("Scraping website: #{external_link}")
    begin
      response = HTTParty.get(external_link, follow_redirects: true, timeout: REQUEST_TIMEOUT_SECONDS)
      response.to_s
    rescue StandardError => e
      Rails.logger.error("Error scraping website for domain #{@domain}: #{e.message}")
      nil
    end
  end

  private

  # Memoized so the URL is sanitized (and logged) once per perform,
  # instead of once for the log line and again for the request.
  def external_link
    @external_link ||= sanitize_url(@domain)
  end

  # Prefixes https:// when the domain carries no scheme.
  def sanitize_url(domain)
    url = domain.start_with?('http://', 'https://') ? domain : "https://#{domain}"
    Rails.logger.info("Sanitized URL: #{url}")
    url
  end
end

View File

@@ -0,0 +1,68 @@
module Internal
  module Accounts
    # Reads and writes whitelisted keys in an account's
    # internal_attributes column, guarding against typo'd keys.
    class InternalAttributesService
      attr_reader :account

      # List of keys that can be managed through this service
      # TODO: Add account_notes field in future
      # This field can be used to store notes about account on Chatwoot cloud
      VALID_KEYS = %w[manually_managed_features].freeze

      def initialize(account)
        @account = account
      end

      # Returns the stored value for +key+.
      # Raises ArgumentError when the key is not whitelisted.
      def get(key)
        validate_key!(key)
        account.internal_attributes[key]
      end

      # Stores +value+ under +key+ and persists the account.
      # Returns the result of account.save (false on validation failure).
      # Raises ArgumentError when the key is not whitelisted.
      def set(key, value)
        validate_key!(key)
        # Copy before mutating so the account's original hash is left
        # untouched if the save fails; `|| {}` guards a nil column value
        # (the original `internal_attributes.dup || {}` relied on
        # NilClass#dup returning nil).
        new_attrs = (account.internal_attributes || {}).dup
        new_attrs[key] = value
        account.internal_attributes = new_attrs
        account.save
      end

      # Features manually enabled by support staff; [] when unset.
      def manually_managed_features
        get('manually_managed_features') || []
      end

      # Normalizes and stores the manually managed feature list.
      # Accepts a single value or an array; entries are stringified and
      # stripped (the original `.map(&:strip)` raised on non-String
      # input), blanks and duplicates are dropped, and only features
      # from valid_feature_list are kept.
      def manually_managed_features=(features)
        allowed = valid_feature_list
        normalized = Array(features).compact
                                    .map { |feature| feature.to_s.strip }
                                    .reject(&:empty?)
                                    .select { |feature| allowed.include?(feature) }
                                    .uniq
        set('manually_managed_features', normalized)
      end

      # Get list of valid features that can be manually managed
      # (Business and Enterprise plan features only).
      def valid_feature_list
        Enterprise::Billing::HandleStripeEventService::BUSINESS_PLAN_FEATURES +
          Enterprise::Billing::HandleStripeEventService::ENTERPRISE_PLAN_FEATURES
      end

      # Account notes functionality removed for now
      # Will be re-implemented when UI is ready

      private

      def validate_key!(key)
        raise ArgumentError, "Invalid internal attribute key: #{key}" unless VALID_KEYS.include?(key)
      end
    end
  end
end

View File

@@ -0,0 +1,59 @@
class Internal::ReconcilePlanConfigService
  # Resets premium installation configs to their bundled defaults and
  # disables premium features for installations on the 'community'
  # pricing plan. Non-community installs only get their stale reset
  # warning cleared.
  def perform
    # Clear any stale warning first; it is re-created below only when a
    # community install actually has diverging premium config values.
    remove_premium_config_reset_warning
    return if ChatwootHub.pricing_plan != 'community'

    # NOTE: this check must run BEFORE reconcile_premium_config, which
    # overwrites the diverging values the check looks for.
    create_premium_config_reset_warning if premium_config_reset_required?
    reconcile_premium_config
    reconcile_premium_features
  end

  private

  def config_path
    @config_path ||= Rails.root.join('enterprise/config')
  end

  # Bundled premium installation-config defaults (name/value pairs).
  def premium_config
    @premium_config ||= YAML.safe_load(File.read("#{config_path}/premium_installation_config.yml")).freeze
  end

  def remove_premium_config_reset_warning
    Redis::Alfred.delete(Redis::Alfred::CHATWOOT_INSTALLATION_CONFIG_RESET_WARNING)
  end

  def create_premium_config_reset_warning
    Redis::Alfred.set(Redis::Alfred::CHATWOOT_INSTALLATION_CONFIG_RESET_WARNING, true)
  end

  # True when any EXISTING InstallationConfig diverges from the bundled
  # value; missing configs are ignored (the `if` guard makes the block
  # yield nil, which `any?` treats as false).
  def premium_config_reset_required?
    premium_config.any? do |config|
      config = config.with_indifferent_access
      existing_config = InstallationConfig.find_by(name: config[:name])
      existing_config&.value != config[:value] if existing_config.present?
    end
  end

  # Rewrites diverging configs back to the bundled values. Only updates
  # configs that already exist (`existing_config&.update!`); missing
  # ones are not created here.
  def reconcile_premium_config
    premium_config.each do |config|
      new_config = config.with_indifferent_access
      existing_config = InstallationConfig.find_by(name: new_config[:name])
      next if existing_config&.value == new_config[:value]

      existing_config&.update!(value: new_config[:value])
    end
  end

  # Feature names that are premium-only (list loaded from YAML).
  def premium_features
    @premium_features ||= YAML.safe_load(File.read("#{config_path}/premium_features.yml")).freeze
  end

  # Turns off every premium feature on every account, in batches to
  # bound memory use.
  def reconcile_premium_features
    Account.find_in_batches do |accounts|
      accounts.each do |account|
        account.disable_features!(*premium_features)
      end
    end
  end
end