Initial commit: Add logistics and order_detail message types
Some checks failed
Lock Threads / action (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled
Publish Chatwoot EE docker images / build (linux/amd64, ubuntu-latest) (push) Has been cancelled
Publish Chatwoot EE docker images / build (linux/arm64, ubuntu-22.04-arm) (push) Has been cancelled
Publish Chatwoot EE docker images / merge (push) Has been cancelled
Publish Chatwoot CE docker images / build (linux/amd64, ubuntu-latest) (push) Has been cancelled
Publish Chatwoot CE docker images / build (linux/arm64, ubuntu-22.04-arm) (push) Has been cancelled
Publish Chatwoot CE docker images / merge (push) Has been cancelled
Run Chatwoot CE spec / lint-backend (push) Has been cancelled
Run Chatwoot CE spec / lint-frontend (push) Has been cancelled
Run Chatwoot CE spec / frontend-tests (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (0, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (1, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (10, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (11, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (12, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (13, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (14, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (15, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (2, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (3, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (4, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (5, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (6, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (7, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (8, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (9, 16) (push) Has been cancelled
Run Linux nightly installer / nightly (push) Has been cancelled

- Add Logistics component with progress tracking
- Add OrderDetail component for order information
- Support data-driven steps and actions
- Add blue color scale to widget SCSS
- Fix node overflow and progress bar rendering issues
- Add English translations for dashboard components

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
Liang XJ
2026-01-26 11:16:56 +08:00
commit 092fb2e083
7646 changed files with 975643 additions and 0 deletions

View File

@@ -0,0 +1,47 @@
# Records a completed LLM generation as an OpenTelemetry span so Langfuse can
# attribute token usage (and therefore cost) to the model that produced it.
#
# Including classes must provide #tracer, #feature_name, #model, #temperature
# and #determine_provider (supplied by the LLM instrumentation concerns).
module Captain::ChatGenerationRecorder
  extend ActiveSupport::Concern
  include Integrations::LlmInstrumentationConstants

  private

  # Emits one generation span for an assistant message. Best-effort: any
  # failure is logged and swallowed so tracing never breaks the chat flow.
  def record_llm_generation(chat, message)
    return unless valid_llm_message?(message)

    # Create a generation span with model and token info for Langfuse cost calculation.
    # Note: span duration will be near-zero since we create and end it immediately,
    # but token counts are what Langfuse uses for cost calculation.
    tracer.in_span("llm.captain.#{feature_name}.generation") do |span|
      set_generation_span_attributes(span, chat, message)
    end
  rescue StandardError => e
    Rails.logger.warn "Failed to record LLM generation: #{e.message}"
  end

  # Skip non-LLM messages (e.g., tool results that RubyLLM processes internally).
  # Check for assistant role rather than token presence - some providers/streaming modes
  # may not return token counts, but we still want to capture the generation for evals.
  def valid_llm_message?(message)
    message.respond_to?(:role) && message.role.to_s == 'assistant'
  end

  # Copies only non-nil attributes onto the span so we never emit empty
  # OTel attribute values.
  def set_generation_span_attributes(span, chat, message)
    generation_attributes(chat, message).each do |key, value|
      span.set_attribute(key, value) if value
    end
  end

  def generation_attributes(chat, message)
    {
      ATTR_GEN_AI_PROVIDER => determine_provider(model),
      ATTR_GEN_AI_REQUEST_MODEL => model,
      ATTR_GEN_AI_REQUEST_TEMPERATURE => temperature,
      # Guard BOTH token readers: per valid_llm_message? above, some
      # providers/streaming modes omit token counts. Previously only
      # output_tokens was guarded, so a token-less message raised
      # NoMethodError and the entire span was dropped by the rescue.
      ATTR_GEN_AI_USAGE_INPUT_TOKENS => message.respond_to?(:input_tokens) ? message.input_tokens : nil,
      ATTR_GEN_AI_USAGE_OUTPUT_TOKENS => message.respond_to?(:output_tokens) ? message.output_tokens : nil,
      ATTR_LANGFUSE_OBSERVATION_INPUT => format_input_messages(chat),
      ATTR_LANGFUSE_OBSERVATION_OUTPUT => message.respond_to?(:content) ? message.content.to_s : nil
    }
  end

  # Serializes every prior message (all but the one just received) as the
  # generation input for Langfuse.
  def format_input_messages(chat)
    chat.messages[0...-1].map { |m| { role: m.role.to_s, content: m.content.to_s } }.to_json
  end
end

View File

@@ -0,0 +1,131 @@
# Shared chat-completion plumbing for Captain features: builds a RubyLLM chat,
# wires tool/instrumentation callbacks, and requests a JSON completion.
#
# Including classes must implement #feature_name and provide @assistant,
# @messages and @model (plus optionally @tools, @conversation_id, @account,
# @copilot_thread).
module Captain::ChatHelper
  include Integrations::LlmInstrumentation
  include Captain::ChatResponseHelper
  include Captain::ChatGenerationRecorder

  # Sends the conversation to the LLM and returns the parsed response hash.
  # Errors are logged with assistant context and re-raised to the caller.
  def request_chat_completion
    log_chat_completion_request
    chat = build_chat
    add_messages_to_chat(chat)
    with_agent_session do
      response = chat.ask(conversation_messages.last[:content])
      build_response(response)
    end
  rescue StandardError => e
    Rails.logger.error "#{self.class.name} Assistant: #{@assistant.id}, Error in chat completion: #{e}"
    # Bare raise re-raises the current exception and preserves its backtrace
    # (raise e would reset the backtrace to this line).
    raise
  end

  private

  # Assembles the chat: JSON response format, tools, system instructions and
  # event handlers. Returns the fully configured chat object.
  def build_chat
    llm_chat = chat(model: @model, temperature: temperature)
    llm_chat = llm_chat.with_params(response_format: { type: 'json_object' })
    llm_chat = setup_tools(llm_chat)
    llm_chat = setup_system_instructions(llm_chat)
    setup_event_handlers(llm_chat)
  end

  def setup_tools(llm_chat)
    @tools&.each do |tool|
      llm_chat = llm_chat.with_tool(tool)
    end
    llm_chat
  end

  # Collapses all system-role messages into a single instruction string;
  # non-system messages are replayed separately by add_messages_to_chat.
  def setup_system_instructions(chat)
    system_messages = @messages.select { |m| m[:role] == 'system' || m[:role] == :system }
    combined_instructions = system_messages.pluck(:content).join("\n\n")
    chat.with_instructions(combined_instructions)
  end

  def setup_event_handlers(chat)
    # NOTE: We only use on_end_message to record the generation with token counts.
    # RubyLLM callbacks fire after chunks arrive, not around the API call, so
    # span timing won't reflect actual API latency. But Langfuse calculates costs
    # from model + token counts, so this is sufficient for cost tracking.
    chat.on_end_message { |message| record_llm_generation(chat, message) }
    chat.on_tool_call { |tool_call| handle_tool_call(tool_call) }
    chat.on_tool_result { |result| handle_tool_result(result) }
    chat
  end

  # Persists a "thinking" message, opens a tool span, and queues the call so
  # handle_tool_result can match its completion later.
  def handle_tool_call(tool_call)
    persist_thinking_message(tool_call)
    start_tool_span(tool_call)
    @pending_tool_calls ||= []
    @pending_tool_calls.push(tool_call)
  end

  def handle_tool_result(result)
    end_tool_span(result)
    persist_tool_completion
  end

  # Replays prior (non-system) history; the final message is sent via
  # chat.ask in request_chat_completion.
  def add_messages_to_chat(chat)
    conversation_messages[0...-1].each do |msg|
      chat.add_message(role: msg[:role].to_sym, content: msg[:content])
    end
  end

  def instrumentation_params(chat = nil)
    {
      span_name: "llm.captain.#{feature_name}",
      account_id: resolved_account_id,
      conversation_id: @conversation_id,
      feature_name: feature_name,
      model: @model,
      messages: chat ? chat.messages.map { |m| { role: m.role.to_s, content: m.content.to_s } } : @messages,
      temperature: temperature,
      metadata: {
        assistant_id: @assistant&.id
      }
    }
  end

  def conversation_messages
    @messages.reject { |m| m[:role] == 'system' || m[:role] == :system }
  end

  # Assistant-configured temperature, defaulting to 1 when unset.
  # FIX: the previous `...('temperature').to_f || 1` never defaulted, because
  # nil.to_f is 0.0 (truthy), silently forcing temperature 0.0 when the
  # config key was missing. Safe-navigating to_f keeps nil nil so || 1 works.
  def temperature
    @assistant&.config&.[]('temperature')&.to_f || 1
  end

  def resolved_account_id
    @account&.id || @assistant&.account_id
  end

  # Ensures all LLM calls and tool executions within an agentic loop
  # are grouped under a single trace/session in Langfuse.
  #
  # Without this guard, each recursive call to request_chat_completion
  # (triggered by tool calls) would create a separate trace instead of
  # nesting within the existing session span.
  def with_agent_session(&)
    already_active = @agent_session_active
    return yield if already_active

    @agent_session_active = true
    instrument_agent_session(instrumentation_params, &)
  ensure
    @agent_session_active = false unless already_active
  end

  # Must be implemented by including class to identify the feature for instrumentation.
  # Used for Langfuse tagging and span naming.
  def feature_name
    raise NotImplementedError, "#{self.class.name} must implement #feature_name"
  end

  def log_chat_completion_request
    Rails.logger.info(
      "#{self.class.name} Assistant: #{@assistant.id}, Requesting chat completion
      for messages #{@messages} with #{@tools&.length || 0} tools
      "
    )
  end
end

View File

@@ -0,0 +1,52 @@
# Helpers for turning raw LLM responses into persisted messages, plus the
# "thinking" breadcrumbs shown while tools run in a copilot thread.
module Captain::ChatResponseHelper
  private

  # Parses the LLM response, persists it as an assistant message and returns
  # the parsed payload.
  def build_response(response)
    Rails.logger.debug { "#{self.class.name} Assistant: #{@assistant.id}, Received response #{response}" }
    payload = parse_json_response(response.content)
    persist_message(payload, 'assistant')
    payload
  end

  # Strips markdown code fences, then parses the remainder as JSON. When
  # parsing fails, the cleaned raw text is wrapped in a content hash instead.
  def parse_json_response(content)
    cleaned = content.gsub('```json', '').gsub('```', '').strip
    JSON.parse(cleaned)
  rescue JSON::ParserError => e
    Rails.logger.error "#{self.class.name} Assistant: #{@assistant.id}, Error parsing JSON response: #{e.message}"
    { 'content' => cleaned }
  end

  # Records a "Using <tool>" breadcrumb when a tool call starts.
  # No-op outside of a copilot thread.
  def persist_thinking_message(tool_call)
    return if @copilot_thread.blank?

    name = tool_call.name.to_s
    persist_message(
      { 'content' => "Using #{name}", 'function_name' => name },
      'assistant_thinking'
    )
  end

  # Records a "Completed <tool>" breadcrumb for the most recently queued
  # tool call. No-op outside of a copilot thread or with nothing pending.
  def persist_tool_completion
    return if @copilot_thread.blank?

    completed = @pending_tool_calls&.pop
    return unless completed

    name = completed.name.to_s
    persist_message(
      { 'content' => "Completed #{name}", 'function_name' => name },
      'assistant_thinking'
    )
  end
end

View File

@@ -0,0 +1,9 @@
# Token generation for Firecrawl webhook verification.
module Captain::FirecrawlHelper
  # Derives a deterministic, non-reversible token scoped to an assistant and
  # account. Only the last four characters of the API key are mixed in, so
  # the full key can never be recovered from the token.
  # NOTE(review): assumes the configured key is at least 4 characters —
  # a shorter key makes api_key[-4..] nil; confirm against provisioning.
  #
  # Returns the SHA-256 hex digest, or nil when no API key is configured.
  def generate_firecrawl_token(assistant_id, account_id)
    firecrawl_key = InstallationConfig.find_by(name: 'CAPTAIN_FIRECRAWL_API_KEY')&.value
    return nil unless firecrawl_key

    Digest::SHA256.hexdigest([firecrawl_key[-4..], assistant_id, account_id].join)
  end
end

View File

@@ -0,0 +1,12 @@
# Guards against password-based sign-in for users provisioned through SAML.
module SamlAuthenticationHelper
  # Returns true when the email belongs to a SAML-managed user who is trying
  # to authenticate without a valid SSO handoff token; false for blank
  # emails, unknown users, non-SAML users, or a legitimate SSO token.
  def saml_user_attempting_password_auth?(email, sso_auth_token: nil)
    return false if email.blank?

    saml_user = User.from_email(email)
    return false unless saml_user&.provider == 'saml'

    # A valid one-time SSO token means this is an SSO handoff, not a
    # password attempt.
    legitimate_sso = sso_auth_token.present? && saml_user.valid_sso_auth_token?(sso_auth_token)
    !legitimate_sso
  end
end