Initial commit: Add logistics and order_detail message types
Some checks failed
Lock Threads / action (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled
Publish Chatwoot EE docker images / build (linux/amd64, ubuntu-latest) (push) Has been cancelled
Publish Chatwoot EE docker images / build (linux/arm64, ubuntu-22.04-arm) (push) Has been cancelled
Publish Chatwoot EE docker images / merge (push) Has been cancelled
Publish Chatwoot CE docker images / build (linux/amd64, ubuntu-latest) (push) Has been cancelled
Publish Chatwoot CE docker images / build (linux/arm64, ubuntu-22.04-arm) (push) Has been cancelled
Publish Chatwoot CE docker images / merge (push) Has been cancelled
Run Chatwoot CE spec / lint-backend (push) Has been cancelled
Run Chatwoot CE spec / lint-frontend (push) Has been cancelled
Run Chatwoot CE spec / frontend-tests (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (0, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (1, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (10, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (11, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (12, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (13, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (14, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (15, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (2, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (3, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (4, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (5, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (6, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (7, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (8, 16) (push) Has been cancelled
Run Chatwoot CE spec / backend-tests (9, 16) (push) Has been cancelled
Run Linux nightly installer / nightly (push) Has been cancelled

- Add Logistics component with progress tracking
- Add OrderDetail component for order information
- Support data-driven steps and actions
- Add blue color scale to widget SCSS
- Fix node overflow and progress bar rendering issues
- Add English translations for dashboard components

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
Liang XJ
2026-01-26 11:16:56 +08:00
commit 092fb2e083
7646 changed files with 975643 additions and 0 deletions

View File

@@ -0,0 +1,155 @@
# Spec for Captain::Llm::ConversationFaqService: the service asks an LLM to
# generate FAQ candidates from a conversation, embeds each candidate, and
# drops candidates that are near-duplicates of the assistant's existing
# responses (based on nearest-neighbor distance over embeddings).
require 'rails_helper'

RSpec.describe Captain::Llm::ConversationFaqService do
  let(:captain_assistant) { create(:captain_assistant) }
  # A present first_reply_created_at marks the conversation as having human
  # interaction, which the service requires before generating FAQs (see the
  # 'without human interaction' context below).
  let(:conversation) { create(:conversation, first_reply_created_at: Time.zone.now) }
  let(:service) { described_class.new(captain_assistant, conversation) }
  let(:embedding_service) { instance_double(Captain::Llm::EmbeddingService) }
  let(:mock_chat) { instance_double(RubyLLM::Chat) }
  let(:sample_faqs) do
    [
      { 'question' => 'What is the purpose?', 'answer' => 'To help users.' },
      { 'question' => 'How does it work?', 'answer' => 'Through AI.' }
    ]
  end
  # The service expects the LLM reply body to be a JSON object keyed by "faqs".
  let(:mock_response) do
    instance_double(RubyLLM::Message, content: { faqs: sample_faqs }.to_json)
  end

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')
    allow(Captain::Llm::EmbeddingService).to receive(:new).and_return(embedding_service)
    # Stub RubyLLM's fluent chat builder: each configuration call returns the
    # same double so the chained setup ends in our canned response from #ask.
    allow(RubyLLM).to receive(:chat).and_return(mock_chat)
    allow(mock_chat).to receive(:with_temperature).and_return(mock_chat)
    allow(mock_chat).to receive(:with_params).and_return(mock_chat)
    allow(mock_chat).to receive(:with_instructions).and_return(mock_chat)
    allow(mock_chat).to receive(:ask).and_return(mock_response)
  end

  describe '#generate_and_deduplicate' do
    context 'when successful' do
      before do
        allow(embedding_service).to receive(:get_embedding).and_return([0.1, 0.2, 0.3])
        # No nearest neighbors => no candidate is considered a duplicate.
        allow(captain_assistant.responses).to receive(:nearest_neighbors).and_return([])
      end

      it 'creates new FAQs for valid conversation content' do
        expect do
          service.generate_and_deduplicate
        end.to change(captain_assistant.responses, :count).by(2)
      end

      # New responses should be created in 'pending' status and linked back to
      # the source conversation via documentable_id.
      it 'saves FAQs with pending status linked to conversation' do
        service.generate_and_deduplicate
        expect(
          captain_assistant.responses.pluck(:question, :answer, :status, :documentable_id)
        ).to contain_exactly(
          ['What is the purpose?', 'To help users.', 'pending', conversation.id],
          ['How does it work?', 'Through AI.', 'pending', conversation.id]
        )
      end
    end

    context 'without human interaction' do
      # Override: no first_reply_created_at, i.e. no human reply yet.
      let(:conversation) { create(:conversation) }

      it 'returns an empty array without generating FAQs' do
        expect(service.generate_and_deduplicate).to eq([])
      end

      it 'does not call the LLM API' do
        expect(RubyLLM).not_to receive(:chat)
        service.generate_and_deduplicate
      end
    end

    context 'when finding duplicates' do
      let(:existing_response) do
        create(:captain_assistant_response, assistant: captain_assistant, question: 'Similar question', answer: 'Similar answer')
      end
      # OpenStruct stands in for a nearest-neighbor record; a distance of 0.1
      # is close enough that the service should treat the generated candidate
      # as a duplicate of the existing response.
      let(:similar_neighbor) do
        OpenStruct.new(
          id: 1,
          question: existing_response.question,
          answer: existing_response.answer,
          neighbor_distance: 0.1
        )
      end

      before do
        allow(embedding_service).to receive(:get_embedding).and_return([0.1, 0.2, 0.3])
        allow(captain_assistant.responses).to receive(:nearest_neighbors).and_return([similar_neighbor])
      end

      it 'filters out duplicate FAQs based on embedding similarity' do
        expect do
          service.generate_and_deduplicate
        end.not_to change(captain_assistant.responses, :count)
      end
    end

    context 'when LLM API fails' do
      before do
        allow(mock_chat).to receive(:ask).and_raise(RubyLLM::Error.new(nil, 'API Error'))
        # Baseline allow so unrelated logging doesn't break the spec; the
        # example below sets the specific message expectation.
        allow(Rails.logger).to receive(:error)
      end

      it 'returns empty array and logs the error' do
        expect(Rails.logger).to receive(:error).with('LLM API Error: API Error')
        expect(service.generate_and_deduplicate).to eq([])
      end
    end

    context 'when JSON parsing fails' do
      let(:invalid_response) do
        instance_double(RubyLLM::Message, content: 'invalid json')
      end

      before do
        allow(mock_chat).to receive(:ask).and_return(invalid_response)
      end

      it 'handles JSON parsing errors gracefully' do
        expect(Rails.logger).to receive(:error).with(/Error in parsing GPT processed response:/)
        expect(service.generate_and_deduplicate).to eq([])
      end
    end

    context 'when response content is nil' do
      let(:nil_response) do
        instance_double(RubyLLM::Message, content: nil)
      end

      before do
        allow(mock_chat).to receive(:ask).and_return(nil_response)
      end

      it 'returns empty array' do
        expect(service.generate_and_deduplicate).to eq([])
      end
    end
  end

  describe 'language handling' do
    context 'when conversation has different language' do
      let(:account) { create(:account, locale: 'fr') }
      let(:conversation) do
        create(:conversation, account: account, first_reply_created_at: Time.zone.now)
      end

      before do
        allow(embedding_service).to receive(:get_embedding).and_return([0.1, 0.2, 0.3])
        allow(captain_assistant.responses).to receive(:nearest_neighbors).and_return([])
      end

      # The account locale code 'fr' is expected to be expanded to the full
      # language name ('french') before being passed to the prompt builder.
      it 'uses account language for system prompt' do
        expect(Captain::Llm::SystemPromptsService).to receive(:conversation_faq_generator)
          .with('french')
          .at_least(:once)
          .and_call_original
        service.generate_and_deduplicate
      end
    end
  end
end

View File

@@ -0,0 +1,103 @@
# Spec for Captain::Llm::FaqGeneratorService: generates FAQs from a plain
# content string via an LLM chat call, using a language-specific system
# prompt, and parses the JSON response's "faqs" key.
require 'rails_helper'

RSpec.describe Captain::Llm::FaqGeneratorService do
  let(:content) { 'Sample content for FAQ generation' }
  let(:language) { 'english' }
  let(:service) { described_class.new(content, language) }
  let(:mock_chat) { instance_double(RubyLLM::Chat) }
  let(:sample_faqs) do
    [
      { 'question' => 'What is this service?', 'answer' => 'It generates FAQs.' },
      { 'question' => 'How does it work?', 'answer' => 'Using AI technology.' }
    ]
  end
  # The service expects the LLM reply body to be a JSON object keyed by "faqs".
  let(:mock_response) do
    instance_double(RubyLLM::Message, content: { faqs: sample_faqs }.to_json)
  end

  before do
    create(:installation_config, name: 'CAPTAIN_OPEN_AI_API_KEY', value: 'test-key')
    # Stub RubyLLM's fluent chat builder: each configuration call returns the
    # same double so the chained setup ends in our canned response from #ask.
    allow(RubyLLM).to receive(:chat).and_return(mock_chat)
    allow(mock_chat).to receive(:with_temperature).and_return(mock_chat)
    allow(mock_chat).to receive(:with_params).and_return(mock_chat)
    allow(mock_chat).to receive(:with_instructions).and_return(mock_chat)
    allow(mock_chat).to receive(:ask).and_return(mock_response)
  end

  describe '#generate' do
    context 'when successful' do
      it 'returns parsed FAQs from the LLM response' do
        result = service.generate
        expect(result).to eq(sample_faqs)
      end

      # The JSON response format must be requested explicitly so the model
      # returns a parseable object.
      it 'sends content to LLM with JSON response format' do
        expect(mock_chat).to receive(:with_params).with(response_format: { type: 'json_object' }).and_return(mock_chat)
        service.generate
      end

      it 'uses SystemPromptsService with the specified language' do
        expect(Captain::Llm::SystemPromptsService).to receive(:faq_generator).with(language).at_least(:once).and_call_original
        service.generate
      end
    end

    context 'with different language' do
      let(:language) { 'spanish' }

      it 'passes the correct language to SystemPromptsService' do
        expect(Captain::Llm::SystemPromptsService).to receive(:faq_generator).with('spanish').at_least(:once).and_call_original
        service.generate
      end
    end

    context 'when LLM API fails' do
      before do
        allow(mock_chat).to receive(:ask).and_raise(RubyLLM::Error.new(nil, 'API Error'))
        # Baseline allow so unrelated logging doesn't break the spec; the
        # example below sets the specific message expectation.
        allow(Rails.logger).to receive(:error)
      end

      it 'returns empty array and logs the error' do
        expect(Rails.logger).to receive(:error).with('LLM API Error: API Error')
        expect(service.generate).to eq([])
      end
    end

    context 'when response content is nil' do
      let(:nil_response) { instance_double(RubyLLM::Message, content: nil) }

      before do
        allow(mock_chat).to receive(:ask).and_return(nil_response)
      end

      it 'returns empty array' do
        expect(service.generate).to eq([])
      end
    end

    context 'when JSON parsing fails' do
      let(:invalid_response) { instance_double(RubyLLM::Message, content: 'invalid json') }

      before do
        allow(mock_chat).to receive(:ask).and_return(invalid_response)
      end

      it 'logs error and returns empty array' do
        expect(Rails.logger).to receive(:error).with(/Error in parsing GPT processed response:/)
        expect(service.generate).to eq([])
      end
    end

    context 'when response is missing faqs key' do
      # Valid JSON, but without the "faqs" key the service is expected to
      # rescue the resulting KeyError and return an empty array.
      let(:missing_key_response) { instance_double(RubyLLM::Message, content: '{"data": []}') }

      before do
        allow(mock_chat).to receive(:ask).and_return(missing_key_response)
      end

      it 'returns empty array via KeyError rescue' do
        expect(service.generate).to eq([])
      end
    end
  end
end

View File

@@ -0,0 +1,105 @@
# Spec for Captain::Llm::PaginatedFaqGeneratorService: iterates over a PDF
# (already uploaded to OpenAI, referenced by its file ID) in page chunks,
# asking the OpenAI chat API for FAQs per chunk until the model reports no
# more content or an iteration cap is reached.
require 'rails_helper'

RSpec.describe Captain::Llm::PaginatedFaqGeneratorService do
  let(:document) { create(:captain_document) }
  let(:service) { described_class.new(document, pages_per_chunk: 5) }
  let(:openai_client) { instance_double(OpenAI::Client) }

  before do
    # Mock OpenAI configuration
    installation_config = instance_double(InstallationConfig, value: 'test-api-key')
    allow(InstallationConfig).to receive(:find_by!)
      .with(name: 'CAPTAIN_OPEN_AI_API_KEY')
      .and_return(installation_config)
    allow(OpenAI::Client).to receive(:new).and_return(openai_client)
  end

  describe '#generate' do
    context 'when document lacks OpenAI file ID' do
      before do
        allow(document).to receive(:openai_file_id).and_return(nil)
      end

      # Without an uploaded file there is nothing to paginate over.
      it 'raises an error' do
        expect { service.generate }.to raise_error(CustomExceptions::Pdf::FaqGenerationError)
      end
    end

    context 'when generating FAQs from PDF pages' do
      # Canned OpenAI chat completion payloads. 'has_content' signals whether
      # the requested page range actually contained document text.
      let(:faq_response) do
        {
          'choices' => [{
            'message' => {
              'content' => JSON.generate({
                'faqs' => [
                  { 'question' => 'What is this document about?', 'answer' => 'It explains key concepts.' }
                ],
                'has_content' => true
              })
            }
          }]
        }
      end
      let(:empty_response) do
        {
          'choices' => [{
            'message' => {
              'content' => JSON.generate({
                'faqs' => [],
                'has_content' => false
              })
            }
          }]
        }
      end

      before do
        allow(document).to receive(:openai_file_id).and_return('file-123')
      end

      # and_return with multiple values: first chat call yields FAQs, the
      # second reports no more content, ending the loop.
      it 'generates FAQs from paginated content' do
        allow(openai_client).to receive(:chat).and_return(faq_response, empty_response)
        faqs = service.generate
        expect(faqs).to have_attributes(size: 1)
        expect(faqs.first['question']).to eq('What is this document about?')
      end

      it 'stops when no more content' do
        allow(openai_client).to receive(:chat).and_return(empty_response)
        faqs = service.generate
        expect(faqs).to be_empty
      end

      # NOTE(review): reaches into service internals; assumes the iteration
      # cap is 20 — keep in sync with the service implementation.
      it 'respects max iterations limit' do
        allow(openai_client).to receive(:chat).and_return(faq_response)
        # Force max iterations
        service.instance_variable_set(:@iterations_completed, 19)
        service.generate
        expect(service.iterations_completed).to eq(20)
      end
    end
  end

  describe '#should_continue_processing?' do
    it 'stops at max iterations' do
      service.instance_variable_set(:@iterations_completed, 20)
      expect(service.should_continue_processing?(faqs: ['faq'], has_content: true)).to be false
    end

    it 'stops when no FAQs returned' do
      expect(service.should_continue_processing?(faqs: [], has_content: true)).to be false
    end

    it 'continues when FAQs exist and under limits' do
      expect(service.should_continue_processing?(faqs: ['faq'], has_content: true)).to be true
    end
  end
end

View File

@@ -0,0 +1,58 @@
# Spec for Captain::Llm::PdfProcessingService: uploads a document's attached
# PDF to the OpenAI Files API and stores the returned file ID on the
# document, skipping the upload when an ID is already present.
require 'rails_helper'

RSpec.describe Captain::Llm::PdfProcessingService do
  let(:document) { create(:captain_document) }
  let(:service) { described_class.new(document) }

  before do
    # Mock OpenAI configuration
    installation_config = instance_double(InstallationConfig, value: 'test-api-key')
    allow(InstallationConfig).to receive(:find_by!)
      .with(name: 'CAPTAIN_OPEN_AI_API_KEY')
      .and_return(installation_config)
  end

  describe '#process' do
    context 'when document already has OpenAI file ID' do
      before do
        allow(document).to receive(:openai_file_id).and_return('existing-file-id')
      end

      # Idempotency: an existing file ID means no re-upload and no ID rewrite.
      it 'skips upload' do
        expect(document).not_to receive(:store_openai_file_id)
        service.process
      end
    end

    context 'when uploading PDF to OpenAI' do
      let(:mock_client) { instance_double(OpenAI::Client) }
      let(:pdf_content) { 'PDF content' }
      let(:blob_double) { instance_double(ActiveStorage::Blob) }
      let(:pdf_file) { instance_double(ActiveStorage::Attachment) }

      before do
        allow(document).to receive(:openai_file_id).and_return(nil)
        # Stub the ActiveStorage chain: document.pdf_file.blob.open yields an
        # IO with the PDF bytes, which the service streams to OpenAI.
        allow(document).to receive(:pdf_file).and_return(pdf_file)
        allow(pdf_file).to receive(:blob).and_return(blob_double)
        allow(blob_double).to receive(:open).and_yield(StringIO.new(pdf_content))
        allow(OpenAI::Client).to receive(:new).and_return(mock_client)
        # Use a simple double for OpenAI::Files as it may not be loaded
        files_api = double('files_api') # rubocop:disable RSpec/VerifiedDoubles
        allow(files_api).to receive(:upload).and_return({ 'id' => 'file-abc123' })
        allow(mock_client).to receive(:files).and_return(files_api)
      end

      it 'uploads PDF and stores file ID' do
        expect(document).to receive(:store_openai_file_id).with('file-abc123')
        service.process
      end

      # A response without an 'id' means the upload did not succeed; the
      # service is expected to surface this as an UploadError.
      it 'raises error when upload fails' do
        allow(mock_client.files).to receive(:upload).and_return({ 'id' => nil })
        expect { service.process }.to raise_error(CustomExceptions::Pdf::UploadError)
      end
    end
  end
end