
improvements(ai): Improve AI streaming UI/UX interactions + better separation of AI provider responsibilities (#2039)

* Start refactor

* Interface updates

* Rework Assistant, Provider, and tests for better domain boundaries

* Consolidate and simplify OpenAI provider and provider concepts

* Clean up assistant streaming

* Improve assistant message orchestration logic

* Clean up "thinking" UI interactions

* Remove stale class

* Regenerate VCR test responses
Zach Gollwitzer 2025-04-01 07:21:54 -04:00 committed by GitHub
parent 6331788b33
commit 5cf758bd03
33 changed files with 1179 additions and 624 deletions
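
The core of the diff below is the new chat_response signature on Provider::Openai. As a rough, hedged sketch of how a caller (the Assistant in the app) might drive the new interface: the keyword arguments, the chunk accessors (.type / .data), and the "response" chunk type come from the diff; the constructor argument, the model name, and the other chunk type names are assumptions.

    # Illustrative only; not the app's actual orchestration code.
    provider = Provider::Openai.new(ENV["OPENAI_ACCESS_TOKEN"]) # constructor arg is an assumption

    streamer = proc do |chunk|
      case chunk.type
      when "response"
        # Final, fully parsed response; the same object chat_response returns below.
        puts "\n[done]"
      else
        # Incremental chunks parsed by ChatStreamParser; exact type names are assumed.
        print chunk.data
      end
    end

    response = provider.chat_response(
      "What did I spend on groceries last month?",
      model: "gpt-4o",                  # must be one of Provider::Openai::MODELS
      instructions: "You are a helpful financial assistant.",
      functions: [],                    # function definitions exposed to the model as tools
      function_results: [],             # results of previously requested function calls
      streamer: streamer,
      previous_response_id: nil         # pass a prior response id to continue a conversation
    )

Note that even with a streamer, chat_response still returns the final parsed response (the "response" chunk's data), so callers do not need separate code paths for streaming and non-streaming calls.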

@@ -1,5 +1,5 @@
 class Provider::Openai < Provider
-  include LlmProvider
+  include LlmConcept
 
   # Subclass so errors caught in this provider are raised as Provider::Openai::Error
   Error = Class.new(Provider::Error)
@@ -14,17 +14,46 @@ class Provider::Openai < Provider
     MODELS.include?(model)
   end
 
-  def chat_response(message, instructions: nil, available_functions: [], streamer: nil)
+  def chat_response(prompt, model:, instructions: nil, functions: [], function_results: [], streamer: nil, previous_response_id: nil)
     with_provider_response do
-      processor = ChatResponseProcessor.new(
-        client: client,
-        message: message,
-        instructions: instructions,
-        available_functions: available_functions,
-        streamer: streamer
+      chat_config = ChatConfig.new(
+        functions: functions,
+        function_results: function_results
       )
 
-      processor.process
+      collected_chunks = []
+
+      # Proxy that converts raw stream to "LLM Provider concept" stream
+      stream_proxy = if streamer.present?
+        proc do |chunk|
+          parsed_chunk = ChatStreamParser.new(chunk).parsed
+
+          unless parsed_chunk.nil?
+            streamer.call(parsed_chunk)
+            collected_chunks << parsed_chunk
+          end
+        end
+      else
+        nil
+      end
+
+      raw_response = client.responses.create(parameters: {
+        model: model,
+        input: chat_config.build_input(prompt),
+        instructions: instructions,
+        tools: chat_config.tools,
+        previous_response_id: previous_response_id,
+        stream: stream_proxy
+      })
+
+      # If streaming, Ruby OpenAI does not return anything, so to normalize this method's API, we search
+      # for the "response chunk" in the stream and return it (it is already parsed)
+      if stream_proxy.present?
+        response_chunk = collected_chunks.find { |chunk| chunk.type == "response" }
+        response_chunk.data
+      else
+        ChatParser.new(raw_response).parsed
+      end
     end
   end
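
The collect-then-find step at the end of chat_response is what keeps the streaming and non-streaming paths returning the same object. A self-contained sketch of that normalization pattern, with a hand-rolled Chunk struct standing in for the parsed chunks; the struct and the "output_text" type name are illustrative, only the "response" type and the .type / .data accessors appear in the diff.

    # Illustrative Ruby, runnable on its own; not code from the app.
    Chunk = Struct.new(:type, :data, keyword_init: true)

    collected_chunks = []

    # Same shape as stream_proxy above: forward each parsed chunk to the caller
    # and remember it so the final response can be recovered afterwards.
    stream_proxy = proc do |parsed_chunk|
      collected_chunks << parsed_chunk
      print parsed_chunk.data if parsed_chunk.type == "output_text"
    end

    # Simulate a stream of already-parsed chunks arriving from the provider.
    [
      Chunk.new(type: "output_text", data: "Your grocery spend was "),
      Chunk.new(type: "output_text", data: "$412 last month."),
      Chunk.new(type: "response",    data: { id: "resp_123" })
    ].each { |chunk| stream_proxy.call(chunk) }

    # As in chat_response, the final "response" chunk is what gets returned,
    # so a streaming call and a blocking call hand back the same kind of object.
    final = collected_chunks.find { |chunk| chunk.type == "response" }
    puts "\n#{final.data.inspect}"   #=> {:id=>"resp_123"}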