Mirror of https://github.com/maybe-finance/maybe.git
* Domain model sketch
* Scaffold out rules domain
* Migrations
* Remove existing data enrichment for clean slate
* Sketch out business logic and basic tests
* Simplify rule scope building and action executions
* Get generator working again
* Basic implementation + tests
* Remove manual merchant management (rules will replace)
* Revert "Remove manual merchant management (rules will replace)"
This reverts commit 83dcbd9ff0.
* Family and Provider merchants model
* Fix brakeman warnings
* Fix notification loader
* Update notification position
* Add Rule action and condition registries
* Rule form with compound conditions and tests
* Split out notification types, add CTA type
* Rules form builder and Stimulus controller
* Clean up rule registry domain
* Clean up rules stimulus controller
* CTA message for rule when user changes transaction category
* Fix tests
* Lint updates
* Centralize notifications in Notifiable concern
* Implement category rule prompts with auto backoff and option to disable
* Fix layout bug caused by merge conflict
* Initialize rule with correct action for category CTA
* Add rule deletions, get rules working
* Complete dynamic rule form, split Stimulus controllers by resource
* Fix failing tests
* Change test password to avoid chromium conflicts
* Update integration tests
* Centralize all test password references
* Add re-apply rule action
* Rule confirm modal
* Run migrations
* Trigger rule notification after inline category updates
* Clean up rule styles
* Basic attribute locking for rules
* Apply attribute locks on user edits
* Log data enrichments, only apply rules to unlocked attributes
* Fix merge errors
* Additional merge conflict fixes
* Form UI improvements, ignore attribute locks on manual rule application
* Batch AI auto-categorization of transactions
* Auto merchant detection, AI enrichment in batches
* Fix Plaid merchant assignments
* Plaid category matching
* Cleanup 1
* Test cleanup
* Remove stale route
* Fix desktop chat UI issues
* Fix mobile nav styling issues
86 lines · 2.5 KiB · Ruby
class Provider::Openai < Provider
  include LlmConcept

  # Subclass so errors caught in this provider are raised as Provider::Openai::Error
  Error = Class.new(Provider::Error)

  MODELS = %w[gpt-4o]

  def initialize(access_token)
    @client = ::OpenAI::Client.new(access_token: access_token)
  end

  def supports_model?(model)
    MODELS.include?(model)
  end

  def auto_categorize(transactions: [], user_categories: [])
    with_provider_response do
      raise Error, "Too many transactions to auto-categorize. Max is 25 per request." if transactions.size > 25

      AutoCategorizer.new(
        client,
        transactions: transactions,
        user_categories: user_categories
      ).auto_categorize
    end
  end

  def auto_detect_merchants(transactions: [], user_merchants: [])
    with_provider_response do
      raise Error, "Too many transactions to auto-detect merchants. Max is 25 per request." if transactions.size > 25

      AutoMerchantDetector.new(
        client,
        transactions: transactions,
        user_merchants: user_merchants
      ).auto_detect_merchants
    end
  end

  def chat_response(prompt, model:, instructions: nil, functions: [], function_results: [], streamer: nil, previous_response_id: nil)
    with_provider_response do
      chat_config = ChatConfig.new(
        functions: functions,
        function_results: function_results
      )

      collected_chunks = []

      # Proxy that converts the raw OpenAI stream to the "LLM Provider concept" stream
      stream_proxy = if streamer.present?
        proc do |chunk|
          parsed_chunk = ChatStreamParser.new(chunk).parsed

          unless parsed_chunk.nil?
            streamer.call(parsed_chunk)
            collected_chunks << parsed_chunk
          end
        end
      else
        nil
      end

      raw_response = client.responses.create(parameters: {
        model: model,
        input: chat_config.build_input(prompt),
        instructions: instructions,
        tools: chat_config.tools,
        previous_response_id: previous_response_id,
        stream: stream_proxy
      })

      # When streaming, the ruby-openai client returns nothing, so to normalize this
      # method's API we search for the "response" chunk in the stream and return it
      # (it is already parsed)
      if stream_proxy.present?
        response_chunk = collected_chunks.find { |chunk| chunk.type == "response" }
        response_chunk.data
      else
        ChatParser.new(raw_response).parsed
      end
    end
  end

  private
    attr_reader :client
end
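
Both batch endpoints above cap a request at 25 transactions, so a caller working through a larger set has to chunk it. A minimal caller-side sketch, assuming a provider built from an API key in ENV and assuming `with_provider_response` wraps results in a response object exposing `success?` and `data`; the payload shapes are illustrative, not the app's actual serializers:

# Hypothetical caller: categorize a large transaction set in
# provider-sized batches of 25.
provider = Provider::Openai.new(ENV["OPENAI_ACCESS_TOKEN"])

results = transactions.each_slice(25).flat_map do |batch|
  response = provider.auto_categorize(
    transactions: batch,
    user_categories: user_categories
  )

  # Assumed response interface: skip batches whose provider call failed.
  response.success? ? response.data : []
end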
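
`chat_response` normalizes streaming and non-streaming calls to one return shape: with a streamer it forwards each parsed chunk to the caller and returns the final "response" chunk's data; without one it parses the raw response. A streaming-call sketch reusing the `provider` above — the chunk interface (`type`, `data`) follows the parser usage in the class, but the "output_text" type name is an assumption:

# Hypothetical streaming call: print text chunks as they arrive.
streamer = proc do |chunk|
  # Chunk type names other than "response" are assumptions here.
  print chunk.data if chunk.type == "output_text"
end

response = provider.chat_response(
  "Summarize my spending this month",
  model: "gpt-4o",
  instructions: "You are a personal finance assistant.",
  streamer: streamer
)

# Same assumed response wrapper as above: the parsed "response" chunk's
# data is available whether or not the call streamed.
response.data if response.success?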