
improvements(ai): Improve AI streaming UI/UX interactions + better separation of AI provider responsibilities (#2039)

* Start refactor

* Interface updates

* Rework Assistant, Provider, and tests for better domain boundaries

* Consolidate and simplify OpenAI provider and provider concepts

* Clean up assistant streaming

* Improve assistant message orchestration logic

* Clean up "thinking" UI interactions

* Remove stale class

* Regenerate VCR test responses
Zach Gollwitzer 2025-04-01 07:21:54 -04:00 committed by GitHub
parent 6331788b33
commit 5cf758bd03
33 changed files with 1179 additions and 624 deletions

@@ -1,184 +1,75 @@
# Orchestrates LLM interactions for chat conversations by:
# - Streaming generic provider responses
# - Persisting messages and tool calls
# - Broadcasting updates to chat UI
# - Handling provider errors
class Assistant
include Provided
include Provided, Configurable, Broadcastable
attr_reader :chat
attr_reader :chat, :instructions
class << self
def for_chat(chat)
new(chat)
config = config_for(chat)
new(chat, instructions: config[:instructions], functions: config[:functions])
end
end
def initialize(chat)
def initialize(chat, instructions: nil, functions: [])
@chat = chat
end
def streamer(model)
assistant_message = AssistantMessage.new(
chat: chat,
content: "",
ai_model: model
)
proc do |chunk|
case chunk.type
when "output_text"
stop_thinking
assistant_message.content += chunk.data
assistant_message.save!
when "function_request"
update_thinking("Analyzing your data to assist you with your question...")
when "response"
stop_thinking
assistant_message.ai_model = chunk.data.model
combined_tool_calls = chunk.data.functions.map do |tc|
ToolCall::Function.new(
provider_id: tc.id,
provider_call_id: tc.call_id,
function_name: tc.name,
function_arguments: tc.arguments,
function_result: tc.result
)
end
assistant_message.tool_calls = combined_tool_calls
assistant_message.save!
chat.update!(latest_assistant_response_id: chunk.data.id)
end
end
@instructions = instructions
@functions = functions
end
def respond_to(message)
chat.clear_error
sleep artificial_thinking_delay
provider = get_model_provider(message.ai_model)
provider.chat_response(
message,
instructions: instructions,
available_functions: functions,
streamer: streamer(message.ai_model)
assistant_message = AssistantMessage.new(
chat: chat,
content: "",
ai_model: message.ai_model
)
responder = Assistant::Responder.new(
message: message,
instructions: instructions,
function_tool_caller: function_tool_caller,
llm: get_model_provider(message.ai_model)
)
latest_response_id = chat.latest_assistant_response_id
responder.on(:output_text) do |text|
if assistant_message.content.blank?
stop_thinking
Chat.transaction do
assistant_message.append_text!(text)
chat.update_latest_response!(latest_response_id)
end
else
assistant_message.append_text!(text)
end
end
responder.on(:response) do |data|
update_thinking("Analyzing your data...")
if data[:function_tool_calls].present?
assistant_message.tool_calls = data[:function_tool_calls]
latest_response_id = data[:id]
else
chat.update_latest_response!(data[:id])
end
end
responder.respond(previous_response_id: latest_response_id)
rescue => e
stop_thinking
chat.add_error(e)
end
private
def update_thinking(thought)
chat.broadcast_update target: "thinking-indicator", partial: "chats/thinking_indicator", locals: { chat: chat, message: thought }
end
attr_reader :functions
def stop_thinking
chat.broadcast_remove target: "thinking-indicator"
end
def process_response_artifacts(data)
messages = data.messages.map do |message|
AssistantMessage.new(
chat: chat,
content: message.content,
provider_id: message.id,
ai_model: data.model,
tool_calls: data.functions.map do |fn|
ToolCall::Function.new(
provider_id: fn.id,
provider_call_id: fn.call_id,
function_name: fn.name,
function_arguments: fn.arguments,
function_result: fn.result
)
end
)
def function_tool_caller
function_instances = functions.map do |fn|
fn.new(chat.user)
end
messages.each(&:save!)
end
def instructions
<<~PROMPT
## Your identity
You are a friendly financial assistant for an open source personal finance application called "Maybe", which is short for "Maybe Finance".
## Your purpose
You help users understand their financial data by answering questions about their accounts,
transactions, income, expenses, net worth, and more.
## Your rules
Follow all rules below at all times.
### General rules
- Provide ONLY the most important numbers and insights
- Eliminate all unnecessary words and context
- Ask follow-up questions to keep the conversation going. Help educate the user about their own data and entice them to ask more questions.
- Do NOT add introductions or conclusions
- Do NOT apologize or explain limitations
### Formatting rules
- Format all responses in markdown
- Format all monetary values according to the user's preferred currency
- Format dates in the user's preferred format
#### User's preferred currency
Maybe is a multi-currency app where each user has a "preferred currency" setting.
When no currency is specified, use the user's preferred currency for formatting and displaying monetary values.
- Symbol: #{preferred_currency.symbol}
- ISO code: #{preferred_currency.iso_code}
- Default precision: #{preferred_currency.default_precision}
- Default format: #{preferred_currency.default_format}
- Separator: #{preferred_currency.separator}
- Delimiter: #{preferred_currency.delimiter}
- Date format: #{preferred_date_format}
### Rules about financial advice
You are NOT a licensed financial advisor and therefore, you should not provide any specific investment advice (such as "buy this stock", "sell that bond", "invest in crypto", etc.).
Instead, you should focus on educating the user about personal finance using their own data so they can make informed decisions.
- Do not suggest investments or financial products
- Do not make assumptions about the user's financial situation. Use the functions available to get the data you need.
### Function calling rules
- Use the functions available to you to get user financial data and enhance your responses
- For functions that require dates, use the current date as your reference point: #{Date.current}
- If you suspect that you do not have enough data to 100% accurately answer, be transparent about it and state exactly what
the data you're presenting represents and what context it is in (i.e. date range, account, etc.)
PROMPT
end
def functions
[
Assistant::Function::GetTransactions.new(chat.user),
Assistant::Function::GetAccounts.new(chat.user),
Assistant::Function::GetBalanceSheet.new(chat.user),
Assistant::Function::GetIncomeStatement.new(chat.user)
]
end
def preferred_currency
Money::Currency.new(chat.user.family.currency)
end
def preferred_date_format
chat.user.family.date_format
end
def artificial_thinking_delay
1
@function_tool_caller ||= FunctionToolCaller.new(function_instances)
end
end
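Taken together, the refactor reduces the public surface to two calls. A minimal sketch of the intended flow, assuming `chat` and `message` are persisted records:

assistant = Assistant.for_chat(chat)   # instructions + functions resolved via Assistant::Configurable
assistant.respond_to(message)          # streams text into an AssistantMessage; "thinking" UI via Assistant::Broadcastable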

@@ -0,0 +1,12 @@
module Assistant::Broadcastable
extend ActiveSupport::Concern
private
def update_thinking(thought)
chat.broadcast_update target: "thinking-indicator", partial: "chats/thinking_indicator", locals: { chat: chat, message: thought }
end
def stop_thinking
chat.broadcast_remove target: "thinking-indicator"
end
end

@@ -0,0 +1,85 @@
module Assistant::Configurable
extend ActiveSupport::Concern
class_methods do
def config_for(chat)
preferred_currency = Money::Currency.new(chat.user.family.currency)
preferred_date_format = chat.user.family.date_format
{
instructions: default_instructions(preferred_currency, preferred_date_format),
functions: default_functions
}
end
private
def default_functions
[
Assistant::Function::GetTransactions,
Assistant::Function::GetAccounts,
Assistant::Function::GetBalanceSheet,
Assistant::Function::GetIncomeStatement
]
end
def default_instructions(preferred_currency, preferred_date_format)
<<~PROMPT
## Your identity
You are a friendly financial assistant for an open source personal finance application called "Maybe", which is short for "Maybe Finance".
## Your purpose
You help users understand their financial data by answering questions about their accounts,
transactions, income, expenses, net worth, and more.
## Your rules
Follow all rules below at all times.
### General rules
- Provide ONLY the most important numbers and insights
- Eliminate all unnecessary words and context
- Ask follow-up questions to keep the conversation going. Help educate the user about their own data and entice them to ask more questions.
- Do NOT add introductions or conclusions
- Do NOT apologize or explain limitations
### Formatting rules
- Format all responses in markdown
- Format all monetary values according to the user's preferred currency
- Format dates in the user's preferred format: #{preferred_date_format}
#### User's preferred currency
Maybe is a multi-currency app where each user has a "preferred currency" setting.
When no currency is specified, use the user's preferred currency for formatting and displaying monetary values.
- Symbol: #{preferred_currency.symbol}
- ISO code: #{preferred_currency.iso_code}
- Default precision: #{preferred_currency.default_precision}
- Default format: #{preferred_currency.default_format}
- Separator: #{preferred_currency.separator}
- Delimiter: #{preferred_currency.delimiter}
### Rules about financial advice
You are NOT a licensed financial advisor and therefore, you should not provide any specific investment advice (such as "buy this stock", "sell that bond", "invest in crypto", etc.).
Instead, you should focus on educating the user about personal finance using their own data so they can make informed decisions.
- Do not suggest investments or financial products
- Do not make assumptions about the user's financial situation. Use the functions available to get the data you need.
### Function calling rules
- Use the functions available to you to get user financial data and enhance your responses
- For functions that require dates, use the current date as your reference point: #{Date.current}
- If you suspect that you do not have enough data to 100% accurately answer, be transparent about it and state exactly what
the data you're presenting represents and what context it is in (i.e. date range, account, etc.)
PROMPT
end
end
end
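Since the concern is included in Assistant, `config_for` is callable on the class itself. A sketch of the resolved shape, assuming a chat whose user belongs to a family with currency and date-format settings:

config = Assistant.config_for(chat)
config[:functions]    # => [Assistant::Function::GetTransactions, Assistant::Function::GetAccounts, ...] (classes, instantiated later)
config[:instructions] # => the prompt above with the user's currency and date format interpolated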

@@ -34,6 +34,15 @@ class Assistant::Function
true
end
def to_definition
{
name: name,
description: description,
params_schema: params_schema,
strict: strict_mode?
}
end
private
attr_reader :user
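With `to_definition`, each function describes itself in provider-agnostic terms. A sketch of the resulting hash (the description text here is illustrative, not the repo's actual copy):

Assistant::Function::GetAccounts.new(user).to_definition
# => { name: "get_accounts", description: "...", params_schema: { type: "object", ... }, strict: true }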

@@ -0,0 +1,37 @@
class Assistant::FunctionToolCaller
Error = Class.new(StandardError)
FunctionExecutionError = Class.new(Error)
attr_reader :functions
def initialize(functions = [])
@functions = functions
end
def fulfill_requests(function_requests)
function_requests.map do |function_request|
result = execute(function_request)
ToolCall::Function.from_function_request(function_request, result)
end
end
def function_definitions
functions.map(&:to_definition)
end
private
def execute(function_request)
fn = find_function(function_request)
fn_args = JSON.parse(function_request.function_args)
fn.call(fn_args)
rescue => e
raise FunctionExecutionError.new(
"Error calling function #{fn.name} with arguments #{fn_args}: #{e.message}"
)
end
def find_function(function_request)
functions.find { |f| f.name == function_request.function_name }
end
end
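The caller only depends on objects that respond to `name`, `to_definition`, and `call`, so it can be exercised with a stand-in. A sketch using a hypothetical FakeFunction (not part of this commit):

FakeFunction = Struct.new(:user) do
  def name = "get_accounts"
  def to_definition
    { name: name, description: "Lists accounts", params_schema: {}, strict: true }
  end
  def call(_args) = "ok"
end

caller = Assistant::FunctionToolCaller.new([ FakeFunction.new(nil) ])
caller.function_definitions # => [ { name: "get_accounts", description: "Lists accounts", ... } ]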

@@ -0,0 +1,87 @@
class Assistant::Responder
def initialize(message:, instructions:, function_tool_caller:, llm:)
@message = message
@instructions = instructions
@function_tool_caller = function_tool_caller
@llm = llm
end
def on(event_name, &block)
listeners[event_name.to_sym] << block
end
def respond(previous_response_id: nil)
# For the first response
streamer = proc do |chunk|
case chunk.type
when "output_text"
emit(:output_text, chunk.data)
when "response"
response = chunk.data
if response.function_requests.any?
handle_follow_up_response(response)
else
emit(:response, { id: response.id })
end
end
end
get_llm_response(streamer: streamer, previous_response_id: previous_response_id)
end
private
attr_reader :message, :instructions, :function_tool_caller, :llm
def handle_follow_up_response(response)
streamer = proc do |chunk|
case chunk.type
when "output_text"
emit(:output_text, chunk.data)
when "response"
# We do not currently support function executions for a follow-up response (avoid recursive LLM calls that could lead to high spend)
emit(:response, { id: chunk.data.id })
end
end
function_tool_calls = function_tool_caller.fulfill_requests(response.function_requests)
emit(:response, {
id: response.id,
function_tool_calls: function_tool_calls
})
# Get follow-up response with tool call results
get_llm_response(
streamer: streamer,
function_results: function_tool_calls.map(&:to_result),
previous_response_id: response.id
)
end
def get_llm_response(streamer:, function_results: [], previous_response_id: nil)
response = llm.chat_response(
message.content,
model: message.ai_model,
instructions: instructions,
functions: function_tool_caller.function_definitions,
function_results: function_results,
streamer: streamer,
previous_response_id: previous_response_id
)
unless response.success?
raise response.error
end
response.data
end
def emit(event_name, payload = nil)
listeners[event_name.to_sym].each { |block| block.call(payload) }
end
def listeners
@listeners ||= Hash.new { |h, k| h[k] = [] }
end
end
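Consumers register listeners before kicking off the response, which is exactly how Assistant#respond_to uses it. A sketch, assuming `message`, `tool_caller`, and `provider` are already built:

responder = Assistant::Responder.new(
  message: message,
  instructions: "You are a helpful assistant.",
  function_tool_caller: tool_caller,
  llm: provider
)
responder.on(:output_text) { |text| print text }
responder.on(:response)    { |data| puts "finished response #{data[:id]}" }
responder.respond(previous_response_id: nil)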

@@ -5,7 +5,8 @@ class AssistantMessage < Message
"assistant"
end
def broadcast?
true
def append_text!(text)
self.content += text
save!
end
end

@@ -23,15 +23,25 @@ class Chat < ApplicationRecord
end
end
def needs_assistant_response?
conversation_messages.ordered.last.role != "assistant"
end
def retry_last_message!
update!(error: nil)
last_message = conversation_messages.ordered.last
if last_message.present? && last_message.role == "user"
update!(error: nil)
ask_assistant_later(last_message)
end
end
def update_latest_response!(provider_response_id)
update!(latest_assistant_response_id: provider_response_id)
end
def add_error(e)
update! error: e.to_json
broadcast_append target: "messages", partial: "chats/error", locals: { chat: self }
@@ -47,6 +57,7 @@ class Chat < ApplicationRecord
end
def ask_assistant_later(message)
clear_error
AssistantResponseJob.perform_later(message)
end

@@ -3,7 +3,8 @@ class DeveloperMessage < Message
"developer"
end
def broadcast?
chat.debug_mode?
end
private
def broadcast?
chat.debug_mode?
end
end

@@ -8,7 +8,7 @@ class Message < ApplicationRecord
failed: "failed"
}
validates :content, presence: true, allow_blank: true
validates :content, presence: true
after_create_commit -> { broadcast_append_to chat, target: "messages" }, if: :broadcast?
after_update_commit -> { broadcast_update_to chat }, if: :broadcast?
@@ -17,6 +17,6 @@ class Message < ApplicationRecord
private
def broadcast?
raise NotImplementedError, "subclasses must set #broadcast?"
true
end
end

@@ -4,17 +4,15 @@ class Provider
Response = Data.define(:success?, :data, :error)
class Error < StandardError
attr_reader :details, :provider
attr_reader :details
def initialize(message, details: nil, provider: nil)
def initialize(message, details: nil)
super(message)
@details = details
@provider = provider
end
def as_json
{
provider: provider,
message: message,
details: details
}

@@ -1,6 +1,8 @@
module Provider::ExchangeRateProvider
module Provider::ExchangeRateConcept
extend ActiveSupport::Concern
Rate = Data.define(:date, :from, :to, :rate)
def fetch_exchange_rate(from:, to:, date:)
raise NotImplementedError, "Subclasses must implement #fetch_exchange_rate"
end
@@ -8,7 +10,4 @@ module Provider::ExchangeRateProvider
def fetch_exchange_rates(from:, to:, start_date:, end_date:)
raise NotImplementedError, "Subclasses must implement #fetch_exchange_rates"
end
private
Rate = Data.define(:date, :from, :to, :rate)
end

@@ -0,0 +1,12 @@
module Provider::LlmConcept
extend ActiveSupport::Concern
ChatMessage = Data.define(:id, :output_text)
ChatStreamChunk = Data.define(:type, :data)
ChatResponse = Data.define(:id, :model, :messages, :function_requests)
ChatFunctionRequest = Data.define(:id, :call_id, :function_name, :function_args)
def chat_response(prompt, model:, instructions: nil, functions: [], function_results: [], streamer: nil, previous_response_id: nil)
raise NotImplementedError, "Subclasses must implement #chat_response"
end
end
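These are plain `Data` value objects, so both sides of the interface can construct them directly, e.g.:

chunk   = Provider::LlmConcept::ChatStreamChunk.new(type: "output_text", data: "Hello")
message = Provider::LlmConcept::ChatMessage.new(id: "msg_1", output_text: "Hello")
chunk.data          # => "Hello"
message.output_text # => "Hello"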

@@ -1,13 +0,0 @@
module Provider::LlmProvider
extend ActiveSupport::Concern
def chat_response(message, instructions: nil, available_functions: [], streamer: nil)
raise NotImplementedError, "Subclasses must implement #chat_response"
end
private
StreamChunk = Data.define(:type, :data)
ChatResponse = Data.define(:id, :messages, :functions, :model)
Message = Data.define(:id, :content)
FunctionExecution = Data.define(:id, :call_id, :name, :arguments, :result)
end

@@ -1,5 +1,5 @@
class Provider::Openai < Provider
include LlmProvider
include LlmConcept
# Subclass so errors caught in this provider are raised as Provider::Openai::Error
Error = Class.new(Provider::Error)
@@ -14,17 +14,46 @@ class Provider::Openai < Provider
MODELS.include?(model)
end
def chat_response(message, instructions: nil, available_functions: [], streamer: nil)
def chat_response(prompt, model:, instructions: nil, functions: [], function_results: [], streamer: nil, previous_response_id: nil)
with_provider_response do
processor = ChatResponseProcessor.new(
client: client,
message: message,
instructions: instructions,
available_functions: available_functions,
streamer: streamer
chat_config = ChatConfig.new(
functions: functions,
function_results: function_results
)
processor.process
collected_chunks = []
# Proxy that converts raw stream to "LLM Provider concept" stream
stream_proxy = if streamer.present?
proc do |chunk|
parsed_chunk = ChatStreamParser.new(chunk).parsed
unless parsed_chunk.nil?
streamer.call(parsed_chunk)
collected_chunks << parsed_chunk
end
end
else
nil
end
raw_response = client.responses.create(parameters: {
model: model,
input: chat_config.build_input(prompt),
instructions: instructions,
tools: chat_config.tools,
previous_response_id: previous_response_id,
stream: stream_proxy
})
# If streaming, Ruby OpenAI does not return anything, so to normalize this method's API, we search
# for the "response chunk" in the stream and return it (it is already parsed)
if stream_proxy.present?
response_chunk = collected_chunks.find { |chunk| chunk.type == "response" }
response_chunk.data
else
ChatParser.new(raw_response).parsed
end
end
end
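The net effect is a uniform return value for callers. A sketch from the consumer's side, assuming `openai` is a configured Provider::Openai:

response = openai.chat_response("Ping", model: "gpt-4o")
response.success? # => true on a successful API call
response.data     # => Provider::LlmConcept::ChatResponse, same shape whether or not a streamer was passed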

@@ -0,0 +1,36 @@
class Provider::Openai::ChatConfig
def initialize(functions: [], function_results: [])
@functions = functions
@function_results = function_results
end
def tools
functions.map do |fn|
{
type: "function",
name: fn[:name],
description: fn[:description],
parameters: fn[:params_schema],
strict: fn[:strict]
}
end
end
def build_input(prompt)
results = function_results.map do |fn_result|
{
type: "function_call_output",
call_id: fn_result[:call_id],
output: fn_result[:output].to_json
}
end
[
{ role: "user", content: prompt },
*results
]
end
private
attr_reader :functions, :function_results
end
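Given one prior function result, `build_input` assembles the Responses API payload as follows (a sketch):

config = Provider::Openai::ChatConfig.new(
  function_results: [ { call_id: "call_1", output: { amount: 100 } } ]
)
config.build_input("What is my net worth?")
# => [ { role: "user", content: "What is my net worth?" },
#      { type: "function_call_output", call_id: "call_1", output: "{\"amount\":100}" } ]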

@@ -0,0 +1,59 @@
class Provider::Openai::ChatParser
Error = Class.new(StandardError)
def initialize(object)
@object = object
end
def parsed
ChatResponse.new(
id: response_id,
model: response_model,
messages: messages,
function_requests: function_requests
)
end
private
attr_reader :object
ChatResponse = Provider::LlmConcept::ChatResponse
ChatMessage = Provider::LlmConcept::ChatMessage
ChatFunctionRequest = Provider::LlmConcept::ChatFunctionRequest
def response_id
object.dig("id")
end
def response_model
object.dig("model")
end
def messages
message_items = object.dig("output").filter { |item| item.dig("type") == "message" }
message_items.map do |message_item|
ChatMessage.new(
id: message_item.dig("id"),
output_text: message_item.dig("content").map do |content|
text = content.dig("text")
refusal = content.dig("refusal")
text || refusal
end.flatten.join("\n")
)
end
end
def function_requests
function_items = object.dig("output").filter { |item| item.dig("type") == "function_call" }
function_items.map do |function_item|
ChatFunctionRequest.new(
id: function_item.dig("id"),
call_id: function_item.dig("call_id"),
function_name: function_item.dig("name"),
function_args: function_item.dig("arguments")
)
end
end
end
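Feeding the parser a raw Responses API hash yields the generic value objects. A sketch, where `raw_response` is assumed to be the parsed JSON body:

parsed = Provider::Openai::ChatParser.new(raw_response).parsed
parsed.messages.map(&:output_text)            # assistant text (refusals are folded in)
parsed.function_requests.map(&:function_name) # e.g. ["get_net_worth"]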

@@ -1,188 +0,0 @@
class Provider::Openai::ChatResponseProcessor
def initialize(message:, client:, instructions: nil, available_functions: [], streamer: nil)
@client = client
@message = message
@instructions = instructions
@available_functions = available_functions
@streamer = streamer
end
def process
first_response = fetch_response(previous_response_id: previous_openai_response_id)
if first_response.functions.empty?
if streamer.present?
streamer.call(Provider::LlmProvider::StreamChunk.new(type: "response", data: first_response))
end
return first_response
end
executed_functions = execute_pending_functions(first_response.functions)
follow_up_response = fetch_response(
executed_functions: executed_functions,
previous_response_id: first_response.id
)
if streamer.present?
streamer.call(Provider::LlmProvider::StreamChunk.new(type: "response", data: follow_up_response))
end
follow_up_response
end
private
attr_reader :client, :message, :instructions, :available_functions, :streamer
PendingFunction = Data.define(:id, :call_id, :name, :arguments)
def fetch_response(executed_functions: [], previous_response_id: nil)
function_results = executed_functions.map do |executed_function|
{
type: "function_call_output",
call_id: executed_function.call_id,
output: executed_function.result.to_json
}
end
prepared_input = input + function_results
# No need to pass tools for follow-up messages that provide function results
prepared_tools = executed_functions.empty? ? tools : []
raw_response = nil
internal_streamer = proc do |chunk|
type = chunk.dig("type")
if streamer.present?
case type
when "response.output_text.delta", "response.refusal.delta"
# We don't distinguish between text and refusal yet, so stream both the same
streamer.call(Provider::LlmProvider::StreamChunk.new(type: "output_text", data: chunk.dig("delta")))
when "response.function_call_arguments.done"
streamer.call(Provider::LlmProvider::StreamChunk.new(type: "function_request", data: chunk.dig("arguments")))
end
end
if type == "response.completed"
raw_response = chunk.dig("response")
end
end
client.responses.create(parameters: {
model: model,
input: prepared_input,
instructions: instructions,
tools: prepared_tools,
previous_response_id: previous_response_id,
stream: internal_streamer
})
if raw_response.dig("status") == "failed" || raw_response.dig("status") == "incomplete"
raise Provider::Openai::Error.new("OpenAI returned a failed or incomplete response", { chunk: chunk })
end
response_output = raw_response.dig("output")
functions_output = if executed_functions.any?
executed_functions
else
extract_pending_functions(response_output)
end
Provider::LlmProvider::ChatResponse.new(
id: raw_response.dig("id"),
messages: extract_messages(response_output),
functions: functions_output,
model: raw_response.dig("model")
)
end
def chat
message.chat
end
def model
message.ai_model
end
def previous_openai_response_id
chat.latest_assistant_response_id
end
# Since we're using OpenAI's conversation state management, all we need to pass
# to input is the user message we're currently responding to.
def input
[ { role: "user", content: message.content } ]
end
def extract_messages(response_output)
message_items = response_output.filter { |item| item.dig("type") == "message" }
message_items.map do |item|
output_text = item.dig("content").map do |content|
text = content.dig("text")
refusal = content.dig("refusal")
text || refusal
end.flatten.join("\n")
Provider::LlmProvider::Message.new(
id: item.dig("id"),
content: output_text,
)
end
end
def extract_pending_functions(response_output)
response_output.filter { |item| item.dig("type") == "function_call" }.map do |item|
PendingFunction.new(
id: item.dig("id"),
call_id: item.dig("call_id"),
name: item.dig("name"),
arguments: item.dig("arguments"),
)
end
end
def execute_pending_functions(pending_functions)
pending_functions.map do |pending_function|
execute_function(pending_function)
end
end
def execute_function(fn)
fn_instance = available_functions.find { |f| f.name == fn.name }
parsed_args = JSON.parse(fn.arguments)
result = fn_instance.call(parsed_args)
Provider::LlmProvider::FunctionExecution.new(
id: fn.id,
call_id: fn.call_id,
name: fn.name,
arguments: parsed_args,
result: result
)
rescue => e
fn_execution_details = {
fn_name: fn.name,
fn_args: parsed_args
}
raise Provider::Openai::Error.new(e, fn_execution_details)
end
def tools
available_functions.map do |fn|
{
type: "function",
name: fn.name,
description: fn.description,
parameters: fn.params_schema,
strict: fn.strict_mode?
}
end
end
end

@@ -0,0 +1,28 @@
class Provider::Openai::ChatStreamParser
Error = Class.new(StandardError)
def initialize(object)
@object = object
end
def parsed
type = object.dig("type")
case type
when "response.output_text.delta", "response.refusal.delta"
Chunk.new(type: "output_text", data: object.dig("delta"))
when "response.completed"
raw_response = object.dig("response")
Chunk.new(type: "response", data: parse_response(raw_response))
end
end
private
attr_reader :object
Chunk = Provider::LlmConcept::ChatStreamChunk
def parse_response(response)
Provider::Openai::ChatParser.new(response).parsed
end
end
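Unhandled event types parse to nil, which the stream proxy in Provider::Openai#chat_response filters out. For the handled delta type (a sketch):

parser = Provider::Openai::ChatStreamParser.new({ "type" => "response.output_text.delta", "delta" => "Yes" })
parser.parsed # => ChatStreamChunk with type: "output_text", data: "Yes"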

@@ -1,13 +0,0 @@
# A stream proxy for OpenAI chat responses
#
# - Consumes an OpenAI chat response stream
# - Outputs a generic "Chat Provider Stream" interface to consumers (e.g. `Assistant`)
class Provider::Openai::ChatStreamer
def initialize(output_stream)
@output_stream = output_stream
end
def call(chunk)
@output_stream.call(chunk)
end
end

@@ -1,6 +1,10 @@
module Provider::SecurityProvider
module Provider::SecurityConcept
extend ActiveSupport::Concern
Security = Data.define(:symbol, :name, :logo_url, :exchange_operating_mic)
SecurityInfo = Data.define(:symbol, :name, :links, :logo_url, :description, :kind)
Price = Data.define(:security, :date, :price, :currency)
def search_securities(symbol, country_code: nil, exchange_operating_mic: nil)
raise NotImplementedError, "Subclasses must implement #search_securities"
end
@@ -16,9 +20,4 @@ module Provider::SecurityProvider
def fetch_security_prices(security, start_date:, end_date:)
raise NotImplementedError, "Subclasses must implement #fetch_security_prices"
end
private
Security = Data.define(:symbol, :name, :logo_url, :exchange_operating_mic)
SecurityInfo = Data.define(:symbol, :name, :links, :logo_url, :description, :kind)
Price = Data.define(:security, :date, :price, :currency)
end

@@ -1,5 +1,5 @@
class Provider::Synth < Provider
include ExchangeRateProvider, SecurityProvider
include ExchangeRateConcept, SecurityConcept
# Subclass so errors caught in this provider are raised as Provider::Synth::Error
Error = Class.new(Provider::Error)

@@ -1,4 +1,24 @@
class ToolCall::Function < ToolCall
validates :function_name, :function_result, presence: true
validates :function_arguments, presence: true, allow_blank: true
class << self
# Translates an "LLM Concept" provider's FunctionRequest into a ToolCall::Function
def from_function_request(function_request, result)
new(
provider_id: function_request.id,
provider_call_id: function_request.call_id,
function_name: function_request.function_name,
function_arguments: function_request.function_args,
function_result: result
)
end
end
def to_result
{
call_id: provider_call_id,
output: function_result
}
end
end
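Round-tripping a provider request through execution then looks roughly like this, assuming `function_request` is a Provider::LlmConcept::ChatFunctionRequest and the result came from Assistant::FunctionToolCaller:

tool_call = ToolCall::Function.from_function_request(function_request, "$10,000")
tool_call.to_result # => { call_id: function_request.call_id, output: "$10,000" }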

@@ -14,9 +14,4 @@ class UserMessage < Message
def request_response
chat.ask_assistant(self)
end
private
def broadcast?
true
end
end

@@ -17,6 +17,7 @@
<div class="flex items-start mb-6">
<%= render "chats/ai_avatar" %>
<div class="prose prose--ai-chat"><%= markdown(assistant_message.content) %></div>
</div>
<% end %>

@@ -23,7 +23,7 @@
<%= render "chats/thinking_indicator", chat: @chat %>
<% end %>
<% if @chat.error.present? %>
<% if @chat.error.present? && @chat.needs_assistant_response? %>
<%= render "chats/error", chat: @chat %>
<% end %>
</div>

@@ -1,5 +1,4 @@
require "test_helper"
require "ostruct"
class AssistantTest < ActiveSupport::TestCase
include ProviderTestHelper
@@ -8,74 +7,109 @@ class AssistantTest < ActiveSupport::TestCase
@chat = chats(:two)
@message = @chat.messages.create!(
type: "UserMessage",
content: "Help me with my finances",
content: "What is my net worth?",
ai_model: "gpt-4o"
)
@assistant = Assistant.for_chat(@chat)
@provider = mock
@assistant.expects(:get_model_provider).with("gpt-4o").returns(@provider)
end
test "responds to basic prompt" do
text_chunk = OpenStruct.new(type: "output_text", data: "Hello from assistant")
response_chunk = OpenStruct.new(
type: "response",
data: OpenStruct.new(
id: "1",
model: "gpt-4o",
messages: [
OpenStruct.new(
id: "1",
content: "Hello from assistant",
)
],
functions: []
)
)
test "errors get added to chat" do
@assistant.expects(:get_model_provider).with("gpt-4o").returns(@provider)
@provider.expects(:chat_response).with do |message, **options|
options[:streamer].call(text_chunk)
options[:streamer].call(response_chunk)
true
end
error = StandardError.new("test error")
@provider.expects(:chat_response).returns(provider_error_response(error))
assert_difference "AssistantMessage.count", 1 do
@chat.expects(:add_error).with(error).once
assert_no_difference "AssistantMessage.count" do
@assistant.respond_to(@message)
end
end
test "responds with tool function calls" do
function_request_chunk = OpenStruct.new(type: "function_request", data: "get_net_worth")
text_chunk = OpenStruct.new(type: "output_text", data: "Your net worth is $124,200")
response_chunk = OpenStruct.new(
type: "response",
data: OpenStruct.new(
id: "1",
model: "gpt-4o",
messages: [
OpenStruct.new(
id: "1",
content: "Your net worth is $124,200",
)
],
functions: [
OpenStruct.new(
id: "1",
call_id: "1",
name: "get_net_worth",
arguments: "{}",
result: "$124,200"
)
]
)
test "responds to basic prompt" do
@assistant.expects(:get_model_provider).with("gpt-4o").returns(@provider)
text_chunks = [
provider_text_chunk("I do not "),
provider_text_chunk("have the information "),
provider_text_chunk("to answer that question")
]
response_chunk = provider_response_chunk(
id: "1",
model: "gpt-4o",
messages: [ provider_message(id: "1", text: text_chunks.join) ],
function_requests: []
)
response = provider_success_response(response_chunk.data)
@provider.expects(:chat_response).with do |message, **options|
options[:streamer].call(function_request_chunk)
options[:streamer].call(text_chunk)
text_chunks.each do |text_chunk|
options[:streamer].call(text_chunk)
end
options[:streamer].call(response_chunk)
true
end.returns(response)
assert_difference "AssistantMessage.count", 1 do
@assistant.respond_to(@message)
message = @chat.messages.ordered.where(type: "AssistantMessage").last
assert_equal "I do not have the information to answer that question", message.content
assert_equal 0, message.tool_calls.size
end
end
test "responds with tool function calls" do
@assistant.expects(:get_model_provider).with("gpt-4o").returns(@provider).once
# Only first provider call executes function
Assistant::Function::GetAccounts.any_instance.stubs(:call).returns("test value").once
# Call #1: Function requests
call1_response_chunk = provider_response_chunk(
id: "1",
model: "gpt-4o",
messages: [],
function_requests: [
provider_function_request(id: "1", call_id: "1", function_name: "get_accounts", function_args: "{}")
]
)
call1_response = provider_success_response(call1_response_chunk.data)
# Call #2: Text response (that uses function results)
call2_text_chunks = [
provider_text_chunk("Your net worth is "),
provider_text_chunk("$124,200")
]
call2_response_chunk = provider_response_chunk(
id: "2",
model: "gpt-4o",
messages: [ provider_message(id: "1", text: call2_text_chunks.join) ],
function_requests: []
)
call2_response = provider_success_response(call2_response_chunk.data)
sequence = sequence("provider_chat_response")
@provider.expects(:chat_response).with do |message, **options|
call2_text_chunks.each do |text_chunk|
options[:streamer].call(text_chunk)
end
options[:streamer].call(call2_response_chunk)
true
end.returns(call2_response).once.in_sequence(sequence)
@provider.expects(:chat_response).with do |message, **options|
options[:streamer].call(call1_response_chunk)
true
end.returns(call1_response).once.in_sequence(sequence)
assert_difference "AssistantMessage.count", 1 do
@assistant.respond_to(@message)
@@ -83,4 +117,34 @@ class AssistantTest < ActiveSupport::TestCase
assert_equal 1, message.tool_calls.size
end
end
private
def provider_function_request(id:, call_id:, function_name:, function_args:)
Provider::LlmConcept::ChatFunctionRequest.new(
id: id,
call_id: call_id,
function_name: function_name,
function_args: function_args
)
end
def provider_message(id:, text:)
Provider::LlmConcept::ChatMessage.new(id: id, output_text: text)
end
def provider_text_chunk(text)
Provider::LlmConcept::ChatStreamChunk.new(type: "output_text", data: text)
end
def provider_response_chunk(id:, model:, messages:, function_requests:)
Provider::LlmConcept::ChatStreamChunk.new(
type: "response",
data: Provider::LlmConcept::ChatResponse.new(
id: id,
model: model,
messages: messages,
function_requests: function_requests
)
)
end
end

@@ -6,16 +6,11 @@ class Provider::OpenaiTest < ActiveSupport::TestCase
setup do
@subject = @openai = Provider::Openai.new(ENV.fetch("OPENAI_ACCESS_TOKEN", "test-openai-token"))
@subject_model = "gpt-4o"
@chat = chats(:two)
end
test "openai errors are automatically raised" do
VCR.use_cassette("openai/chat/error") do
response = @openai.chat_response(UserMessage.new(
chat: @chat,
content: "Error test",
ai_model: "invalid-model-that-will-trigger-api-error"
))
response = @openai.chat_response("Test", model: "invalid-model-that-will-trigger-api-error")
assert_not response.success?
assert_kind_of Provider::Openai::Error, response.error
@@ -24,113 +19,145 @@ class Provider::OpenaiTest < ActiveSupport::TestCase
test "basic chat response" do
VCR.use_cassette("openai/chat/basic_response") do
message = @chat.messages.create!(
type: "UserMessage",
content: "This is a chat test. If it's working, respond with a single word: Yes",
ai_model: @subject_model
response = @subject.chat_response(
"This is a chat test. If it's working, respond with a single word: Yes",
model: @subject_model
)
response = @subject.chat_response(message)
assert response.success?
assert_equal 1, response.data.messages.size
assert_includes response.data.messages.first.content, "Yes"
assert_includes response.data.messages.first.output_text, "Yes"
end
end
test "streams basic chat response" do
VCR.use_cassette("openai/chat/basic_response") do
VCR.use_cassette("openai/chat/basic_streaming_response") do
collected_chunks = []
mock_streamer = proc do |chunk|
collected_chunks << chunk
end
message = @chat.messages.create!(
type: "UserMessage",
content: "This is a chat test. If it's working, respond with a single word: Yes",
ai_model: @subject_model
response = @subject.chat_response(
"This is a chat test. If it's working, respond with a single word: Yes",
model: @subject_model,
streamer: mock_streamer
)
@subject.chat_response(message, streamer: mock_streamer)
tool_call_chunks = collected_chunks.select { |chunk| chunk.type == "function_request" }
text_chunks = collected_chunks.select { |chunk| chunk.type == "output_text" }
response_chunks = collected_chunks.select { |chunk| chunk.type == "response" }
assert_equal 1, text_chunks.size
assert_equal 1, response_chunks.size
assert_equal 0, tool_call_chunks.size
assert_equal "Yes", text_chunks.first.data
assert_equal "Yes", response_chunks.first.data.messages.first.content
assert_equal "Yes", response_chunks.first.data.messages.first.output_text
assert_equal response_chunks.first.data, response.data
end
end
test "chat response with tool calls" do
VCR.use_cassette("openai/chat/tool_calls") do
response = @subject.chat_response(
tool_call_message,
test "chat response with function calls" do
VCR.use_cassette("openai/chat/function_calls") do
prompt = "What is my net worth?"
functions = [
{
name: "get_net_worth",
description: "Gets a user's net worth",
params_schema: { type: "object", properties: {}, required: [], additionalProperties: false },
strict: true
}
]
first_response = @subject.chat_response(
prompt,
model: @subject_model,
instructions: "Use the tools available to you to answer the user's question.",
available_functions: [ PredictableToolFunction.new(@chat) ]
functions: functions
)
assert response.success?
assert_equal 1, response.data.functions.size
assert_equal 1, response.data.messages.size
assert_includes response.data.messages.first.content, PredictableToolFunction.expected_test_result
assert first_response.success?
function_request = first_response.data.function_requests.first
assert function_request.present?
second_response = @subject.chat_response(
prompt,
model: @subject_model,
function_results: [ {
call_id: function_request.call_id,
output: { amount: 10000, currency: "USD" }.to_json
} ],
previous_response_id: first_response.data.id
)
assert second_response.success?
assert_equal 1, second_response.data.messages.size
assert_includes second_response.data.messages.first.output_text, "$10,000"
end
end
test "streams chat response with tool calls" do
VCR.use_cassette("openai/chat/tool_calls") do
test "streams chat response with function calls" do
VCR.use_cassette("openai/chat/streaming_function_calls") do
collected_chunks = []
mock_streamer = proc do |chunk|
collected_chunks << chunk
end
prompt = "What is my net worth?"
functions = [
{
name: "get_net_worth",
description: "Gets a user's net worth",
params_schema: { type: "object", properties: {}, required: [], additionalProperties: false },
strict: true
}
]
# Call #1: First streaming call, will return a function request
@subject.chat_response(
tool_call_message,
prompt,
model: @subject_model,
instructions: "Use the tools available to you to answer the user's question.",
available_functions: [ PredictableToolFunction.new(@chat) ],
functions: functions,
streamer: mock_streamer
)
text_chunks = collected_chunks.select { |chunk| chunk.type == "output_text" }
text_chunks = collected_chunks.select { |chunk| chunk.type == "output_text" }
tool_call_chunks = collected_chunks.select { |chunk| chunk.type == "function_request" }
response_chunks = collected_chunks.select { |chunk| chunk.type == "response" }
assert_equal 1, tool_call_chunks.count
assert text_chunks.count >= 1
assert_equal 1, response_chunks.count
assert_equal 0, text_chunks.size
assert_equal 1, response_chunks.size
assert_includes response_chunks.first.data.messages.first.content, PredictableToolFunction.expected_test_result
first_response = response_chunks.first.data
function_request = first_response.function_requests.first
# Reset collected chunks for the second call
collected_chunks = []
# Call #2: Second streaming call, will return a function result
@subject.chat_response(
prompt,
model: @subject_model,
function_results: [
{
call_id: function_request.call_id,
output: { amount: 10000, currency: "USD" }
}
],
previous_response_id: first_response.id,
streamer: mock_streamer
)
text_chunks = collected_chunks.select { |chunk| chunk.type == "output_text" }
response_chunks = collected_chunks.select { |chunk| chunk.type == "response" }
assert text_chunks.size >= 1
assert_equal 1, response_chunks.size
assert_includes response_chunks.first.data.messages.first.output_text, "$10,000"
end
end
private
def tool_call_message
UserMessage.new(chat: @chat, content: "What is my net worth?", ai_model: @subject_model)
end
class PredictableToolFunction < Assistant::Function
class << self
def expected_test_result
"$124,200"
end
def name
"get_net_worth"
end
def description
"Gets user net worth data"
end
end
def call(params = {})
self.class.expected_test_result
end
end
end

@@ -6,7 +6,7 @@ http_interactions:
body:
encoding: UTF-8
string: '{"model":"gpt-4o","input":[{"role":"user","content":"This is a chat
test. If it''s working, respond with a single word: Yes"}],"instructions":null,"tools":[],"previous_response_id":null,"stream":true}'
test. If it''s working, respond with a single word: Yes"}],"instructions":null,"tools":[],"previous_response_id":null,"stream":null}'
headers:
Content-Type:
- application/json
@@ -24,9 +24,9 @@ http_interactions:
message: OK
headers:
Date:
- Wed, 26 Mar 2025 21:27:38 GMT
- Mon, 31 Mar 2025 20:38:55 GMT
Content-Type:
- text/event-stream; charset=utf-8
- application/json
Transfer-Encoding:
- chunked
Connection:
@@ -36,57 +36,85 @@ http_interactions:
Openai-Organization:
- "<OPENAI_ORGANIZATION_ID>"
X-Request-Id:
- req_8fce503a4c5be145dda20867925b1622
- req_f99033a5841a7d9357ee08d301ad634e
Openai-Processing-Ms:
- '103'
- '713'
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Cf-Cache-Status:
- DYNAMIC
Set-Cookie:
- __cf_bm=o5kysxtwKJs3TPoOquM0X4MkyLIaylWhRd8LhagxXck-1743024458-1.0.1.1-ol6ndVCx6dHLGnc9.YmKYwgfOBqhSZSBpIHg4STCi4OBhrgt70FYPmMptrYDvg.SoFuS5RAS_pGiNNWXHspHio3gTfJ87vIdT936GYHIDrc;
path=/; expires=Wed, 26-Mar-25 21:57:38 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=UOaolWyAE3WXhLfg9c3KmO4d_Nq6t9cedTfZ6hznYEE-1743453535-1.0.1.1-GyQq_xeRpsyxxp8QQja5Bvo2XqUGfXHNGehtQoPV.BIgyLbERSIqJAK0IEKcYgpuLCyvQdlMNGqtdBHB6r5XMPHjOSMN1bTQYJHLsvlD5Z4;
path=/; expires=Mon, 31-Mar-25 21:08:55 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=Iqk8pY6uwz2lLhdKt0PwWTdtYQUqqvS6xmP9DMVko2A-1743024458829-0.0.1.1-604800000;
- _cfuvid=_zDj2dj75eLeGSzZxpBpzHxYg4gJpEfQpcnT9aCJXqM-1743453535930-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
X-Content-Type-Options:
- nosniff
Server:
- cloudflare
Cf-Ray:
- 9269bbb21b1ecf43-CMH
- 9292a7325d09cf53-CMH
Alt-Svc:
- h3=":443"; ma=86400
body:
encoding: UTF-8
string: |+
event: response.created
data: {"type":"response.created","response":{"id":"resp_67e4714ab0148192ae2cc4303794d6fc0c1a792abcdc2819","object":"response","created_at":1743024458,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
event: response.in_progress
data: {"type":"response.in_progress","response":{"id":"resp_67e4714ab0148192ae2cc4303794d6fc0c1a792abcdc2819","object":"response","created_at":1743024458,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
event: response.output_item.added
data: {"type":"response.output_item.added","output_index":0,"item":{"type":"message","id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","status":"in_progress","role":"assistant","content":[]}}
event: response.content_part.added
data: {"type":"response.content_part.added","item_id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","output_index":0,"content_index":0,"part":{"type":"output_text","text":"","annotations":[]}}
event: response.output_text.delta
data: {"type":"response.output_text.delta","item_id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","output_index":0,"content_index":0,"delta":"Yes"}
event: response.output_text.done
data: {"type":"response.output_text.done","item_id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","output_index":0,"content_index":0,"text":"Yes"}
event: response.content_part.done
data: {"type":"response.content_part.done","item_id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","output_index":0,"content_index":0,"part":{"type":"output_text","text":"Yes","annotations":[]}}
event: response.output_item.done
data: {"type":"response.output_item.done","output_index":0,"item":{"type":"message","id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Yes","annotations":[]}]}}
event: response.completed
data: {"type":"response.completed","response":{"id":"resp_67e4714ab0148192ae2cc4303794d6fc0c1a792abcdc2819","object":"response","created_at":1743024458,"status":"completed","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[{"type":"message","id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Yes","annotations":[]}]}],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":43,"input_tokens_details":{"cached_tokens":0},"output_tokens":2,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":45},"user":null,"metadata":{}}}
recorded_at: Wed, 26 Mar 2025 21:27:39 GMT
encoding: ASCII-8BIT
string: |-
{
"id": "resp_67eafd5f2b7c81928d6834e7f4d26deb0bfadc995fda2b45",
"object": "response",
"created_at": 1743453535,
"status": "completed",
"error": null,
"incomplete_details": null,
"instructions": null,
"max_output_tokens": null,
"model": "gpt-4o-2024-08-06",
"output": [
{
"type": "message",
"id": "msg_67eafd5fba44819287b79107821a818b0bfadc995fda2b45",
"status": "completed",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": "Yes",
"annotations": []
}
]
}
],
"parallel_tool_calls": true,
"previous_response_id": null,
"reasoning": {
"effort": null,
"generate_summary": null
},
"store": true,
"temperature": 1.0,
"text": {
"format": {
"type": "text"
}
},
"tool_choice": "auto",
"tools": [],
"top_p": 1.0,
"truncation": "disabled",
"usage": {
"input_tokens": 25,
"input_tokens_details": {
"cached_tokens": 0
},
"output_tokens": 2,
"output_tokens_details": {
"reasoning_tokens": 0
},
"total_tokens": 27
},
"user": null,
"metadata": {}
}
recorded_at: Mon, 31 Mar 2025 20:38:55 GMT
recorded_with: VCR 6.3.1
...

@@ -0,0 +1,92 @@
---
http_interactions:
- request:
method: post
uri: https://api.openai.com/v1/responses
body:
encoding: UTF-8
string: '{"model":"gpt-4o","input":[{"role":"user","content":"This is a chat
test. If it''s working, respond with a single word: Yes"}],"instructions":null,"tools":[],"previous_response_id":null,"stream":true}'
headers:
Content-Type:
- application/json
Authorization:
- Bearer <OPENAI_ACCESS_TOKEN>
Accept-Encoding:
- gzip;q=1.0,deflate;q=0.6,identity;q=0.3
Accept:
- "*/*"
User-Agent:
- Ruby
response:
status:
code: 200
message: OK
headers:
Date:
- Mon, 31 Mar 2025 20:38:55 GMT
Content-Type:
- text/event-stream; charset=utf-8
Transfer-Encoding:
- chunked
Connection:
- keep-alive
Openai-Version:
- '2020-10-01'
Openai-Organization:
- "<OPENAI_ORGANIZATION_ID>"
X-Request-Id:
- req_d88b2a28252a098fe9f6e1223baebad8
Openai-Processing-Ms:
- '124'
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Cf-Cache-Status:
- DYNAMIC
Set-Cookie:
- __cf_bm=wP2ENU9eOGUSzQ8wOjb31UiZAZVX021QgA1NuYcfKeo-1743453535-1.0.1.1-d08X7zX7cf._5LTGrF6qL17AtdgsKpEWLWnZ0dl5KgPWXEK.oqoDgoQ_pa8j5rKYZkeZUDxMhcpP266z9tJpPJ2ZPX8bkZYAjlnlcOa5.JM;
path=/; expires=Mon, 31-Mar-25 21:08:55 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=F6OIQe1fgGYxb6xer0VjBA1aHrf6osX7wJU6adYsMy0-1743453535321-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
X-Content-Type-Options:
- nosniff
Server:
- cloudflare
Cf-Ray:
- 9292a7324c3dcf78-CMH
Alt-Svc:
- h3=":443"; ma=86400
body:
encoding: UTF-8
string: |+
event: response.created
data: {"type":"response.created","response":{"id":"resp_67eafd5f2b90819288af54361ff81a100e51d01dbd4ed330","object":"response","created_at":1743453535,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
event: response.in_progress
data: {"type":"response.in_progress","response":{"id":"resp_67eafd5f2b90819288af54361ff81a100e51d01dbd4ed330","object":"response","created_at":1743453535,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
event: response.output_item.added
data: {"type":"response.output_item.added","output_index":0,"item":{"type":"message","id":"msg_67eafd5f7c048192a24ce545ebfd908a0e51d01dbd4ed330","status":"in_progress","role":"assistant","content":[]}}
event: response.content_part.added
data: {"type":"response.content_part.added","item_id":"msg_67eafd5f7c048192a24ce545ebfd908a0e51d01dbd4ed330","output_index":0,"content_index":0,"part":{"type":"output_text","text":"","annotations":[]}}
event: response.output_text.delta
data: {"type":"response.output_text.delta","item_id":"msg_67eafd5f7c048192a24ce545ebfd908a0e51d01dbd4ed330","output_index":0,"content_index":0,"delta":"Yes"}
event: response.output_text.done
data: {"type":"response.output_text.done","item_id":"msg_67eafd5f7c048192a24ce545ebfd908a0e51d01dbd4ed330","output_index":0,"content_index":0,"text":"Yes"}
event: response.content_part.done
data: {"type":"response.content_part.done","item_id":"msg_67eafd5f7c048192a24ce545ebfd908a0e51d01dbd4ed330","output_index":0,"content_index":0,"part":{"type":"output_text","text":"Yes","annotations":[]}}
event: response.output_item.done
data: {"type":"response.output_item.done","output_index":0,"item":{"type":"message","id":"msg_67eafd5f7c048192a24ce545ebfd908a0e51d01dbd4ed330","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Yes","annotations":[]}]}}
event: response.completed
data: {"type":"response.completed","response":{"id":"resp_67eafd5f2b90819288af54361ff81a100e51d01dbd4ed330","object":"response","created_at":1743453535,"status":"completed","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[{"type":"message","id":"msg_67eafd5f7c048192a24ce545ebfd908a0e51d01dbd4ed330","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Yes","annotations":[]}]}],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":25,"input_tokens_details":{"cached_tokens":0},"output_tokens":2,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":27},"user":null,"metadata":{}}}
recorded_at: Mon, 31 Mar 2025 20:38:55 GMT
recorded_with: VCR 6.3.1
...

@@ -5,8 +5,7 @@ http_interactions:
uri: https://api.openai.com/v1/responses
body:
encoding: UTF-8
string: '{"model":"invalid-model-that-will-trigger-api-error","input":[{"role":"user","content":"Error
test"}],"instructions":null,"tools":[],"previous_response_id":null,"stream":true}'
string: '{"model":"invalid-model-that-will-trigger-api-error","input":[{"role":"user","content":"Test"}],"instructions":null,"tools":[],"previous_response_id":null,"stream":null}'
headers:
Content-Type:
- application/json
@@ -24,7 +23,7 @@ http_interactions:
message: Bad Request
headers:
Date:
- Wed, 26 Mar 2025 21:27:19 GMT
- Mon, 31 Mar 2025 20:38:55 GMT
Content-Type:
- application/json
Content-Length:
@@ -36,25 +35,25 @@ http_interactions:
Openai-Organization:
- "<OPENAI_ORGANIZATION_ID>"
X-Request-Id:
- req_2b86e02f664e790dfa475f111402b722
- req_3981f27aa18db734b3dd530fa2929b95
Openai-Processing-Ms:
- '146'
- '113'
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Cf-Cache-Status:
- DYNAMIC
Set-Cookie:
- __cf_bm=gAU0gS_ZQBfQmFkc_jKM73dhkNISbBY9FlQjGnZ6CfU-1743024439-1.0.1.1-bWRoC737.SOJPZrP90wTJLVmelTpxFqIsrunq2Lqgy4J3VvLtYBEBrqY0v4d94F5fMcm0Ju.TfQi0etmvqZtUSMRn6rvkMLmXexRcxP.1jE;
path=/; expires=Wed, 26-Mar-25 21:57:19 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=8KUMK_Gp4f97KLactyy3QniUZbNmN9Zwbx9WowYCc98-1743453535-1.0.1.1-opjT17tCwi9U0AukBXoHrpPEcC4Z.GIyEt.AjjrzRWln62SWPIvggY4L19JabZu09.9cmxfyrwAFHmvDeCVxSWqAVf88PAZwwRICkZZUut0;
path=/; expires=Mon, 31-Mar-25 21:08:55 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=XnxX4KU80himuKAUavZYtkQasOjXJDJD.QLyMrfBSUU-1743024439792-0.0.1.1-604800000;
- _cfuvid=uZB07768IynyRRP6oxwcnC4Rfn.lGT1yRhzzGvNw0kc-1743453535322-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
X-Content-Type-Options:
- nosniff
Server:
- cloudflare
Cf-Ray:
- 9269bb3b2c14cf74-CMH
- 9292a7327d5161d6-ORD
Alt-Svc:
- h3=":443"; ma=86400
body:
@@ -68,5 +67,5 @@ http_interactions:
"code": "model_not_found"
}
}
recorded_at: Wed, 26 Mar 2025 21:27:19 GMT
recorded_at: Mon, 31 Mar 2025 20:38:55 GMT
recorded_with: VCR 6.3.1

@@ -0,0 +1,247 @@
---
http_interactions:
- request:
method: post
uri: https://api.openai.com/v1/responses
body:
encoding: UTF-8
string: '{"model":"gpt-4o","input":[{"role":"user","content":"What is my net
worth?"}],"instructions":"Use the tools available to you to answer the user''s
question.","tools":[{"type":"function","name":"get_net_worth","description":"Gets
a user''s net worth","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"previous_response_id":null,"stream":null}'
headers:
Content-Type:
- application/json
Authorization:
- Bearer <OPENAI_ACCESS_TOKEN>
Accept-Encoding:
- gzip;q=1.0,deflate;q=0.6,identity;q=0.3
Accept:
- "*/*"
User-Agent:
- Ruby
response:
status:
code: 200
message: OK
headers:
Date:
- Mon, 31 Mar 2025 20:38:55 GMT
Content-Type:
- application/json
Transfer-Encoding:
- chunked
Connection:
- keep-alive
Openai-Version:
- '2020-10-01'
Openai-Organization:
- "<OPENAI_ORGANIZATION_ID>"
X-Request-Id:
- req_a179c8964589756af0d4b5af864a29a7
Openai-Processing-Ms:
- '761'
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Cf-Cache-Status:
- DYNAMIC
Set-Cookie:
- __cf_bm=niiWOEhogNgWfxuZanJKipOlIrWGEPtp7bUpqDAp9Lo-1743453535-1.0.1.1-ytL9wC5t5fjY2v90vscRJLokIeZyVY2hmBqFuWbA_BOvZaw9aPFmtQDKhDD3WcLQryEtXiEGAyOANHnaeItCR0J_sXu7Jy4wdpJ4EMShQxU;
path=/; expires=Mon, 31-Mar-25 21:08:55 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=kKjDNYSJJidsRTyFQWUgt6xlnqW_DkveNOUYxpBe9EE-1743453535972-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
X-Content-Type-Options:
- nosniff
Server:
- cloudflare
Cf-Ray:
- 9292a732598dcf52-CMH
Alt-Svc:
- h3=":443"; ma=86400
body:
encoding: ASCII-8BIT
string: |-
{
"id": "resp_67eafd5f2d1881928f10551839e8219102a5ebf5f2a599ef",
"object": "response",
"created_at": 1743453535,
"status": "completed",
"error": null,
"incomplete_details": null,
"instructions": "Use the tools available to you to answer the user's question.",
"max_output_tokens": null,
"model": "gpt-4o-2024-08-06",
"output": [
{
"type": "function_call",
"id": "fc_67eafd5f9c88819286afe92f08354f7302a5ebf5f2a599ef",
"call_id": "call_KrFORr53UBxdwZ9SQ6fkpU0F",
"name": "get_net_worth",
"arguments": "{}",
"status": "completed"
}
],
"parallel_tool_calls": true,
"previous_response_id": null,
"reasoning": {
"effort": null,
"generate_summary": null
},
"store": true,
"temperature": 1.0,
"text": {
"format": {
"type": "text"
}
},
"tool_choice": "auto",
"tools": [
{
"type": "function",
"description": "Gets a user's net worth",
"name": "get_net_worth",
"parameters": {
"type": "object",
"properties": {},
"required": [],
"additionalProperties": false
},
"strict": true
}
],
"top_p": 1.0,
"truncation": "disabled",
"usage": {
"input_tokens": 55,
"input_tokens_details": {
"cached_tokens": 0
},
"output_tokens": 13,
"output_tokens_details": {
"reasoning_tokens": 0
},
"total_tokens": 68
},
"user": null,
"metadata": {}
}
recorded_at: Mon, 31 Mar 2025 20:38:55 GMT
- request:
method: post
uri: https://api.openai.com/v1/responses
body:
encoding: UTF-8
string: '{"model":"gpt-4o","input":[{"role":"user","content":"What is my net
worth?"},{"type":"function_call_output","call_id":"call_KrFORr53UBxdwZ9SQ6fkpU0F","output":"\"{\\\"amount\\\":10000,\\\"currency\\\":\\\"USD\\\"}\""}],"instructions":null,"tools":[],"previous_response_id":"resp_67eafd5f2d1881928f10551839e8219102a5ebf5f2a599ef","stream":null}'
headers:
Content-Type:
- application/json
Authorization:
- Bearer <OPENAI_ACCESS_TOKEN>
Accept-Encoding:
- gzip;q=1.0,deflate;q=0.6,identity;q=0.3
Accept:
- "*/*"
User-Agent:
- Ruby
response:
status:
code: 200
message: OK
headers:
Date:
- Mon, 31 Mar 2025 20:38:56 GMT
Content-Type:
- application/json
Transfer-Encoding:
- chunked
Connection:
- keep-alive
Openai-Version:
- '2020-10-01'
Openai-Organization:
- "<OPENAI_ORGANIZATION_ID>"
X-Request-Id:
- req_edd5bafc982bae46e92d0cd79e594779
Openai-Processing-Ms:
- '805'
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Cf-Cache-Status:
- DYNAMIC
Set-Cookie:
- __cf_bm=jOZGEPyAByXhGrQIvKzbj_6TEODdZWw_S0BZsxbsuDc-1743453536-1.0.1.1-YpxHv.vmXVdwzQV5dMTB0I851tQSlDf.NboFddRq_aLDM1CnQW143gRcYbfPpCREij9SDqhnluZ4kxCuD3eaarhmFn2liMVHHRYUgMsUhck;
path=/; expires=Mon, 31-Mar-25 21:08:56 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=1BoPw7WORdkfBQmal3sGAXdHGiJiFkXK8HXhWPWf7Vw-1743453536967-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
X-Content-Type-Options:
- nosniff
Server:
- cloudflare
Cf-Ray:
- 9292a7385ec6cf62-CMH
Alt-Svc:
- h3=":443"; ma=86400
body:
encoding: ASCII-8BIT
string: |-
{
"id": "resp_67eafd6023488192b382acd64a514ff002a5ebf5f2a599ef",
"object": "response",
"created_at": 1743453536,
"status": "completed",
"error": null,
"incomplete_details": null,
"instructions": null,
"max_output_tokens": null,
"model": "gpt-4o-2024-08-06",
"output": [
{
"type": "message",
"id": "msg_67eafd60a42c8192906eb4d48f8970de02a5ebf5f2a599ef",
"status": "completed",
"role": "assistant",
"content": [
{
"type": "output_text",
"text": "Your net worth is $10,000 USD.",
"annotations": []
}
]
}
],
"parallel_tool_calls": true,
"previous_response_id": "resp_67eafd5f2d1881928f10551839e8219102a5ebf5f2a599ef",
"reasoning": {
"effort": null,
"generate_summary": null
},
"store": true,
"temperature": 1.0,
"text": {
"format": {
"type": "text"
}
},
"tool_choice": "auto",
"tools": [],
"top_p": 1.0,
"truncation": "disabled",
"usage": {
"input_tokens": 58,
"input_tokens_details": {
"cached_tokens": 0
},
"output_tokens": 11,
"output_tokens_details": {
"reasoning_tokens": 0
},
"total_tokens": 69
},
"user": null,
"metadata": {}
}
recorded_at: Mon, 31 Mar 2025 20:38:57 GMT
recorded_with: VCR 6.3.1
View file
@@ -8,7 +8,7 @@ http_interactions:
string: '{"model":"gpt-4o","input":[{"role":"user","content":"What is my net
worth?"}],"instructions":"Use the tools available to you to answer the user''s
question.","tools":[{"type":"function","name":"get_net_worth","description":"Gets
user net worth data","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"previous_response_id":null,"stream":true}'
a user''s net worth","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"previous_response_id":null,"stream":true}'
headers:
Content-Type:
- application/json
@@ -26,7 +26,7 @@ http_interactions:
message: OK
headers:
Date:
- Wed, 26 Mar 2025 21:22:09 GMT
- Mon, 31 Mar 2025 20:38:55 GMT
Content-Type:
- text/event-stream; charset=utf-8
Transfer-Encoding:
@@ -38,60 +38,59 @@ http_interactions:
Openai-Organization:
- "<OPENAI_ORGANIZATION_ID>"
X-Request-Id:
- req_4f04cffbab6051b3ac301038e3796092
- req_8c4d6f0ad0ae3095353a5c19fd128c56
Openai-Processing-Ms:
- '114'
- '129'
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Cf-Cache-Status:
- DYNAMIC
Set-Cookie:
- __cf_bm=F5haUlL1HA1srjwZugBxG6XWbGg.NyQBnJTTirKs5KI-1743024129-1.0.1.1-D842I3sPgDgH_KXyroq6uVivEnbWvm9WJF.L8a11GgUcULXjhweLHs0mXe6MWruf.FJe.lZj.KmX0tCqqdpKIt5JvlbHXt5D_9svedktlZY;
path=/; expires=Wed, 26-Mar-25 21:52:09 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=5yRGSo0Y69GvEK51Bq2.Np0DSg9DmAJKNqvE3_XgKBg-1743453535-1.0.1.1-sH1YR42zmznwvKlaBUM.bPKvJl_PiebfNBKhREMO.sSa5gvFEkpcKaCG4x3XUdZ19XGTEF0CbRII3mqtcPJhxFzX3uVLGuVsyjz6odYDisM;
path=/; expires=Mon, 31-Mar-25 21:08:55 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=MmuRzsy8ebDMe6ibCEwtGp2RzcntpAmdvDlhIZtlY1s-1743024129721-0.0.1.1-604800000;
- _cfuvid=tblnBnP9s7yFkSzbYy9zuzuDkxS9i_n7hk3XdiiGui8-1743453535332-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
X-Content-Type-Options:
- nosniff
Server:
- cloudflare
Cf-Ray:
- 9269b3a97f370002-ORD
- 9292a7324dfbcf46-CMH
Alt-Svc:
- h3=":443"; ma=86400
body:
encoding: UTF-8
string: |+
event: response.created
data: {"type":"response.created","response":{"id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","object":"response","created_at":1743024129,"status":"in_progress","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"Gets user net worth data","name":"get_net_worth","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
data: {"type":"response.created","response":{"id":"resp_67eafd5f2ef0819290ec6bbbc5f27c8e0aa8698ee903b906","object":"response","created_at":1743453535,"status":"in_progress","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"Gets a user's net worth","name":"get_net_worth","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
event: response.in_progress
data: {"type":"response.in_progress","response":{"id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","object":"response","created_at":1743024129,"status":"in_progress","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"Gets user net worth data","name":"get_net_worth","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
data: {"type":"response.in_progress","response":{"id":"resp_67eafd5f2ef0819290ec6bbbc5f27c8e0aa8698ee903b906","object":"response","created_at":1743453535,"status":"in_progress","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"Gets a user's net worth","name":"get_net_worth","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
event: response.output_item.added
data: {"type":"response.output_item.added","output_index":0,"item":{"type":"function_call","id":"fc_67e4700222008192b3a26ce30fe7ad02069d9116026394b6","call_id":"call_FtvrJsTMg7he0mTeThIqktyL","name":"get_net_worth","arguments":"","status":"in_progress"}}
data: {"type":"response.output_item.added","output_index":0,"item":{"type":"function_call","id":"fc_67eafd5fa714819287b2bff8c76935690aa8698ee903b906","call_id":"call_7EY6rF7mkfNyMIz3HQmrYIOq","name":"get_net_worth","arguments":"","status":"in_progress"}}
event: response.function_call_arguments.delta
data: {"type":"response.function_call_arguments.delta","item_id":"fc_67e4700222008192b3a26ce30fe7ad02069d9116026394b6","output_index":0,"delta":"{}"}
data: {"type":"response.function_call_arguments.delta","item_id":"fc_67eafd5fa714819287b2bff8c76935690aa8698ee903b906","output_index":0,"delta":"{}"}
event: response.function_call_arguments.done
data: {"type":"response.function_call_arguments.done","item_id":"fc_67e4700222008192b3a26ce30fe7ad02069d9116026394b6","output_index":0,"arguments":"{}"}
data: {"type":"response.function_call_arguments.done","item_id":"fc_67eafd5fa714819287b2bff8c76935690aa8698ee903b906","output_index":0,"arguments":"{}"}
event: response.output_item.done
data: {"type":"response.output_item.done","output_index":0,"item":{"type":"function_call","id":"fc_67e4700222008192b3a26ce30fe7ad02069d9116026394b6","call_id":"call_FtvrJsTMg7he0mTeThIqktyL","name":"get_net_worth","arguments":"{}","status":"completed"}}
data: {"type":"response.output_item.done","output_index":0,"item":{"type":"function_call","id":"fc_67eafd5fa714819287b2bff8c76935690aa8698ee903b906","call_id":"call_7EY6rF7mkfNyMIz3HQmrYIOq","name":"get_net_worth","arguments":"{}","status":"completed"}}
event: response.completed
data: {"type":"response.completed","response":{"id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","object":"response","created_at":1743024129,"status":"completed","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[{"type":"function_call","id":"fc_67e4700222008192b3a26ce30fe7ad02069d9116026394b6","call_id":"call_FtvrJsTMg7he0mTeThIqktyL","name":"get_net_worth","arguments":"{}","status":"completed"}],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"Gets user net worth data","name":"get_net_worth","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":271,"input_tokens_details":{"cached_tokens":0},"output_tokens":13,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":284},"user":null,"metadata":{}}}
data: {"type":"response.completed","response":{"id":"resp_67eafd5f2ef0819290ec6bbbc5f27c8e0aa8698ee903b906","object":"response","created_at":1743453535,"status":"completed","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[{"type":"function_call","id":"fc_67eafd5fa714819287b2bff8c76935690aa8698ee903b906","call_id":"call_7EY6rF7mkfNyMIz3HQmrYIOq","name":"get_net_worth","arguments":"{}","status":"completed"}],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"Gets a user's net worth","name":"get_net_worth","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":55,"input_tokens_details":{"cached_tokens":0},"output_tokens":13,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":68},"user":null,"metadata":{}}}
recorded_at: Wed, 26 Mar 2025 21:22:10 GMT
recorded_at: Mon, 31 Mar 2025 20:38:55 GMT
- request:
method: post
uri: https://api.openai.com/v1/responses
body:
encoding: UTF-8
string: '{"model":"gpt-4o","input":[{"role":"user","content":"What is my net
worth?"},{"type":"function_call_output","call_id":"call_FtvrJsTMg7he0mTeThIqktyL","output":"\"$124,200\""}],"instructions":"Use
the tools available to you to answer the user''s question.","tools":[],"previous_response_id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","stream":true}'
worth?"},{"type":"function_call_output","call_id":"call_7EY6rF7mkfNyMIz3HQmrYIOq","output":"{\"amount\":10000,\"currency\":\"USD\"}"}],"instructions":null,"tools":[],"previous_response_id":"resp_67eafd5f2ef0819290ec6bbbc5f27c8e0aa8698ee903b906","stream":true}'
headers:
Content-Type:
- application/json
@@ -109,7 +108,7 @@ http_interactions:
message: OK
headers:
Date:
- Wed, 26 Mar 2025 21:22:10 GMT
- Mon, 31 Mar 2025 20:38:56 GMT
Content-Type:
- text/event-stream; charset=utf-8
Transfer-Encoding:
@@ -121,81 +120,84 @@ http_interactions:
Openai-Organization:
- "<OPENAI_ORGANIZATION_ID>"
X-Request-Id:
- req_792bf572fac53f7e139b29d462933d8f
- req_be9f30124a3a4cdae2d3b038692f6699
Openai-Processing-Ms:
- '148'
- '177'
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Cf-Cache-Status:
- DYNAMIC
Set-Cookie:
- __cf_bm=HHguTnSUQFt9KezJAQCrQF_OHn8ZH1C4xDjXRgexdzM-1743024130-1.0.1.1-ZhqxuASVfISfGQbvvKSNy_OQiUfkeIPN2DZhors0K4cl_BzE_P5u9kbc1HkgwyW1A_6GNAenh8Fr9AkoJ0zSakdg5Dr9AU.lu5nr7adQ_60;
path=/; expires=Wed, 26-Mar-25 21:52:10 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=gNS8vmdzyz2jct__mfjLZGkJhCxddarRy62IkzSIFWM-1743453536-1.0.1.1-ufcPPmSzEaEysjhkRUozTfCIriRWy5iyeXCaVqeFDaJDWT4lc8ate4JhryV0fVQSZBi6pRN8zYh9dkLyYuXoSqYDCsZTN1uk6vO84nX1qGo;
path=/; expires=Mon, 31-Mar-25 21:08:56 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=hX9Y33ruiC9mhYzrOoxyOh23Gy.MfQa54h9l5CllWlI-1743024130948-0.0.1.1-604800000;
- _cfuvid=3D41ZgFle.u0ER2Ehnm.bsdnSGlCXVArPa7bx9zumYU-1743453536171-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
X-Content-Type-Options:
- nosniff
Server:
- cloudflare
Cf-Ray:
- 9269b3b0da83cf67-CMH
- 9292a7376f5acf4e-CMH
Alt-Svc:
- h3=":443"; ma=86400
body:
encoding: UTF-8
string: |+
event: response.created
data: {"type":"response.created","response":{"id":"resp_67e47002c5b48192a8202d45c6a929f8069d9116026394b6","object":"response","created_at":1743024130,"status":"in_progress","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
data: {"type":"response.created","response":{"id":"resp_67eafd5ff7448192af7cd9e9dde90f5e0aa8698ee903b906","object":"response","created_at":1743453536,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":"resp_67eafd5f2ef0819290ec6bbbc5f27c8e0aa8698ee903b906","reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
event: response.in_progress
data: {"type":"response.in_progress","response":{"id":"resp_67e47002c5b48192a8202d45c6a929f8069d9116026394b6","object":"response","created_at":1743024130,"status":"in_progress","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
data: {"type":"response.in_progress","response":{"id":"resp_67eafd5ff7448192af7cd9e9dde90f5e0aa8698ee903b906","object":"response","created_at":1743453536,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":"resp_67eafd5f2ef0819290ec6bbbc5f27c8e0aa8698ee903b906","reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
event: response.output_item.added
data: {"type":"response.output_item.added","output_index":0,"item":{"type":"message","id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","status":"in_progress","role":"assistant","content":[]}}
data: {"type":"response.output_item.added","output_index":0,"item":{"type":"message","id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","status":"in_progress","role":"assistant","content":[]}}
event: response.content_part.added
data: {"type":"response.content_part.added","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"part":{"type":"output_text","text":"","annotations":[]}}
data: {"type":"response.content_part.added","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"part":{"type":"output_text","text":"","annotations":[]}}
event: response.output_text.delta
data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":"Your"}
data: {"type":"response.output_text.delta","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"delta":"Your"}
event: response.output_text.delta
data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":" net"}
data: {"type":"response.output_text.delta","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"delta":" net"}
event: response.output_text.delta
data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":" worth"}
data: {"type":"response.output_text.delta","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"delta":" worth"}
event: response.output_text.delta
data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":" is"}
data: {"type":"response.output_text.delta","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"delta":" is"}
event: response.output_text.delta
data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":" $"}
data: {"type":"response.output_text.delta","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"delta":" $"}
event: response.output_text.delta
data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":"124"}
data: {"type":"response.output_text.delta","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"delta":"10"}
event: response.output_text.delta
data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":","}
data: {"type":"response.output_text.delta","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"delta":","}
event: response.output_text.delta
data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":"200"}
data: {"type":"response.output_text.delta","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"delta":"000"}
event: response.output_text.delta
data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":"."}
data: {"type":"response.output_text.delta","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"delta":" USD"}
event: response.output_text.delta
data: {"type":"response.output_text.delta","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"delta":"."}
event: response.output_text.done
data: {"type":"response.output_text.done","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"text":"Your net worth is $124,200."}
data: {"type":"response.output_text.done","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"text":"Your net worth is $10,000 USD."}
event: response.content_part.done
data: {"type":"response.content_part.done","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"part":{"type":"output_text","text":"Your net worth is $124,200.","annotations":[]}}
data: {"type":"response.content_part.done","item_id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","output_index":0,"content_index":0,"part":{"type":"output_text","text":"Your net worth is $10,000 USD.","annotations":[]}}
event: response.output_item.done
data: {"type":"response.output_item.done","output_index":0,"item":{"type":"message","id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Your net worth is $124,200.","annotations":[]}]}}
data: {"type":"response.output_item.done","output_index":0,"item":{"type":"message","id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Your net worth is $10,000 USD.","annotations":[]}]}}
event: response.completed
data: {"type":"response.completed","response":{"id":"resp_67e47002c5b48192a8202d45c6a929f8069d9116026394b6","object":"response","created_at":1743024130,"status":"completed","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[{"type":"message","id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Your net worth is $124,200.","annotations":[]}]}],"parallel_tool_calls":true,"previous_response_id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":85,"input_tokens_details":{"cached_tokens":0},"output_tokens":10,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":95},"user":null,"metadata":{}}}
data: {"type":"response.completed","response":{"id":"resp_67eafd5ff7448192af7cd9e9dde90f5e0aa8698ee903b906","object":"response","created_at":1743453536,"status":"completed","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[{"type":"message","id":"msg_67eafd6084ec81929a5132414ef713180aa8698ee903b906","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Your net worth is $10,000 USD.","annotations":[]}]}],"parallel_tool_calls":true,"previous_response_id":"resp_67eafd5f2ef0819290ec6bbbc5f27c8e0aa8698ee903b906","reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":56,"input_tokens_details":{"cached_tokens":0},"output_tokens":11,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":67},"user":null,"metadata":{}}}
recorded_at: Wed, 26 Mar 2025 21:22:11 GMT
recorded_at: Mon, 31 Mar 2025 20:38:58 GMT
recorded_with: VCR 6.3.1
...