
Interface updates

commit 1aa7755593 (parent 5ab59d44aa)
Author: Zach Gollwitzer
Date:   2025-03-28 16:48:11 -04:00

10 changed files with 536 additions and 368 deletions


@@ -18,52 +18,98 @@ class Assistant
     @chat = chat
   end
 
-  def streamer(model)
-    assistant_message = AssistantMessage.new(
-      chat: chat,
-      content: "",
-      ai_model: model
-    )
-
-    proc do |chunk|
-      case chunk.type
-      when "output_text"
-        stop_thinking
-        assistant_message.content += chunk.data
-        assistant_message.save!
-      when "function_request"
-        update_thinking("Analyzing your data to assist you with your question...")
-      when "response"
-        stop_thinking
-        assistant_message.ai_model = chunk.data.model
-
-        combined_tool_calls = chunk.data.functions.map do |tc|
-          ToolCall::Function.new(
-            provider_id: tc.id,
-            provider_call_id: tc.call_id,
-            function_name: tc.name,
-            function_arguments: tc.arguments,
-            function_result: tc.result
-          )
-        end
-
-        assistant_message.tool_calls = combined_tool_calls
-        assistant_message.save!
-        chat.update!(latest_assistant_response_id: chunk.data.id)
-      end
-    end
-  end
+  class ToolCaller
+    def initialize(functions: [])
+      @functions = functions
+    end
+
+    def call_function(function_request)
+      name = function_request.function_name
+      args = JSON.parse(function_request.function_arguments)
+      fn = get_function(name)
+      result = fn.call(args)
+
+      ToolCall::Function.new(
+        provider_id: function_request.provider_id,
+        provider_call_id: function_request.provider_call_id,
+        function_name: name,
+        function_arguments: args,
+        function_result: result
+      )
+    rescue => e
+      fn_execution_details = {
+        fn_name: name,
+        fn_args: args
+      }
+
+      message = "Error calling function #{name} with arguments #{args}: #{e.message}"
+
+      raise StandardError.new(message)
+    end
+
+    private
+      attr_reader :functions
+
+      def get_function(name)
+        functions.find { |f| f.name == name }
+      end
+  end
 
   def respond_to(message)
     chat.clear_error
     sleep artificial_thinking_delay
 
     provider = get_model_provider(message.ai_model)
+    tool_caller = ToolCaller.new(functions: functions)
+
+    assistant_response = AssistantMessage.new(
+      chat: chat,
+      content: "",
+      ai_model: message.ai_model
+    )
+
+    streamer = proc do |chunk|
+      case chunk.type
+      when "output_text"
+        stop_thinking
+        assistant_response.content += chunk.data
+        assistant_response.save!
+      when "response"
+        if chunk.data.function_requests.any?
+          update_thinking("Analyzing your data to assist you with your question...")
+
+          tool_calls = chunk.data.function_requests.map do |fn_request|
+            tool_caller.call_function(fn_request)
+          end
+
+          assistant_response.tool_calls = tool_calls
+          assistant_response.save!
+
+          provider.chat_response(
+            message.content,
+            model: message.ai_model,
+            instructions: instructions,
+            functions: functions.map(&:to_h),
+            function_results: tool_calls.map(&:to_h),
+            streamer: streamer
+          )
+        else
+          stop_thinking
+        end
+
+        chat.update!(latest_assistant_response_id: chunk.data.id)
+      end
+    end
 
     provider.chat_response(
-      message,
+      message.content,
+      model: message.ai_model,
       instructions: instructions,
-      available_functions: functions,
-      streamer: streamer(message.ai_model)
+      functions: functions.map(&:to_h),
+      function_results: [],
+      streamer: streamer
     )
   rescue => e
     chat.add_error(e)
@@ -78,28 +124,6 @@ class Assistant
     chat.broadcast_remove target: "thinking-indicator"
   end
 
-  def process_response_artifacts(data)
-    messages = data.messages.map do |message|
-      AssistantMessage.new(
-        chat: chat,
-        content: message.content,
-        provider_id: message.id,
-        ai_model: data.model,
-        tool_calls: data.functions.map do |fn|
-          ToolCall::Function.new(
-            provider_id: fn.id,
-            provider_call_id: fn.call_id,
-            function_name: fn.name,
-            function_arguments: fn.arguments,
-            function_result: fn.result
-          )
-        end
-      )
-    end
-
-    messages.each(&:save!)
-  end
-
   def instructions
     <<~PROMPT
       ## Your identity
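Taken together, the Assistant side of this change inlines the old streamer(model) factory into respond_to and hands function execution to the new ToolCaller. For orientation, here is a minimal sketch of the streamer contract the Assistant relies on; the Chunk struct is local shorthand for Provider::LlmProvider::StreamChunk, and the data shapes are assumptions read off the diff, not code from the commit:

# Minimal sketch of a stream consumer, assuming chunks shaped like
# Provider::LlmProvider::StreamChunk.
Chunk = Data.define(:type, :data)

buffer = +""

streamer = proc do |chunk|
  case chunk.type
  when "output_text"
    buffer << chunk.data            # accumulate streamed text deltas
  when "response"
    puts "turn complete: #{buffer}" # a "response" chunk ends one provider turn
  end
end

streamer.call(Chunk.new(type: "output_text", data: "Hello"))
streamer.call(Chunk.new(type: "response", data: nil))

In the real respond_to, the "response" branch additionally inspects chunk.data.function_requests and, when present, executes them via ToolCaller and re-invokes provider.chat_response with the results.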


@@ -34,6 +34,15 @@ class Assistant::Function
     true
   end
 
+  def to_h
+    {
+      name: name,
+      description: description,
+      parameters: params_schema,
+      strict: strict_mode?
+    }
+  end
+
   private
     attr_reader :user


@@ -6,8 +6,12 @@ module Provider::LlmProvider
   end
 
   private
-    StreamChunk = Data.define(:type, :data)
-    ChatResponse = Data.define(:id, :messages, :functions, :model)
-    Message = Data.define(:id, :content)
-    FunctionExecution = Data.define(:id, :call_id, :name, :arguments, :result)
+    StreamChunk = Data.define(:provider_type, :data)
+    ChatResponse = Data.define(:provider_id, :model, :messages, :function_calls) do
+      def final?
+        function_calls.empty?
+      end
+    end
+    Message = Data.define(:provider_id, :content)
+    FunctionCall = Data.define(:provider_id, :provider_call_id, :name, :arguments, :result)
 end
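A quick hand-built illustration of the renamed value objects and the new final? predicate. The definitions are repeated here so the sketch stands alone; in the app these instances are constructed by the provider, and the values below are placeholders:

StreamChunk  = Data.define(:provider_type, :data)
Message      = Data.define(:provider_id, :content)
ChatResponse = Data.define(:provider_id, :model, :messages, :function_calls) do
  def final? = function_calls.empty?
end

response = ChatResponse.new(
  provider_id: "resp_123",
  model: "gpt-4o",
  messages: [ Message.new(provider_id: "msg_1", content: "Yes") ],
  function_calls: []
)

response.final? # => true: no pending function calls, so this turn is complete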


@@ -11,17 +11,40 @@ class Provider::Openai < Provider
     MODELS.include?(model)
   end
 
-  def chat_response(message, instructions: nil, available_functions: [], streamer: nil)
+  def chat_response(prompt, model:, instructions: nil, functions: [], function_results: [], previous_response_id: nil)
     with_provider_response do
-      processor = ChatResponseProcessor.new(
-        client: client,
-        message: message,
-        instructions: instructions,
-        available_functions: available_functions,
-        streamer: streamer
-      )
+      proxy_streamer = proc do |chunk|
+        type = chunk.dig("type")
+      end
 
-      processor.process
+      function_results_input = function_results.map do |fn_result|
+        {
+          type: "function_call_output",
+          call_id: fn_result[:provider_call_id],
+          output: fn_result[:result].to_json
+        }
+      end
+
+      prompt_input = [ { role: "user", content: prompt } ]
+
+      tools = functions.map do |fn|
+        {
+          type: "function",
+          name: fn[:name],
+          description: fn[:description],
+          parameters: fn[:params_schema],
+          strict: fn[:strict]
+        }
+      end
+
+      client.responses.create(parameters: {
+        model: model,
+        input: prompt_input + function_results_input,
+        instructions: instructions,
+        tools: tools,
+        previous_response_id: previous_response_id,
+        stream: streamer
+      })
     end
   end
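Under the new signature the provider takes plain hashes rather than function objects. An illustrative call, with argument values borrowed from the test fixtures further down; `provider` is assumed to be a configured Provider::Openai instance, so this is a usage sketch rather than runnable in isolation:

provider.chat_response(
  "What is my net worth?",
  model: "gpt-4o",
  instructions: "Use the tools available to you to answer the user's question.",
  functions: [
    {
      name: "get_net_worth",
      description: "Gets user net worth data",
      params_schema: { type: "object", properties: {}, required: [], additionalProperties: false },
      strict: true
    }
  ],
  function_results: [] # on a follow-up turn, pass ToolCall#to_h hashes here
)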


@@ -1,103 +1,101 @@
 class Provider::Openai::ChatResponseProcessor
-  def initialize(message:, client:, instructions: nil, available_functions: [], streamer: nil)
+  include Provider::Openai::Parser
+
+  def initialize(message:, function_caller:, client:, subscribers:, instructions: nil)
     @client = client
     @message = message
     @instructions = instructions
-    @available_functions = available_functions
-    @streamer = streamer
+    @function_caller = function_caller
+    @streamer = build_streamer(subscribers)
   end
 
+  def build_streamer(subscribers)
+    ChatStreamer.new(
+      client: client,
+      function_caller: function_caller,
+      subscribers: subscribers
+    )
+  end
+
   def process
-    first_response = fetch_response(previous_response_id: previous_openai_response_id)
+    raw_first_response = fetch_response(input, previous_response_id: previous_openai_response_id)
 
-    if first_response.functions.empty?
-      if streamer.present?
-        streamer.call(Provider::LlmProvider::StreamChunk.new(type: "response", data: first_response))
-      end
-
-      return first_response
-    end
+    function_requests = extract_function_requests(raw_first_response)
 
-    executed_functions = execute_pending_functions(first_response.functions)
+    function_calls = function_requests.map do |function_request|
+      function_caller.fulfill_request(function_request)
+    end
 
-    follow_up_response = fetch_response(
-      executed_functions: executed_functions,
-      previous_response_id: first_response.id
-    )
+    first_response = build_response(raw_first_response, function_calls: function_calls)
 
-    if streamer.present?
-      streamer.call(Provider::LlmProvider::StreamChunk.new(type: "response", data: follow_up_response))
-    end
+    if first_response.function_calls.empty?
+      return [ first_response ]
+    end
 
-    follow_up_response
+    raw_follow_up_response = fetch_response(
+      input + function_caller.build_results_input(function_calls),
+      previous_response_id: first_response.provider_id,
+    )
+
+    follow_up_response = build_response(raw_follow_up_response)
+
+    [ first_response, follow_up_response ]
   end
 
   private
-    attr_reader :client, :message, :instructions, :available_functions, :streamer
+    attr_reader :client, :message, :instructions, :streamer, :function_caller
 
-    PendingFunction = Data.define(:id, :call_id, :name, :arguments)
+    StreamChunk = Provider::LlmProvider::StreamChunk
+    ChatResponse = Provider::LlmProvider::ChatResponse
+    Message = Provider::LlmProvider::Message
+    FunctionCall = Provider::LlmProvider::FunctionCall
+    Error = Provider::Openai::Error
 
-    def fetch_response(executed_functions: [], previous_response_id: nil)
-      function_results = executed_functions.map do |executed_function|
-        {
-          type: "function_call_output",
-          call_id: executed_function.call_id,
-          output: executed_function.result.to_json
-        }
-      end
-
-      prepared_input = input + function_results
-
-      # No need to pass tools for follow-up messages that provide function results
-      prepared_tools = executed_functions.empty? ? tools : []
-
-      raw_response = nil
-
-      internal_streamer = proc do |chunk|
-        type = chunk.dig("type")
-
-        if streamer.present?
-          case type
-          when "response.output_text.delta", "response.refusal.delta"
-            # We don't distinguish between text and refusal yet, so stream both the same
-            streamer.call(Provider::LlmProvider::StreamChunk.new(type: "output_text", data: chunk.dig("delta")))
-          when "response.function_call_arguments.done"
-            streamer.call(Provider::LlmProvider::StreamChunk.new(type: "function_request", data: chunk.dig("arguments")))
-          end
-        end
-
-        if type == "response.completed"
-          raw_response = chunk.dig("response")
-        end
-      end
+    def build_response(response, function_calls: [])
+      ChatResponse.new(
+        provider_id: extract_id(response),
+        model: extract_model(response),
+        messages: extract_messages(response).map do |msg|
+          Message.new(
+            provider_id: msg[:id],
+            content: msg[:output_text]
+          )
+        end,
+        function_calls: function_calls
+      )
+    end
 
+    def fetch_response(input, previous_response_id: nil)
+      # raw_response = nil
+
+      # internal_streamer = proc do |chunk|
+      #   type = chunk.dig("type")
+
+      #   if type == "response.completed"
+      #     raw_response = chunk.dig("response")
+      #   end
+
+      #   if streamer.present?
+      #     case type
+      #     when "response.output_text.delta", "response.refusal.delta"
+      #       # We don't distinguish between text and refusal yet, so stream both the same
+      #       streamer.call(StreamChunk.new(provider_type: "output_text", data: chunk.dig("delta")))
+      #     when "response.function_call_arguments.done"
+      #       streamer.call(StreamChunk.new(provider_type: "function_request", data: chunk.dig("arguments")))
+      #     when "response.completed"
+      #       normalized = normalize_chat_response(chunk.dig("response"), function_results: function_results)
+      #       streamer.call(StreamChunk.new(provider_type: "response", data: normalized))
+      #     end
+      #   end
+      # end
+
       client.responses.create(parameters: {
         model: model,
-        input: prepared_input,
+        input: input,
         instructions: instructions,
-        tools: prepared_tools,
+        tools: function_caller.openai_tools,
         previous_response_id: previous_response_id,
-        stream: internal_streamer
+        stream: streamer
       })
-
-      if raw_response.dig("status") == "failed" || raw_response.dig("status") == "incomplete"
-        raise Provider::Openai::Error.new("OpenAI returned a failed or incomplete response", { chunk: chunk })
-      end
-
-      response_output = raw_response.dig("output")
-
-      functions_output = if executed_functions.any?
-        executed_functions
-      else
-        extract_pending_functions(response_output)
-      end
-
-      Provider::LlmProvider::ChatResponse.new(
-        id: raw_response.dig("id"),
-        messages: extract_messages(response_output),
-        functions: functions_output,
-        model: raw_response.dig("model")
-      )
     end
 
     def chat
@@ -117,72 +115,4 @@ class Provider::Openai::ChatResponseProcessor
     def input
       [ { role: "user", content: message.content } ]
     end
-
-    def extract_messages(response_output)
-      message_items = response_output.filter { |item| item.dig("type") == "message" }
-
-      message_items.map do |item|
-        output_text = item.dig("content").map do |content|
-          text = content.dig("text")
-          refusal = content.dig("refusal")
-
-          text || refusal
-        end.flatten.join("\n")
-
-        Provider::LlmProvider::Message.new(
-          id: item.dig("id"),
-          content: output_text,
-        )
-      end
-    end
-
-    def extract_pending_functions(response_output)
-      response_output.filter { |item| item.dig("type") == "function_call" }.map do |item|
-        PendingFunction.new(
-          id: item.dig("id"),
-          call_id: item.dig("call_id"),
-          name: item.dig("name"),
-          arguments: item.dig("arguments"),
-        )
-      end
-    end
-
-    def execute_pending_functions(pending_functions)
-      pending_functions.map do |pending_function|
-        execute_function(pending_function)
-      end
-    end
-
-    def execute_function(fn)
-      fn_instance = available_functions.find { |f| f.name == fn.name }
-      parsed_args = JSON.parse(fn.arguments)
-      result = fn_instance.call(parsed_args)
-
-      Provider::LlmProvider::FunctionExecution.new(
-        id: fn.id,
-        call_id: fn.call_id,
-        name: fn.name,
-        arguments: parsed_args,
-        result: result
-      )
-    rescue => e
-      fn_execution_details = {
-        fn_name: fn.name,
-        fn_args: parsed_args
-      }
-
-      raise Provider::Openai::Error.new(e, fn_execution_details)
-    end
-
-    def tools
-      available_functions.map do |fn|
-        {
-          type: "function",
-          name: fn.name,
-          description: fn.description,
-          parameters: fn.params_schema,
-          strict: fn.strict_mode?
-        }
-      end
-    end
 end


@@ -1,13 +1,80 @@
 # A stream proxy for OpenAI chat responses
 #
-# - Consumes an OpenAI chat response stream
-# - Outputs a generic "Chat Provider Stream" interface to consumers (e.g. `Assistant`)
+# - Consumes OpenAI stream chunks
+# - Outputs generic stream chunks to a "subscriber" (e.g. `Assistant`) if subscriber is supplied
 class Provider::Openai::ChatStreamer
+  include Provider::Openai::Parser
+
   def initialize(output_stream)
     @output_stream = output_stream
   end
 
   def call(chunk)
-    @output_stream.call(chunk)
+    output = parse_chunk(chunk)
+    output_stream.call(output) unless output.nil?
   end
+
+  private
+    attr_reader :output_stream
+
+    Chunk = Provider::LlmProvider::StreamChunk
+    Response = Provider::LlmProvider::ChatResponse
+    Message = Provider::LlmProvider::Message
+
+    def parse_chunk(chunk)
+      type = chunk.dig("type")
+
+      case type
+      when "response.output_text.delta", "response.refusal.delta"
+        build_chunk("output_text", chunk.dig("delta"))
+      when "response.function_call_arguments.done"
+        build_chunk("function_request", chunk.dig("arguments"))
+      when "response.completed"
+        handle_response(chunk.dig("response"))
+      end
+    end
+
+    def handle_response(response)
+      function_requests = extract_function_requests(response)
+
+      function_calls = function_requests.map do |function_request|
+        @function_caller.fulfill_request(function_request)
+      end
+
+      normalized_response = build_response(response, function_calls: function_calls)
+
+      build_chunk("response", normalized_response)
+    end
+
+    def build_chunk(type, data)
+      Chunk.new(
+        provider_type: type,
+        data: data
+      )
+    end
+
+    def build_response(response, function_calls: [])
+      Response.new(
+        provider_id: extract_id(response),
+        model: extract_model(response),
+        messages: extract_messages(response).map do |msg|
+          Message.new(
+            provider_id: msg[:id],
+            content: msg[:output_text]
+          )
+        end,
+        function_calls: function_calls
+      )
+    end
+
+    def fetch_response(input, previous_response_id: nil)
+      client.responses.create(parameters: {
+        model: model,
+        input: input,
+        instructions: instructions,
+        tools: function_caller.openai_tools,
+        previous_response_id: previous_response_id,
+        stream: streamer
+      })
+    end
 end
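For orientation, a sketch of how this proxy would be wired to a subscriber, assuming Provider::Openai::Parser supplies the extract_* helpers used above and that any callable works as the output stream:

# Hypothetical wiring: any callable can subscribe to the normalized stream.
subscriber = proc do |chunk|
  puts "#{chunk.provider_type}: #{chunk.data.inspect}"
end

streamer = Provider::Openai::ChatStreamer.new(subscriber)

# Raw OpenAI SSE events go in; generic StreamChunks come out (nil parses are dropped).
streamer.call({ "type" => "response.output_text.delta", "delta" => "Hello" })
# prints: output_text: "Hello"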


@@ -1,3 +1,13 @@
 class ToolCall < ApplicationRecord
   belongs_to :message
+
+  def to_h
+    {
+      provider_id: provider_id,
+      provider_call_id: provider_call_id,
+      name: function_name,
+      arguments: function_arguments,
+      result: function_result
+    }
+  end
 end
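This hash is the shape Assistant#respond_to passes back to the provider as function_results. For the net-worth tool call recorded in the cassettes below, the result would look roughly like this (field values are illustrative, not from the commit):

tool_call.to_h
# => {
#      provider_id: "fc_123",
#      provider_call_id: "call_abc",
#      name: "get_net_worth",
#      arguments: "{}",
#      result: "$124,200"
#    }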


@@ -33,16 +33,18 @@ class Provider::OpenaiTest < ActiveSupport::TestCase
       response = @subject.chat_response(message)
 
       assert response.success?
-      assert_equal 1, response.data.messages.size
-      assert_includes response.data.messages.first.content, "Yes"
+      assert_equal 1, response.data.size
+      assert response.data.first.final?
+      assert_equal 1, response.data.first.messages.size
+      assert_includes response.data.first.messages.first.content, "Yes"
     end
   end
 
   test "streams basic chat response" do
-    VCR.use_cassette("openai/chat/basic_response") do
+    VCR.use_cassette("openai/chat/basic_streaming_response") do
       collected_chunks = []
 
-      mock_streamer = proc do |chunk|
+      mock_subscriber = proc do |chunk|
         collected_chunks << chunk
       end
@@ -52,11 +54,11 @@ class Provider::OpenaiTest < ActiveSupport::TestCase
         ai_model: @subject_model
       )
 
-      @subject.chat_response(message, streamer: mock_streamer)
+      @subject.chat_response(message, stream_subscriber: mock_subscriber)
 
-      tool_call_chunks = collected_chunks.select { |chunk| chunk.type == "function_request" }
-      text_chunks = collected_chunks.select { |chunk| chunk.type == "output_text" }
-      response_chunks = collected_chunks.select { |chunk| chunk.type == "response" }
+      tool_call_chunks = collected_chunks.select { |chunk| chunk.provider_type == "function_request" }
+      text_chunks = collected_chunks.select { |chunk| chunk.provider_type == "output_text" }
+      response_chunks = collected_chunks.select { |chunk| chunk.provider_type == "response" }
 
       assert_equal 1, text_chunks.size
       assert_equal 1, response_chunks.size
@@ -74,15 +76,24 @@ class Provider::OpenaiTest < ActiveSupport::TestCase
         available_functions: [ PredictableToolFunction.new(@chat) ]
       )
 
+      # Two responses: one for function requests, one follow-up for text output
       assert response.success?
-      assert_equal 1, response.data.functions.size
-      assert_equal 1, response.data.messages.size
-      assert_includes response.data.messages.first.content, PredictableToolFunction.expected_test_result
+      assert_equal 2, response.data.size
+
+      # First response has function requests / results
+      assert_not response.data.first.final?
+      assert_equal 1, response.data.first.function_calls.size
+
+      # Second response has text output that uses the function results
+      assert response.data.last.final?
+      assert_equal 0, response.data.last.function_calls.size
+      assert_equal 1, response.data.last.messages.size
+      assert_includes response.data.last.messages.first.content, PredictableToolFunction.expected_test_result
     end
   end
 
   test "streams chat response with tool calls" do
-    VCR.use_cassette("openai/chat/tool_calls") do
+    VCR.use_cassette("openai/chat/streaming_tool_calls") do
       collected_chunks = []
 
       mock_streamer = proc do |chunk|
@@ -96,16 +107,17 @@ class Provider::OpenaiTest < ActiveSupport::TestCase
         streamer: mock_streamer
       )
 
-      text_chunks = collected_chunks.select { |chunk| chunk.type == "output_text" }
-      text_chunks = collected_chunks.select { |chunk| chunk.type == "output_text" }
-      tool_call_chunks = collected_chunks.select { |chunk| chunk.type == "function_request" }
-      response_chunks = collected_chunks.select { |chunk| chunk.type == "response" }
+      text_chunks = collected_chunks.select { |chunk| chunk.provider_type == "output_text" }
+      tool_call_chunks = collected_chunks.select { |chunk| chunk.provider_type == "function_request" }
+      response_chunks = collected_chunks.select { |chunk| chunk.provider_type == "response" }
 
       assert_equal 1, tool_call_chunks.count
       assert text_chunks.count >= 1
-      assert_equal 1, response_chunks.count
+      assert_equal 2, response_chunks.count
 
-      assert_includes response_chunks.first.data.messages.first.content, PredictableToolFunction.expected_test_result
+      assert_not response_chunks.first.data.final?
+      assert response_chunks.last.data.final?
+      assert_includes response_chunks.last.data.messages.first.content, PredictableToolFunction.expected_test_result
     end
   end


@@ -6,7 +6,7 @@ http_interactions:
     body:
       encoding: UTF-8
       string: '{"model":"gpt-4o","input":[{"role":"user","content":"This is a chat
-        test. If it''s working, respond with a single word: Yes"}],"instructions":null,"tools":[],"previous_response_id":null,"stream":true}'
+        test. If it''s working, respond with a single word: Yes"}],"instructions":null,"tools":[],"previous_response_id":null,"stream":null}'
     headers:
       Content-Type:
       - application/json
@@ -24,9 +24,9 @@ http_interactions:
       message: OK
     headers:
       Date:
-      - Wed, 26 Mar 2025 21:27:38 GMT
+      - Fri, 28 Mar 2025 14:23:22 GMT
      Content-Type:
-      - text/event-stream; charset=utf-8
+      - application/json
       Transfer-Encoding:
       - chunked
       Connection:
@@ -36,57 +36,85 @@ http_interactions:
       Openai-Organization:
       - "<OPENAI_ORGANIZATION_ID>"
       X-Request-Id:
-      - req_8fce503a4c5be145dda20867925b1622
+      - req_13900312235c86dca9983c43f884612f
       Openai-Processing-Ms:
-      - '103'
+      - '1103'
       Strict-Transport-Security:
       - max-age=31536000; includeSubDomains; preload
       Cf-Cache-Status:
       - DYNAMIC
       Set-Cookie:
-      - __cf_bm=o5kysxtwKJs3TPoOquM0X4MkyLIaylWhRd8LhagxXck-1743024458-1.0.1.1-ol6ndVCx6dHLGnc9.YmKYwgfOBqhSZSBpIHg4STCi4OBhrgt70FYPmMptrYDvg.SoFuS5RAS_pGiNNWXHspHio3gTfJ87vIdT936GYHIDrc;
-        path=/; expires=Wed, 26-Mar-25 21:57:38 GMT; domain=.api.openai.com; HttpOnly;
+      - __cf_bm=ZxtRGPPVT3H1qcGwlqfVjC13JJboKjEOkFsJoUfcduA-1743171802-1.0.1.1-PKolgwX6T_lqh6cFQXy9f3CPACskAir7Sfyt1ZNEKIHPDz3Ehn7lNILDASLxhBRmYBDKTMQqaUeyvSolvp1U00OhqfgSr51HxKIZNeEDglg;
+        path=/; expires=Fri, 28-Mar-25 14:53:22 GMT; domain=.api.openai.com; HttpOnly;
         Secure; SameSite=None
-      - _cfuvid=Iqk8pY6uwz2lLhdKt0PwWTdtYQUqqvS6xmP9DMVko2A-1743024458829-0.0.1.1-604800000;
+      - _cfuvid=.uHFzkeaG4pJ9kcwsDW9PIfdKQgoEAz4Voe6QjxyH7E-1743171802602-0.0.1.1-604800000;
         path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
       X-Content-Type-Options:
       - nosniff
       Server:
       - cloudflare
       Cf-Ray:
-      - 9269bbb21b1ecf43-CMH
+      - 9277c8eafdbe10c3-ORD
       Alt-Svc:
       - h3=":443"; ma=86400
     body:
-      encoding: UTF-8
-      string: |+
-        event: response.created
-        data: {"type":"response.created","response":{"id":"resp_67e4714ab0148192ae2cc4303794d6fc0c1a792abcdc2819","object":"response","created_at":1743024458,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
-
-        event: response.in_progress
-        data: {"type":"response.in_progress","response":{"id":"resp_67e4714ab0148192ae2cc4303794d6fc0c1a792abcdc2819","object":"response","created_at":1743024458,"status":"in_progress","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
-
-        event: response.output_item.added
-        data: {"type":"response.output_item.added","output_index":0,"item":{"type":"message","id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","status":"in_progress","role":"assistant","content":[]}}
-
-        event: response.content_part.added
-        data: {"type":"response.content_part.added","item_id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","output_index":0,"content_index":0,"part":{"type":"output_text","text":"","annotations":[]}}
-
-        event: response.output_text.delta
-        data: {"type":"response.output_text.delta","item_id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","output_index":0,"content_index":0,"delta":"Yes"}
-
-        event: response.output_text.done
-        data: {"type":"response.output_text.done","item_id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","output_index":0,"content_index":0,"text":"Yes"}
-
-        event: response.content_part.done
-        data: {"type":"response.content_part.done","item_id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","output_index":0,"content_index":0,"part":{"type":"output_text","text":"Yes","annotations":[]}}
-
-        event: response.output_item.done
-        data: {"type":"response.output_item.done","output_index":0,"item":{"type":"message","id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Yes","annotations":[]}]}}
-
-        event: response.completed
-        data: {"type":"response.completed","response":{"id":"resp_67e4714ab0148192ae2cc4303794d6fc0c1a792abcdc2819","object":"response","created_at":1743024458,"status":"completed","error":null,"incomplete_details":null,"instructions":null,"max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[{"type":"message","id":"msg_67e4714b1f8c8192b9b16febe8be86550c1a792abcdc2819","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Yes","annotations":[]}]}],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":43,"input_tokens_details":{"cached_tokens":0},"output_tokens":2,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":45},"user":null,"metadata":{}}}
-
-  recorded_at: Wed, 26 Mar 2025 21:27:39 GMT
+      encoding: ASCII-8BIT
+      string: |-
+        {
+          "id": "resp_67e6b0d97d34819296bd4e0d24c6f3db00ff3778b60aa4aa",
+          "object": "response",
+          "created_at": 1743171801,
+          "status": "completed",
+          "error": null,
+          "incomplete_details": null,
+          "instructions": null,
+          "max_output_tokens": null,
+          "model": "gpt-4o-2024-08-06",
+          "output": [
+            {
+              "type": "message",
+              "id": "msg_67e6b0da49b48192a610e3f3493f056600ff3778b60aa4aa",
+              "status": "completed",
+              "role": "assistant",
+              "content": [
+                {
+                  "type": "output_text",
+                  "text": "Yes",
+                  "annotations": []
+                }
+              ]
+            }
+          ],
+          "parallel_tool_calls": true,
+          "previous_response_id": null,
+          "reasoning": {
+            "effort": null,
+            "generate_summary": null
+          },
+          "store": true,
+          "temperature": 1.0,
+          "text": {
+            "format": {
+              "type": "text"
+            }
+          },
+          "tool_choice": "auto",
+          "tools": [],
+          "top_p": 1.0,
+          "truncation": "disabled",
+          "usage": {
+            "input_tokens": 43,
+            "input_tokens_details": {
+              "cached_tokens": 0
+            },
+            "output_tokens": 2,
+            "output_tokens_details": {
+              "reasoning_tokens": 0
+            },
+            "total_tokens": 45
+          },
+          "user": null,
+          "metadata": {}
+        }
+  recorded_at: Fri, 28 Mar 2025 14:23:22 GMT
 recorded_with: VCR 6.3.1
-...


@@ -8,7 +8,7 @@ http_interactions:
       string: '{"model":"gpt-4o","input":[{"role":"user","content":"What is my net
         worth?"}],"instructions":"Use the tools available to you to answer the user''s
         question.","tools":[{"type":"function","name":"get_net_worth","description":"Gets
-        user net worth data","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"previous_response_id":null,"stream":true}'
+        user net worth data","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"previous_response_id":null,"stream":null}'
     headers:
       Content-Type:
       - application/json
@@ -26,9 +26,9 @@ http_interactions:
       message: OK
     headers:
       Date:
-      - Wed, 26 Mar 2025 21:22:09 GMT
+      - Fri, 28 Mar 2025 14:26:28 GMT
       Content-Type:
-      - text/event-stream; charset=utf-8
+      - application/json
       Transfer-Encoding:
       - chunked
       Connection:
@@ -38,60 +38,104 @@ http_interactions:
       Openai-Organization:
       - "<OPENAI_ORGANIZATION_ID>"
       X-Request-Id:
-      - req_4f04cffbab6051b3ac301038e3796092
+      - req_a248d37b03c551b1c46bd378bb1222df
       Openai-Processing-Ms:
-      - '114'
+      - '844'
       Strict-Transport-Security:
       - max-age=31536000; includeSubDomains; preload
       Cf-Cache-Status:
       - DYNAMIC
       Set-Cookie:
-      - __cf_bm=F5haUlL1HA1srjwZugBxG6XWbGg.NyQBnJTTirKs5KI-1743024129-1.0.1.1-D842I3sPgDgH_KXyroq6uVivEnbWvm9WJF.L8a11GgUcULXjhweLHs0mXe6MWruf.FJe.lZj.KmX0tCqqdpKIt5JvlbHXt5D_9svedktlZY;
-        path=/; expires=Wed, 26-Mar-25 21:52:09 GMT; domain=.api.openai.com; HttpOnly;
+      - __cf_bm=z02SQd3cf0pSyMSllLTxVBBxPnoCRo.246XvQI3uWt8-1743171988-1.0.1.1-hR7THce0m_rGv7n1VCoCEziHmALE_MlWvIn6g3U70rjidYTqMkk4sui43cwZcBowvzGu51QGYgqGxYcNujkCU7otQh.6rJ7xwhluERZ1dVQ;
+        path=/; expires=Fri, 28-Mar-25 14:56:28 GMT; domain=.api.openai.com; HttpOnly;
         Secure; SameSite=None
-      - _cfuvid=MmuRzsy8ebDMe6ibCEwtGp2RzcntpAmdvDlhIZtlY1s-1743024129721-0.0.1.1-604800000;
+      - _cfuvid=uydeMYssjSrN96Tp7VCbfJ_0P.t29RCD.hgjMqpo4Ys-1743171988305-0.0.1.1-604800000;
         path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
       X-Content-Type-Options:
       - nosniff
       Server:
       - cloudflare
       Cf-Ray:
-      - 9269b3a97f370002-ORD
+      - 9277cd78fa721167-ORD
       Alt-Svc:
       - h3=":443"; ma=86400
     body:
-      encoding: UTF-8
-      string: |+
-        event: response.created
-        data: {"type":"response.created","response":{"id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","object":"response","created_at":1743024129,"status":"in_progress","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"Gets user net worth data","name":"get_net_worth","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
-
-        event: response.in_progress
-        data: {"type":"response.in_progress","response":{"id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","object":"response","created_at":1743024129,"status":"in_progress","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"Gets user net worth data","name":"get_net_worth","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
-
-        event: response.output_item.added
-        data: {"type":"response.output_item.added","output_index":0,"item":{"type":"function_call","id":"fc_67e4700222008192b3a26ce30fe7ad02069d9116026394b6","call_id":"call_FtvrJsTMg7he0mTeThIqktyL","name":"get_net_worth","arguments":"","status":"in_progress"}}
-
-        event: response.function_call_arguments.delta
-        data: {"type":"response.function_call_arguments.delta","item_id":"fc_67e4700222008192b3a26ce30fe7ad02069d9116026394b6","output_index":0,"delta":"{}"}
-
-        event: response.function_call_arguments.done
-        data: {"type":"response.function_call_arguments.done","item_id":"fc_67e4700222008192b3a26ce30fe7ad02069d9116026394b6","output_index":0,"arguments":"{}"}
-
-        event: response.output_item.done
-        data: {"type":"response.output_item.done","output_index":0,"item":{"type":"function_call","id":"fc_67e4700222008192b3a26ce30fe7ad02069d9116026394b6","call_id":"call_FtvrJsTMg7he0mTeThIqktyL","name":"get_net_worth","arguments":"{}","status":"completed"}}
-
-        event: response.completed
-        data: {"type":"response.completed","response":{"id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","object":"response","created_at":1743024129,"status":"completed","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[{"type":"function_call","id":"fc_67e4700222008192b3a26ce30fe7ad02069d9116026394b6","call_id":"call_FtvrJsTMg7he0mTeThIqktyL","name":"get_net_worth","arguments":"{}","status":"completed"}],"parallel_tool_calls":true,"previous_response_id":null,"reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[{"type":"function","description":"Gets user net worth data","name":"get_net_worth","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":271,"input_tokens_details":{"cached_tokens":0},"output_tokens":13,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":284},"user":null,"metadata":{}}}
-
-  recorded_at: Wed, 26 Mar 2025 21:22:10 GMT
+      encoding: ASCII-8BIT
+      string: |-
+        {
+          "id": "resp_67e6b19371d08192912d64ea220597a40a63c04976d80348",
+          "object": "response",
+          "created_at": 1743171987,
+          "status": "completed",
+          "error": null,
+          "incomplete_details": null,
+          "instructions": "Use the tools available to you to answer the user's question.",
+          "max_output_tokens": null,
+          "model": "gpt-4o-2024-08-06",
+          "output": [
+            {
+              "type": "function_call",
+              "id": "fc_67e6b19421dc8192bea1cbade015ebf60a63c04976d80348",
+              "call_id": "call_nSDlhJck6mchpjJ7YtjRLPJ3",
+              "name": "get_net_worth",
+              "arguments": "{}",
+              "status": "completed"
+            }
+          ],
+          "parallel_tool_calls": true,
+          "previous_response_id": null,
+          "reasoning": {
+            "effort": null,
+            "generate_summary": null
+          },
+          "store": true,
+          "temperature": 1.0,
+          "text": {
+            "format": {
+              "type": "text"
+            }
+          },
+          "tool_choice": "auto",
+          "tools": [
+            {
+              "type": "function",
+              "description": "Gets user net worth data",
+              "name": "get_net_worth",
+              "parameters": {
+                "type": "object",
+                "properties": {},
+                "required": [],
+                "additionalProperties": false
+              },
+              "strict": true
+            }
+          ],
+          "top_p": 1.0,
+          "truncation": "disabled",
+          "usage": {
+            "input_tokens": 271,
+            "input_tokens_details": {
+              "cached_tokens": 0
+            },
+            "output_tokens": 13,
+            "output_tokens_details": {
+              "reasoning_tokens": 0
+            },
+            "total_tokens": 284
+          },
+          "user": null,
+          "metadata": {}
+        }
+  recorded_at: Fri, 28 Mar 2025 14:26:28 GMT
 - request:
     method: post
     uri: https://api.openai.com/v1/responses
     body:
       encoding: UTF-8
       string: '{"model":"gpt-4o","input":[{"role":"user","content":"What is my net
-        worth?"},{"type":"function_call_output","call_id":"call_FtvrJsTMg7he0mTeThIqktyL","output":"\"$124,200\""}],"instructions":"Use
-        the tools available to you to answer the user''s question.","tools":[],"previous_response_id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","stream":true}'
+        worth?"},{"type":"function_call_output","call_id":"call_nSDlhJck6mchpjJ7YtjRLPJ3","output":"\"$124,200\""}],"instructions":"Use
+        the tools available to you to answer the user''s question.","tools":[{"type":"function","name":"get_net_worth","description":"Gets
+        user net worth data","parameters":{"type":"object","properties":{},"required":[],"additionalProperties":false},"strict":true}],"previous_response_id":"resp_67e6b19371d08192912d64ea220597a40a63c04976d80348","stream":null}'
     headers:
       Content-Type:
       - application/json
@@ -109,9 +153,9 @@ http_interactions:
      message: OK
     headers:
       Date:
-      - Wed, 26 Mar 2025 21:22:10 GMT
+      - Fri, 28 Mar 2025 14:26:29 GMT
       Content-Type:
-      - text/event-stream; charset=utf-8
+      - application/json
       Transfer-Encoding:
       - chunked
       Connection:
@@ -121,81 +165,98 @@ http_interactions:
       Openai-Organization:
       - "<OPENAI_ORGANIZATION_ID>"
       X-Request-Id:
-      - req_792bf572fac53f7e139b29d462933d8f
+      - req_f98ef29da61f92624043f8811022723f
       Openai-Processing-Ms:
-      - '148'
+      - '955'
       Strict-Transport-Security:
       - max-age=31536000; includeSubDomains; preload
       Cf-Cache-Status:
       - DYNAMIC
       Set-Cookie:
-      - __cf_bm=HHguTnSUQFt9KezJAQCrQF_OHn8ZH1C4xDjXRgexdzM-1743024130-1.0.1.1-ZhqxuASVfISfGQbvvKSNy_OQiUfkeIPN2DZhors0K4cl_BzE_P5u9kbc1HkgwyW1A_6GNAenh8Fr9AkoJ0zSakdg5Dr9AU.lu5nr7adQ_60;
-        path=/; expires=Wed, 26-Mar-25 21:52:10 GMT; domain=.api.openai.com; HttpOnly;
+      - __cf_bm=c6fqCfvVVdaYCZkLcuE.7OouNNV80j6MO8EU00mTjrY-1743171989-1.0.1.1-ktPAaO3lTpJTpMCzUjPnsJmCc.30YWDEi4.yrCjvrrIULg.ipK29VdpexmtH_jo87DlQ1kndA3INl_gyhCra_zJYb0nJkfJD9vXkA6oHG3A;
+        path=/; expires=Fri, 28-Mar-25 14:56:29 GMT; domain=.api.openai.com; HttpOnly;
         Secure; SameSite=None
-      - _cfuvid=hX9Y33ruiC9mhYzrOoxyOh23Gy.MfQa54h9l5CllWlI-1743024130948-0.0.1.1-604800000;
+      - _cfuvid=IZ3AyFlt2EHqyn4B4VQp_xuendIynQkbNd1EvOcjBIg-1743171989763-0.0.1.1-604800000;
         path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
       X-Content-Type-Options:
       - nosniff
       Server:
       - cloudflare
       Cf-Ray:
-      - 9269b3b0da83cf67-CMH
+      - 9277cd7fba29e82d-ORD
       Alt-Svc:
       - h3=":443"; ma=86400
     body:
-      encoding: UTF-8
-      string: |+
-        event: response.created
-        data: {"type":"response.created","response":{"id":"resp_67e47002c5b48192a8202d45c6a929f8069d9116026394b6","object":"response","created_at":1743024130,"status":"in_progress","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
-
-        event: response.in_progress
-        data: {"type":"response.in_progress","response":{"id":"resp_67e47002c5b48192a8202d45c6a929f8069d9116026394b6","object":"response","created_at":1743024130,"status":"in_progress","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[],"parallel_tool_calls":true,"previous_response_id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":null,"user":null,"metadata":{}}}
-
-        event: response.output_item.added
-        data: {"type":"response.output_item.added","output_index":0,"item":{"type":"message","id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","status":"in_progress","role":"assistant","content":[]}}
-
-        event: response.content_part.added
-        data: {"type":"response.content_part.added","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"part":{"type":"output_text","text":"","annotations":[]}}
-
-        event: response.output_text.delta
-        data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":"Your"}
-
-        event: response.output_text.delta
-        data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":" net"}
-
-        event: response.output_text.delta
-        data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":" worth"}
-
-        event: response.output_text.delta
-        data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":" is"}
-
-        event: response.output_text.delta
-        data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":" $"}
-
-        event: response.output_text.delta
-        data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":"124"}
-
-        event: response.output_text.delta
-        data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":","}
-
-        event: response.output_text.delta
-        data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":"200"}
-
-        event: response.output_text.delta
-        data: {"type":"response.output_text.delta","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"delta":"."}
-
-        event: response.output_text.done
-        data: {"type":"response.output_text.done","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"text":"Your net worth is $124,200."}
-
-        event: response.content_part.done
-        data: {"type":"response.content_part.done","item_id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","output_index":0,"content_index":0,"part":{"type":"output_text","text":"Your net worth is $124,200.","annotations":[]}}
-
-        event: response.output_item.done
-        data: {"type":"response.output_item.done","output_index":0,"item":{"type":"message","id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Your net worth is $124,200.","annotations":[]}]}}
-
-        event: response.completed
-        data: {"type":"response.completed","response":{"id":"resp_67e47002c5b48192a8202d45c6a929f8069d9116026394b6","object":"response","created_at":1743024130,"status":"completed","error":null,"incomplete_details":null,"instructions":"Use the tools available to you to answer the user's question.","max_output_tokens":null,"model":"gpt-4o-2024-08-06","output":[{"type":"message","id":"msg_67e47003483c819290ae392b826c4910069d9116026394b6","status":"completed","role":"assistant","content":[{"type":"output_text","text":"Your net worth is $124,200.","annotations":[]}]}],"parallel_tool_calls":true,"previous_response_id":"resp_67e4700196288192b27a4effc08dc47f069d9116026394b6","reasoning":{"effort":null,"generate_summary":null},"store":true,"temperature":1.0,"text":{"format":{"type":"text"}},"tool_choice":"auto","tools":[],"top_p":1.0,"truncation":"disabled","usage":{"input_tokens":85,"input_tokens_details":{"cached_tokens":0},"output_tokens":10,"output_tokens_details":{"reasoning_tokens":0},"total_tokens":95},"user":null,"metadata":{}}}
-
-  recorded_at: Wed, 26 Mar 2025 21:22:11 GMT
+      encoding: ASCII-8BIT
+      string: |-
+        {
+          "id": "resp_67e6b194cca08192af11a393c165ab300a63c04976d80348",
+          "object": "response",
+          "created_at": 1743171988,
+          "status": "completed",
+          "error": null,
+          "incomplete_details": null,
+          "instructions": "Use the tools available to you to answer the user's question.",
+          "max_output_tokens": null,
+          "model": "gpt-4o-2024-08-06",
+          "output": [
+            {
+              "type": "message",
+              "id": "msg_67e6b1957b5081928709cedc2b7d3f110a63c04976d80348",
+              "status": "completed",
+              "role": "assistant",
+              "content": [
+                {
+                  "type": "output_text",
+                  "text": "Your net worth is $124,200.",
+                  "annotations": []
+                }
+              ]
+            }
+          ],
+          "parallel_tool_calls": true,
+          "previous_response_id": "resp_67e6b19371d08192912d64ea220597a40a63c04976d80348",
+          "reasoning": {
+            "effort": null,
+            "generate_summary": null
+          },
+          "store": true,
+          "temperature": 1.0,
+          "text": {
+            "format": {
+              "type": "text"
+            }
+          },
+          "tool_choice": "auto",
+          "tools": [
+            {
+              "type": "function",
+              "description": "Gets user net worth data",
+              "name": "get_net_worth",
+              "parameters": {
+                "type": "object",
+                "properties": {},
+                "required": [],
+                "additionalProperties": false
+              },
+              "strict": true
+            }
+          ],
+          "top_p": 1.0,
+          "truncation": "disabled",
+          "usage": {
+            "input_tokens": 309,
+            "input_tokens_details": {
+              "cached_tokens": 0
+            },
+            "output_tokens": 11,
+            "output_tokens_details": {
+              "reasoning_tokens": 0
+            },
+            "total_tokens": 320
+          },
+          "user": null,
+          "metadata": {}
+        }
+  recorded_at: Fri, 28 Mar 2025 14:26:29 GMT
 recorded_with: VCR 6.3.1
-...