Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -65,3 +65,6 @@ examples/internal/email-evals/email-evals
/dist
# Added by goreleaser init:
dist/

# emacs
*~
1 change: 0 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ help:
@echo " test-vcr-record - Record/update VCR cassettes (requires API keys)"
@echo " test-vcr-verify - Verify VCR cassettes work without API keys"
@echo " cover - Run tests with coverage report"
@echo " cover-path - Run coverage for specific path (e.g., make cover-path PATH=./config)"
@echo " clean - Clean build artifacts and coverage files"
@echo " fmt - Format Go code"
@echo " lint - Run golangci-lint"
Expand Down
26 changes: 18 additions & 8 deletions examples/openai/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (

"github.com/openai/openai-go"
"github.com/openai/openai-go/option"
"github.com/openai/openai-go/responses"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/sdk/trace"

Expand Down Expand Up @@ -39,17 +40,26 @@ func main() {
ctx, span := tracer.Start(context.Background(), "examples/openai/main.go")
defer span.End()

// Make a simple chat completion request
resp, err := client.Chat.Completions.New(ctx, openai.ChatCompletionNewParams{
Messages: []openai.ChatCompletionMessageParamUnion{
openai.UserMessage("What is the capital of France?"),
},
// Make a simple Responses API request
resp, err := client.Responses.New(ctx, responses.ResponseNewParams{
Model: openai.ChatModelGPT4oMini,
Input: responses.ResponseNewParamsInputUnion{OfString: openai.String("What is the capital of France?")},
})
if err != nil {
log.Fatal(err)
}

fmt.Printf("Response: %s\n", resp.Choices[0].Message.Content)
fmt.Printf("View trace: %s\n", bt.Permalink(span))
switch resp.Status {
case responses.ResponseStatusCompleted:
fmt.Printf("Response: %s\n", resp.OutputText())
fmt.Printf("View trace: %s\n", bt.Permalink(span))
case responses.ResponseStatusIncomplete:
fmt.Println("incomplete:", resp.IncompleteDetails.Reason)
fmt.Printf("Response: %s\n", resp.OutputText())
fmt.Printf("View trace: %s\n", bt.Permalink(span))
case responses.ResponseStatusFailed:
fmt.Println("failed:", resp.Error.Message)
fmt.Printf("View trace: %s\n", bt.Permalink(span))
default:
fmt.Println("status:", resp.Status)
}
}
9 changes: 6 additions & 3 deletions trace/contrib/openai/responses.go
Original file line number Diff line number Diff line change
Expand Up @@ -130,9 +130,8 @@ func (rt *responsesTracer) parseStreamingResponse(span trace.Span, body io.Reade
}

if msgType, ok := envelope["type"].(string); ok {
// the response.completed message has everything, so just parse that. Should we
// parse the other messages too?
if msgType == "response.completed" {
switch msgType {
case "response.completed", "response.failed", "response.incomplete":
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For the response.failed case, can we capture the error somehow?

if msg, ok := envelope["response"].(map[string]any); ok {
// For streaming responses, copy extra fields from the envelope
// that might be present in the outer wrapper
Expand Down Expand Up @@ -181,6 +180,7 @@ func (rt *responsesTracer) handleResponseCompletedMessage(span trace.Span, rawMs
metadataFields := []string{
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

now that we are collecting response.incomplete, I think we should add incomplete_details as a field here as well.

"id",
"object",
"status",
"system_fingerprint",
"completion_tokens",
"created",
Expand All @@ -193,6 +193,9 @@ func (rt *responsesTracer) handleResponseCompletedMessage(span trace.Span, rawMs
"content_filter_results",
"reasoning",
"text",
"usage",
"incomplete_details",
"error",
}

for _, field := range metadataFields {
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,164 @@
---
version: 2
interactions:
- id: 0
request:
proto: HTTP/1.1
proto_major: 1
proto_minor: 1
content_length: 64
transfer_encoding: []
trailer: {}
host: ""
remote_addr: ""
request_uri: ""
body: '{"input":"What is the capital of France?","model":"gpt-4o-mini"}'
form: {}
headers:
Accept:
- application/json
Content-Type:
- application/json
User-Agent:
- OpenAI/Go 1.12.0
X-Stainless-Arch:
- arm64
X-Stainless-Lang:
- go
X-Stainless-Os:
- MacOS
X-Stainless-Package-Version:
- 1.12.0
X-Stainless-Retry-Count:
- "0"
X-Stainless-Runtime:
- go
X-Stainless-Runtime-Version:
- go1.26.2
url: https://api.openai.com/v1/responses
method: POST
response:
proto: HTTP/2.0
proto_major: 2
proto_minor: 0
transfer_encoding: []
trailer: {}
content_length: -1
uncompressed: true
body: |-
{
"id": "resp_00297490fcbc5dcc0069f0d13e07b0819ba4cb70853f95cc0e",
"object": "response",
"created_at": 1777389886,
"status": "completed",
"background": false,
"billing": {
"payer": "developer"
},
"completed_at": 1777389887,
"error": null,
"frequency_penalty": 0.0,
"incomplete_details": null,
"instructions": null,
"max_output_tokens": null,
"max_tool_calls": null,
"model": "gpt-4o-mini-2024-07-18",
"moderation": null,
"output": [
{
"id": "msg_00297490fcbc5dcc0069f0d13f0a20819bb1002795595779e0",
"type": "message",
"status": "completed",
"content": [
{
"type": "output_text",
"annotations": [],
"logprobs": [],
"text": "The capital of France is Paris."
}
],
"role": "assistant"
}
],
"parallel_tool_calls": true,
"presence_penalty": 0.0,
"previous_response_id": null,
"prompt_cache_key": null,
"prompt_cache_retention": "in_memory",
"reasoning": {
"effort": null,
"summary": null
},
"safety_identifier": null,
"service_tier": "default",
"store": true,
"temperature": 1.0,
"text": {
"format": {
"type": "text"
},
"verbosity": "medium"
},
"tool_choice": "auto",
"tools": [],
"top_logprobs": 0,
"top_p": 1.0,
"truncation": "disabled",
"usage": {
"input_tokens": 14,
"input_tokens_details": {
"cached_tokens": 0
},
"output_tokens": 8,
"output_tokens_details": {
"reasoning_tokens": 0
},
"total_tokens": 22
},
"user": null,
"metadata": {}
}
headers:
Alt-Svc:
- h3=":443"; ma=86400
Cf-Cache-Status:
- DYNAMIC
Cf-Ray:
- 9f3713632b7b420b-EWR
Content-Type:
- application/json
Date:
- Tue, 28 Apr 2026 15:24:47 GMT
Openai-Organization:
- braintrust-data
Openai-Processing-Ms:
- "1247"
Openai-Project:
- proj_vsCSXafhhByzWOThMrJcZiw9
Openai-Version:
- "2020-10-01"
Server:
- cloudflare
Set-Cookie:
- __cf_bm=YyPcSpHIhDzxiZr_PPUz5BpglZaiN47o3bMWhvYrpQs-1777389885.94245-1.0.1.1-4pWcI.VMIhN1F7QL39HKPE2Vg2TBw7NY0KIK3C9oPVT8NPDyyLntvKFNnKngMo1zyBUN73JKAALmHagsaNm6ZNX5En.2r.Ce5xktre0looqN_4t5T3rQnqs5M2ZKI78t; HttpOnly; Secure; Path=/; Domain=api.openai.com; Expires=Tue, 28 Apr 2026 15:54:47 GMT
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
X-Content-Type-Options:
- nosniff
X-Ratelimit-Limit-Requests:
- "30000"
X-Ratelimit-Limit-Tokens:
- "150000000"
X-Ratelimit-Remaining-Requests:
- "29999"
X-Ratelimit-Remaining-Tokens:
- "149999967"
X-Ratelimit-Reset-Requests:
- 2ms
X-Ratelimit-Reset-Tokens:
- 0s
X-Request-Id:
- req_388462f6d6934e98b2adb25f5adaee39
status: 200 OK
code: 200
duration: 1.61271275s
Loading