mirror of https://github.com/gosticks/openai-responses-api-go.git
synced 2025-10-16 11:55:34 +00:00

feat: initial setup (commit 7d587a219c)
21 LICENSE Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Wlad Meixner

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
332 README.md Normal file
@@ -0,0 +1,332 @@
# OpenAI Responses API Go Client

A Go client for the OpenAI Responses API, which combines the simplicity of Chat Completions with the tool use and state management of the Assistants API.

## ⚠️ Temporary Solution Warning

**IMPORTANT**: This library is intended as a temporary solution until the official OpenAI library includes full support for the Responses API. Once the official library releases this functionality, it is recommended to migrate to that implementation for better maintenance and official support.

This implementation aims to bridge the gap between the current capabilities of the official libraries and the new Responses API features. It may not be maintained long-term once official support is available.

## Installation

```bash
go get github.com/gosticks/openai-responses-api-go
```

## Usage

### Important Note

The OpenAI Responses API uses an `input` field that is an array of message objects, which differs from the `messages` field of the Chat Completions API. Each message has a `role` (such as "user" or "developer") and `content`.
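For illustration, the helper constructors used below produce messages of roughly this shape (a minimal sketch using the struct fields directly; `ResponseInputMessage` exposes `Role` and `Content`):

```go
input := []openairesponses.ResponseInputMessage{
	{Role: "developer", Content: "You are a helpful assistant."},
	{Role: "user", Content: "Hello!"},
}
```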
### Basic Usage

```go
package main

import (
	"context"
	"fmt"
	"os"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

func main() {
	// Get API key from environment variable
	apiKey := os.Getenv("OPENAI_API_KEY")
	if apiKey == "" {
		fmt.Println("OPENAI_API_KEY environment variable is not set")
		os.Exit(1)
	}

	// Create a new client
	client := openairesponses.NewClient(apiKey)

	// Create a new response
	resp, err := client.Responses.Create(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.DeveloperInputMessage("You are a helpful assistant."),
				openairesponses.UserInputMessage("Hello, how are you today?"),
			},
		},
	)
	if err != nil {
		fmt.Printf("Error creating response: %v\n", err)
		os.Exit(1)
	}

	// Print the response
	fmt.Printf("Response: %s\n", resp.Choices[0].Message.Content)
}
```
### Streaming Responses

```go
package main

import (
	"context"
	"fmt"
	"io"
	"os"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

func main() {
	// Get API key from environment variable
	apiKey := os.Getenv("OPENAI_API_KEY")
	if apiKey == "" {
		fmt.Println("OPENAI_API_KEY environment variable is not set")
		os.Exit(1)
	}

	// Create a new client
	client := openairesponses.NewClient(apiKey)

	// Create a new streaming response
	stream, err := client.Responses.CreateStream(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.DeveloperInputMessage("You are a helpful assistant."),
				openairesponses.UserInputMessage("Write a short poem about programming."),
			},
		},
	)
	if err != nil {
		fmt.Printf("Error creating streaming response: %v\n", err)
		os.Exit(1)
	}
	defer stream.Close()

	// Create an accumulator to collect the streaming chunks
	accumulator := &openairesponses.ResponsesStreamAccumulator{}

	// Print the streaming response
	fmt.Println("Streaming response:")
	for {
		chunk, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			fmt.Printf("Error receiving chunk: %v\n", err)
			os.Exit(1)
		}

		// Add the chunk to the accumulator
		accumulator.AddChunk(chunk)

		// Print the chunk
		for _, choice := range chunk.Choices {
			if choice.Delta.Content != "" {
				fmt.Print(choice.Delta.Content)
			}
		}
	}
	fmt.Println()

	// Convert the accumulator to a response
	resp := accumulator.ToResponse()

	// Print the accumulated response
	fmt.Printf("\nAccumulated response: %s\n", resp.Choices[0].Message.Content)
}
```
### Using Tools

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

// WeatherParams represents the parameters for the weather function
type WeatherParams struct {
	Location string `json:"location"`
	Unit     string `json:"unit,omitempty"`
}

// getWeather is a mock function to get the weather
func getWeather(location, unit string) string {
	// In a real application, this would call a weather API
	return fmt.Sprintf("The weather in %s is sunny and 25 degrees %s.", location, unit)
}

func main() {
	// Get API key from environment variable
	apiKey := os.Getenv("OPENAI_API_KEY")
	if apiKey == "" {
		fmt.Println("OPENAI_API_KEY environment variable is not set")
		os.Exit(1)
	}

	// Create a new client
	client := openairesponses.NewClient(apiKey)

	// Define the weather function parameters schema
	weatherParamsSchema := map[string]interface{}{
		"type": "object",
		"properties": map[string]interface{}{
			"location": map[string]interface{}{
				"type":        "string",
				"description": "The city and state, e.g. San Francisco, CA",
			},
			"unit": map[string]interface{}{
				"type": "string",
				"enum": []string{"celsius", "fahrenheit"},
			},
		},
		"required": []string{"location"},
	}

	// Create a new response with a function tool
	resp, err := client.Responses.Create(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.DeveloperInputMessage("You are a helpful assistant."),
				openairesponses.UserInputMessage("What's the weather like in San Francisco?"),
			},
			Tools: []openairesponses.ResponseTool{
				openairesponses.NewFunctionTool(
					"get_weather",
					"Get the current weather in a given location",
					weatherParamsSchema,
				),
			},
		},
	)
	if err != nil {
		fmt.Printf("Error creating response: %v\n", err)
		os.Exit(1)
	}

	// Check if the model wants to call a function
	if len(resp.Choices) > 0 && len(resp.Choices[0].ToolCalls) > 0 {
		// Get the function call
		toolCall := resp.Choices[0].ToolCalls[0]
		if toolCall.Function.Name == "get_weather" {
			// Parse the function arguments
			var params WeatherParams
			if err := json.Unmarshal([]byte(toolCall.Function.Arguments), &params); err != nil {
				fmt.Printf("Error parsing function arguments: %v\n", err)
				os.Exit(1)
			}

			// Call the function
			unit := params.Unit
			if unit == "" {
				unit = "celsius"
			}
			result := getWeather(params.Location, unit)

			// Print the function result
			fmt.Printf("Function result: %s\n", result)

			// Create a new response with the function result
			resp, err = client.Responses.Create(
				context.Background(),
				openairesponses.ResponseRequest{
					Model: "gpt-4o",
					Input: []openairesponses.ResponseInputMessage{
						openairesponses.DeveloperInputMessage("You are a helpful assistant."),
						openairesponses.UserInputMessage("What's the weather like in San Francisco?"),
					},
					// In a real application, you would include the tool call and its
					// result in the input, as shown in examples/function_call_output.
				},
			)
			if err != nil {
				fmt.Printf("Error creating response: %v\n", err)
				os.Exit(1)
			}
		}
	}

	// Print the response
	fmt.Printf("Response: %s\n", resp.Choices[0].Message.Content)
}
```
### Using Built-in Tools

The Responses API supports built-in tools like web search, file search, and computer use. Here's how to use them:

```go
// Create a new response with built-in tools
resp, err := client.Responses.Create(
	context.Background(),
	openairesponses.ResponseRequest{
		Model: "gpt-4o",
		Input: []openairesponses.ResponseInputMessage{
			openairesponses.DeveloperInputMessage("You are a helpful assistant."),
			openairesponses.UserInputMessage("What's the latest news about OpenAI?"),
		},
		Tools: []openairesponses.ResponseTool{
			openairesponses.NewWebSearchTool(),
		},
	},
)
```

## Response State Management

The Responses API allows you to manage the state of a conversation:

```go
// Create a new response state
stateResp, err := client.Responses.CreateState(
	context.Background(),
	openairesponses.ResponseStateRequest{
		Messages: []openairesponses.ResponseMessage{
			openairesponses.SystemMessage("You are a helpful assistant."),
			openairesponses.UserMessage("Hello, how are you today?"),
			openairesponses.AssistantMessage("I'm doing well, thank you for asking! How can I help you today?"),
		},
	},
)
if err != nil {
	fmt.Printf("Error creating response state: %v\n", err)
	os.Exit(1)
}

// Get a response state
stateResp, err = client.Responses.GetState(
	context.Background(),
	stateResp.ID,
)
if err != nil {
	fmt.Printf("Error getting response state: %v\n", err)
	os.Exit(1)
}

// Delete a response state
err = client.Responses.DeleteState(
	context.Background(),
	stateResp.ID,
)
if err != nil {
	fmt.Printf("Error deleting response state: %v\n", err)
	os.Exit(1)
}
```

## License

This library is licensed under the MIT License. See the LICENSE file for details.
197 client/client.go Normal file
@@ -0,0 +1,197 @@
package client

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"time"
)

const (
	// DefaultBaseURL is the default base URL for the OpenAI API
	DefaultBaseURL = "https://api.openai.com/v1"
	// DefaultUserAgent is the default user agent for the OpenAI API client
	DefaultUserAgent = "openai-responses-api-go/1.0.0"
	// DefaultTimeout is the default timeout for API requests
	DefaultTimeout = 30 * time.Second
)

// Client is the client for the OpenAI Responses API
type Client struct {
	// BaseURL is the base URL for API requests
	BaseURL string
	// APIKey is the API key for authentication
	APIKey string
	// HTTPClient is the HTTP client used to make API requests
	HTTPClient *http.Client
	// UserAgent is the user agent for API requests
	UserAgent string
	// Organization is the organization ID for API requests
	Organization string
}

// ClientOption is a function that configures a Client
type ClientOption func(*Client)

// WithBaseURL sets the base URL for the client
func WithBaseURL(baseURL string) ClientOption {
	return func(c *Client) {
		c.BaseURL = baseURL
	}
}

// WithAPIKey sets the API key for the client
func WithAPIKey(apiKey string) ClientOption {
	return func(c *Client) {
		c.APIKey = apiKey
	}
}

// WithHTTPClient sets the HTTP client for the client
func WithHTTPClient(httpClient *http.Client) ClientOption {
	return func(c *Client) {
		c.HTTPClient = httpClient
	}
}

// WithUserAgent sets the user agent for the client
func WithUserAgent(userAgent string) ClientOption {
	return func(c *Client) {
		c.UserAgent = userAgent
	}
}

// WithOrganization sets the organization ID for the client
func WithOrganization(organization string) ClientOption {
	return func(c *Client) {
		c.Organization = organization
	}
}

// NewClient creates a new OpenAI Responses API client
func NewClient(options ...ClientOption) *Client {
	client := &Client{
		BaseURL:    DefaultBaseURL,
		UserAgent:  DefaultUserAgent,
		HTTPClient: &http.Client{Timeout: DefaultTimeout},
	}

	// Apply options
	for _, option := range options {
		option(client)
	}

	// If API key is not set, try to get it from environment variable
	if client.APIKey == "" {
		client.APIKey = os.Getenv("OPENAI_API_KEY")
	}

	return client
}

// APIError represents an error returned by the OpenAI API
type APIError struct {
	Code       *string `json:"code,omitempty"`
	Message    string  `json:"message"`
	Param      *string `json:"param,omitempty"`
	Type       string  `json:"type"`
	StatusCode int     `json:"-"`
}

// Error implements the error interface
func (e *APIError) Error() string {
	if e.Code != nil {
		return fmt.Sprintf("OpenAI API error: code=%s message=%s param=%v type=%s status_code=%d",
			*e.Code, e.Message, e.Param, e.Type, e.StatusCode)
	}
	return fmt.Sprintf("OpenAI API error: message=%s param=%v type=%s status_code=%d",
		e.Message, e.Param, e.Type, e.StatusCode)
}

// ErrorResponse represents the error response from the OpenAI API
type ErrorResponse struct {
	Error *APIError `json:"error,omitempty"`
}

// request makes an HTTP request to the OpenAI API
func (c *Client) request(ctx context.Context, method, path string, body interface{}, v interface{}) error {
	// Construct the URL
	u, err := url.Parse(c.BaseURL + path)
	if err != nil {
		return err
	}

	// Create the request body
	var reqBody io.Reader
	if body != nil {
		jsonBody, err := json.Marshal(body)
		if err != nil {
			return err
		}
		reqBody = bytes.NewBuffer(jsonBody)
	}

	// Create the request
	req, err := http.NewRequestWithContext(ctx, method, u.String(), reqBody)
	if err != nil {
		return err
	}

	// Set headers
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	req.Header.Set("User-Agent", c.UserAgent)
	req.Header.Set("Authorization", "Bearer "+c.APIKey)
	if c.Organization != "" {
		req.Header.Set("OpenAI-Organization", c.Organization)
	}

	// Make the request
	resp, err := c.HTTPClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Check for errors
	if resp.StatusCode >= 400 {
		var errResp ErrorResponse
		if err := json.NewDecoder(resp.Body).Decode(&errResp); err != nil {
			return fmt.Errorf("error decoding error response: %w", err)
		}
		if errResp.Error != nil {
			errResp.Error.StatusCode = resp.StatusCode
			return errResp.Error
		}
		return fmt.Errorf("unknown error, status code: %d", resp.StatusCode)
	}

	// Decode the response
	if v != nil {
		if err := json.NewDecoder(resp.Body).Decode(v); err != nil {
			return err
		}
	}

	return nil
}

// get makes a GET request to the OpenAI API
func (c *Client) get(ctx context.Context, path string, v interface{}) error {
	return c.request(ctx, http.MethodGet, path, nil, v)
}

// post makes a POST request to the OpenAI API
func (c *Client) post(ctx context.Context, path string, body interface{}, v interface{}) error {
	return c.request(ctx, http.MethodPost, path, body, v)
}

// delete makes a DELETE request to the OpenAI API
func (c *Client) delete(ctx context.Context, path string, v interface{}) error {
	return c.request(ctx, http.MethodDelete, path, nil, v)
}
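As a usage sketch (not part of the diff): constructing this client with the functional options above, and inspecting an `*APIError` with `errors.As`. The option names, `NewResponses` (defined in client/responses.go below), and the request types all come from this commit; the request values are placeholders.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"

	"github.com/gosticks/openai-responses-api-go/client"
	"github.com/gosticks/openai-responses-api-go/models"
)

func main() {
	// Options can be combined freely; the API key falls back to the
	// OPENAI_API_KEY environment variable when not set explicitly.
	c := client.NewClient(
		client.WithHTTPClient(&http.Client{Timeout: 60 * time.Second}),
	)
	responses := client.NewResponses(c)

	_, err := responses.Create(context.Background(), models.ResponseRequest{
		Model: "gpt-4o",
		Input: []models.ResponseInputMessage{
			{Role: "user", Content: "Hello"},
		},
	})
	if err != nil {
		// HTTP 4xx/5xx responses surface as *APIError with the status code set.
		var apiErr *client.APIError
		if errors.As(err, &apiErr) {
			fmt.Println("API error, status:", apiErr.StatusCode)
		}
	}
}
```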
580 client/responses.go Normal file
@@ -0,0 +1,580 @@
package client

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"

	"github.com/gosticks/openai-responses-api-go/models"
)

const (
	responsesEndpoint      = "/responses"
	responsesStateEndpoint = "/responses/state"
)

// Responses is the client for the OpenAI Responses API
type Responses struct {
	client *Client
}

// NewResponses creates a new Responses client
func NewResponses(client *Client) *Responses {
	return &Responses{
		client: client,
	}
}

// Create creates a new response
func (r *Responses) Create(ctx context.Context, request models.ResponseRequest) (*models.ResponseResponse, error) {
	var response models.ResponseResponse
	err := r.client.post(ctx, responsesEndpoint, request, &response)
	if err != nil {
		return nil, err
	}

	// Set the OutputText field based on the first choice's content
	if len(response.Choices) > 0 && response.Choices[0].Message.Content != "" {
		response.OutputText = response.Choices[0].Message.Content
	}

	return &response, nil
}

// CreateStream creates a new streaming response
func (r *Responses) CreateStream(ctx context.Context, request models.ResponseRequest) (*ResponsesStream, error) {
	// Ensure streaming is enabled
	request.Stream = true

	// Create the request
	reqBody, err := json.Marshal(request)
	if err != nil {
		return nil, err
	}

	// Construct the URL
	u := r.client.BaseURL + responsesEndpoint

	// Create the HTTP request
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, strings.NewReader(string(reqBody)))
	if err != nil {
		return nil, err
	}

	// Set headers
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Accept", "application/json")
	req.Header.Set("User-Agent", r.client.UserAgent)
	req.Header.Set("Authorization", "Bearer "+r.client.APIKey)
	if r.client.Organization != "" {
		req.Header.Set("OpenAI-Organization", r.client.Organization)
	}

	// Make the request
	resp, err := r.client.HTTPClient.Do(req)
	if err != nil {
		return nil, err
	}

	// Check for errors
	if resp.StatusCode >= 400 {
		defer resp.Body.Close()
		var errResp ErrorResponse
		if err := json.NewDecoder(resp.Body).Decode(&errResp); err != nil {
			return nil, fmt.Errorf("error decoding error response: %w", err)
		}
		if errResp.Error != nil {
			errResp.Error.StatusCode = resp.StatusCode
			return nil, errResp.Error
		}
		return nil, fmt.Errorf("unknown error, status code: %d", resp.StatusCode)
	}

	return &ResponsesStream{
		reader:   bufio.NewReader(resp.Body),
		response: resp,
	}, nil
}

// CreateState creates a new response state
func (r *Responses) CreateState(ctx context.Context, request models.ResponseStateRequest) (*models.ResponseStateResponse, error) {
	var response models.ResponseStateResponse
	err := r.client.post(ctx, responsesStateEndpoint, request, &response)
	if err != nil {
		return nil, err
	}
	return &response, nil
}

// GetState gets a response state
func (r *Responses) GetState(ctx context.Context, id string) (*models.ResponseStateResponse, error) {
	var response models.ResponseStateResponse
	err := r.client.get(ctx, fmt.Sprintf("%s/%s", responsesStateEndpoint, id), &response)
	if err != nil {
		return nil, err
	}
	return &response, nil
}

// DeleteState deletes a response state
func (r *Responses) DeleteState(ctx context.Context, id string) error {
	return r.client.delete(ctx, fmt.Sprintf("%s/%s", responsesStateEndpoint, id), nil)
}

// ResponsesStream is a stream of responses
type ResponsesStream struct {
	reader   *bufio.Reader
	response *http.Response
	err      error
}

// Recv receives the next response from the stream
func (s *ResponsesStream) Recv() (*models.ResponseStreamResponse, error) {
	// Check if there was a previous error
	if s.err != nil {
		return nil, s.err
	}

	// Read the next line
	line, err := s.reader.ReadString('\n')
	if err != nil {
		s.err = err
		return nil, err
	}

	// Skip empty lines
	line = strings.TrimSpace(line)
	if line == "" {
		return s.Recv()
	}

	// Check for data prefix
	const prefix = "data: "
	if !strings.HasPrefix(line, prefix) {
		return s.Recv()
	}

	// Extract the data
	data := strings.TrimPrefix(line, prefix)

	// Check for the end of the stream
	if data == "[DONE]" {
		s.err = io.EOF
		return nil, io.EOF
	}

	// Parse the new response format
	var eventData map[string]interface{}
	if err := json.Unmarshal([]byte(data), &eventData); err != nil {
		s.err = err
		return nil, err
	}

	// Create a response object
	response := &models.ResponseStreamResponse{}

	// Extract the event type
	eventType, _ := eventData["type"].(string)

	// Handle different event types
	switch eventType {
	case "response.created", "response.in_progress":
		// Extract response data
		if respData, ok := eventData["response"].(map[string]interface{}); ok {
			response.ID, _ = respData["id"].(string)
			response.Object, _ = respData["object"].(string)
			if createdAt, ok := respData["created_at"].(float64); ok {
				response.Created = int64(createdAt)
			}
			response.Model, _ = respData["model"].(string)
		}

	case "response.output_text.delta":
		// Extract delta text
		delta, _ := eventData["delta"].(string)

		// Create a choice with the delta content
		response.Choices = []models.ResponseStreamChoice{
			{
				Index: 0,
				Delta: models.ResponseStreamDelta{
					Content: delta,
				},
			},
		}

	// Output item events
	case "response.output_item.added":
		// A new output item (e.g., a function call) is added
		if item, ok := eventData["item"].(map[string]interface{}); ok {
			if index, ok := eventData["output_index"].(float64); ok {
				itemType, _ := item["type"].(string)

				if itemType == "function_call" {
					id, _ := item["id"].(string)
					callID, _ := item["call_id"].(string)
					name, _ := item["name"].(string)

					toolCall := models.ResponseToolCall{
						ID:     id,
						CallID: callID,
						Type:   "function",
					}
					toolCall.Function.Name = name

					response.Choices = []models.ResponseStreamChoice{
						{
							Index: int(index),
							Delta: models.ResponseStreamDelta{
								ToolCalls: []models.ResponseToolCall{toolCall},
							},
						},
					}
				}
			}
		}

	case "response.output_item.done":
		// An output item has completed
		if item, ok := eventData["item"].(map[string]interface{}); ok {
			if index, ok := eventData["output_index"].(float64); ok {
				itemType, _ := item["type"].(string)

				if itemType == "function_call" {
					id, _ := item["id"].(string)
					callID, _ := item["call_id"].(string)
					name, _ := item["name"].(string)
					arguments, _ := item["arguments"].(string)

					toolCall := models.ResponseToolCall{
						ID:     id,
						CallID: callID,
						Type:   "function",
					}
					toolCall.Function.Name = name
					toolCall.Function.Arguments = arguments

					response.Choices = []models.ResponseStreamChoice{
						{
							Index: int(index),
							Delta: models.ResponseStreamDelta{
								ToolCalls: []models.ResponseToolCall{toolCall},
							},
						},
					}
				}
			}
		}

	// File search related events
	case "response.file_search_call.in_progress",
		"response.file_search_call.searching",
		"response.file_search_call.completed":
		// A file search is in progress or completed
		if index, ok := eventData["output_index"].(float64); ok {
			if itemID, ok := eventData["item_id"].(string); ok {
				response.Choices = []models.ResponseStreamChoice{
					{
						Index: int(index),
						Delta: models.ResponseStreamDelta{
							// Create a tool call for file search
							ToolCalls: []models.ResponseToolCall{
								{
									ID:   itemID,
									Type: "file_search", // Use file_search as type
								},
							},
						},
					},
				}
			}
		}

	// Function call events
	case "response.tool_call.created", "response.tool_call.in_progress":
		// A tool call is being created
		if index, ok := eventData["output_index"].(float64); ok {
			response.Choices = []models.ResponseStreamChoice{
				{
					Index: int(index),
					Delta: models.ResponseStreamDelta{
						// An empty delta to indicate a tool call is being created
						ToolCalls: []models.ResponseToolCall{
							{
								// Empty tool call to be populated with subsequent events
							},
						},
					},
				},
			}
		}

	case "response.tool_call.id":
		// Get the tool call ID
		if toolCallID, ok := eventData["id"].(string); ok {
			if index, ok := eventData["output_index"].(float64); ok {
				response.Choices = []models.ResponseStreamChoice{
					{
						Index: int(index),
						Delta: models.ResponseStreamDelta{
							ToolCalls: []models.ResponseToolCall{
								{
									ID: toolCallID,
								},
							},
						},
					},
				}
			}
		}

	// Function call argument events
	case "response.function_call_arguments.delta":
		// Get the function arguments delta
		if delta, ok := eventData["delta"].(string); ok {
			if index, ok := eventData["output_index"].(float64); ok {
				if itemID, ok := eventData["item_id"].(string); ok {
					toolCall := models.ResponseToolCall{
						ID: itemID,
					}
					toolCall.Function.Arguments = delta

					response.Choices = []models.ResponseStreamChoice{
						{
							Index: int(index),
							Delta: models.ResponseStreamDelta{
								ToolCalls: []models.ResponseToolCall{toolCall},
							},
						},
					}
				}
			}
		}

	case "response.function_call_arguments.done":
		// Get the complete function arguments
		if arguments, ok := eventData["arguments"].(string); ok {
			if index, ok := eventData["output_index"].(float64); ok {
				if itemID, ok := eventData["item_id"].(string); ok {
					toolCall := models.ResponseToolCall{
						ID: itemID,
					}
					toolCall.Function.Arguments = arguments

					response.Choices = []models.ResponseStreamChoice{
						{
							Index: int(index),
							Delta: models.ResponseStreamDelta{
								ToolCalls: []models.ResponseToolCall{toolCall},
							},
						},
					}
				}
			}
		}

	case "response.completed", "response.incomplete":
		// Extract usage data if available
		if respData, ok := eventData["response"].(map[string]interface{}); ok {
			response.ID, _ = respData["id"].(string)
			response.Object, _ = respData["object"].(string)
			if createdAt, ok := respData["created_at"].(float64); ok {
				response.Created = int64(createdAt)
			}
			response.Model, _ = respData["model"].(string)

			if usageData, ok := respData["usage"].(map[string]interface{}); ok {
				promptTokens, _ := usageData["prompt_tokens"].(float64)
				completionTokens, _ := usageData["completion_tokens"].(float64)
				totalTokens, _ := usageData["total_tokens"].(float64)

				response.Usage = &models.Usage{
					PromptTokens:     int(promptTokens),
					CompletionTokens: int(completionTokens),
					TotalTokens:      int(totalTokens),
				}
			}
		}

		// Signal that this is the end of the stream
		s.err = io.EOF
	}

	// Skip events that don't contain useful data for our client
	if len(response.Choices) == 0 && response.ID == "" && response.Usage == nil {
		return s.Recv()
	}

	return response, nil
}
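For context (not part of the diff): `Recv` above consumes server-sent events of roughly this shape. The `data:` framing and the `[DONE]` sentinel match the parser exactly; the event payloads are illustrative sketches that use only fields the code actually reads.

```go
// Illustrative wire format consumed by Recv (one "data: " line per event;
// blank and non-data lines are skipped, "[DONE]" ends the stream):
const exampleSSE = `data: {"type":"response.created","response":{"id":"resp_abc","object":"response","created_at":1710000000,"model":"gpt-4o"}}

data: {"type":"response.output_text.delta","delta":"Hello"}

data: {"type":"response.output_text.delta","delta":", world!"}

data: {"type":"response.completed","response":{"id":"resp_abc","usage":{"prompt_tokens":12,"completion_tokens":4,"total_tokens":16}}}

data: [DONE]
`
```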
// Close closes the stream
func (s *ResponsesStream) Close() error {
	if s.response != nil && s.response.Body != nil {
		return s.response.Body.Close()
	}
	return nil
}

// Err returns the last error that occurred while reading from the stream
func (s *ResponsesStream) Err() error {
	if s.err == io.EOF {
		return nil
	}
	return s.err
}

// ResponsesStreamAccumulator accumulates streaming responses
type ResponsesStreamAccumulator struct {
	ID      string
	Object  string
	Created int64
	Model   string
	Choices []models.ResponseChoice
	Usage   *models.Usage
}

// AddChunk adds a chunk to the accumulator
func (a *ResponsesStreamAccumulator) AddChunk(chunk *models.ResponseStreamResponse) {
	// Initialize the accumulator if this is the first chunk with an ID
	if a.ID == "" && chunk.ID != "" {
		a.ID = chunk.ID
		a.Object = chunk.Object
		a.Created = chunk.Created
		a.Model = chunk.Model

		// Initialize choices if there are any in the chunk
		if len(chunk.Choices) > 0 {
			a.Choices = make([]models.ResponseChoice, len(chunk.Choices))
			for i := range chunk.Choices {
				a.Choices[i] = models.ResponseChoice{
					Index:        chunk.Choices[i].Index,
					Message:      models.ResponseMessage{Role: "assistant"},
					FinishReason: "",
				}
			}
		} else {
			// Initialize with at least one choice for content
			a.Choices = []models.ResponseChoice{
				{
					Index:        0,
					Message:      models.ResponseMessage{Role: "assistant"},
					FinishReason: "",
				},
			}
		}
	}

	// If this chunk has usage data, store it
	if chunk.Usage != nil {
		a.Usage = chunk.Usage
	}

	// Ensure we have at least one choice for content
	if len(a.Choices) == 0 && len(chunk.Choices) > 0 {
		a.Choices = []models.ResponseChoice{
			{
				Index:        0,
				Message:      models.ResponseMessage{Role: "assistant"},
				FinishReason: "",
			},
		}
	}

	// Update the accumulator with the chunk data
	for _, choice := range chunk.Choices {
		// Ensure we have enough choices
		for len(a.Choices) <= choice.Index {
			a.Choices = append(a.Choices, models.ResponseChoice{
				Index:        len(a.Choices),
				Message:      models.ResponseMessage{Role: "assistant"},
				FinishReason: "",
			})
		}

		// Update the message
		if choice.Delta.Role != "" {
			a.Choices[choice.Index].Message.Role = choice.Delta.Role
		}
		if choice.Delta.Content != "" {
			a.Choices[choice.Index].Message.Content += choice.Delta.Content
		}

		// Update the tool calls
		if len(choice.Delta.ToolCalls) > 0 {
			for _, toolCallDelta := range choice.Delta.ToolCalls {
				// If our choice doesn't have tool calls yet, initialize the slice
				if a.Choices[choice.Index].ToolCalls == nil {
					a.Choices[choice.Index].ToolCalls = []models.ResponseToolCall{}
				}

				// Find if we already have this tool call
				toolCallIndex := -1
				for i, existingToolCall := range a.Choices[choice.Index].ToolCalls {
					if existingToolCall.ID == toolCallDelta.ID {
						toolCallIndex = i
						break
					}
				}

				// If we don't have this tool call yet, add it
				if toolCallIndex == -1 {
					a.Choices[choice.Index].ToolCalls = append(a.Choices[choice.Index].ToolCalls, models.ResponseToolCall{
						ID:     toolCallDelta.ID,
						Type:   toolCallDelta.Type,
						CallID: toolCallDelta.CallID,
						Function: struct {
							Name      string `json:"name"`
							Arguments string `json:"arguments"`
						}{
							Name:      toolCallDelta.Function.Name,
							Arguments: toolCallDelta.Function.Arguments,
						},
					})
				} else {
					// Update existing tool call
					if toolCallDelta.Type != "" {
						a.Choices[choice.Index].ToolCalls[toolCallIndex].Type = toolCallDelta.Type
					}
					if toolCallDelta.Function.Name != "" {
						a.Choices[choice.Index].ToolCalls[toolCallIndex].Function.Name = toolCallDelta.Function.Name
					}
					if toolCallDelta.Function.Arguments != "" {
						a.Choices[choice.Index].ToolCalls[toolCallIndex].Function.Arguments += toolCallDelta.Function.Arguments
					}
				}
			}
		}

		// Update the finish reason
		if choice.FinishReason != "" {
			a.Choices[choice.Index].FinishReason = choice.FinishReason
		}
	}
}

// ToResponse converts the accumulator to a response
func (a *ResponsesStreamAccumulator) ToResponse() *models.ResponseResponse {
	choices := make([]models.ResponseChoice, len(a.Choices))
	for i, choice := range a.Choices {
		choices[i] = models.ResponseChoice{
			Index:        choice.Index,
			Message:      choice.Message,
			FinishReason: choice.FinishReason,
		}

		// Include tool calls if present
		if len(choice.ToolCalls) > 0 {
			choices[i].ToolCalls = choice.ToolCalls
		}
	}

	return &models.ResponseResponse{
		ID:      a.ID,
		Object:  a.Object,
		Created: a.Created,
		Model:   a.Model,
		Choices: choices,
		Usage:   a.Usage,
	}
}
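A quick sketch (not part of the diff) of the merge behavior implemented above: argument deltas for tool calls with the same ID are concatenated by `AddChunk`, so a call streamed in pieces ends up whole in the final response.

```go
acc := &client.ResponsesStreamAccumulator{}

// First chunk carries the response metadata.
acc.AddChunk(&models.ResponseStreamResponse{ID: "resp_abc", Model: "gpt-4o"})

// Two argument deltas arrive for the same tool call ID...
first := models.ResponseToolCall{ID: "call_1", Type: "function"}
first.Function.Name = "get_weather"
first.Function.Arguments = `{"location":`
second := models.ResponseToolCall{ID: "call_1"}
second.Function.Arguments = `"Berlin"}`

acc.AddChunk(&models.ResponseStreamResponse{
	Choices: []models.ResponseStreamChoice{{Index: 0, Delta: models.ResponseStreamDelta{ToolCalls: []models.ResponseToolCall{first}}}},
})
acc.AddChunk(&models.ResponseStreamResponse{
	Choices: []models.ResponseStreamChoice{{Index: 0, Delta: models.ResponseStreamDelta{ToolCalls: []models.ResponseToolCall{second}}}},
})

// ...and are merged into one call with the complete arguments:
// acc.ToResponse().Choices[0].ToolCalls[0].Function.Arguments == `{"location":"Berlin"}`
```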
148 examples/advanced_options/main.go Normal file
@@ -0,0 +1,148 @@
package main

import (
	"context"
	"fmt"
	"os"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

func main() {
	// Get API key from environment variable
	apiKey := os.Getenv("OPENAI_API_KEY")
	if apiKey == "" {
		fmt.Println("OPENAI_API_KEY environment variable is not set")
		os.Exit(1)
	}

	// Create a new client
	client := openairesponses.NewClient(apiKey)

	// Example 1: Using max_output_tokens to limit response length
	fmt.Println("Example 1: Using max_output_tokens to limit response length")
	resp1, err := client.Responses.Create(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.UserInputMessage("Write a detailed essay about artificial intelligence."),
			},
			// Limit the response to 100 tokens
			MaxOutputTokens: 100,
		},
	)
	if err != nil {
		fmt.Printf("Error creating response: %v\n", err)
		os.Exit(1)
	}

	// Print the response
	fmt.Printf("Response: %s\n", resp1.Choices[0].Message.Content)
	printUsage(resp1)

	fmt.Println("\n---\n")

	// Example 2: Using instructions to set the system message
	fmt.Println("Example 2: Using instructions to set system message")
	resp2, err := client.Responses.Create(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.UserInputMessage("Tell me a joke."),
			},
			// Set custom instructions
			Instructions: "You are a comedian assistant that specializes in dad jokes.",
		},
	)
	if err != nil {
		fmt.Printf("Error creating response: %v\n", err)
		os.Exit(1)
	}

	// Print the response
	fmt.Printf("Response: %s\n", resp2.Choices[0].Message.Content)
	printUsage(resp2)

	fmt.Println("\n---\n")

	// Example 3: Multi-turn conversation with previous_response_id
	fmt.Println("Example 3: Multi-turn conversation with previous_response_id")

	// First turn
	fmt.Println("First turn:")
	resp3, err := client.Responses.Create(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.UserInputMessage("What's the capital of France?"),
			},
		},
	)
	if err != nil {
		fmt.Printf("Error creating response: %v\n", err)
		os.Exit(1)
	}

	// Print the response
	fmt.Printf("Response: %s\n", resp3.Choices[0].Message.Content)
	printUsage(resp3)

	// Second turn using previous_response_id
	fmt.Println("\nSecond turn (using previous_response_id):")
	resp4, err := client.Responses.Create(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.UserInputMessage("What's the population of that city?"),
			},
			// Use the previous response ID to continue the conversation
			PreviousResponseID: resp3.ID,
		},
	)
	if err != nil {
		fmt.Printf("Error creating response: %v\n", err)
		os.Exit(1)
	}

	// Print the response
	fmt.Printf("Response: %s\n", resp4.Choices[0].Message.Content)
	printUsage(resp4)

	// Third turn with new instructions
	fmt.Println("\nThird turn (with new instructions):")
	resp5, err := client.Responses.Create(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.UserInputMessage("Tell me more interesting facts about this city."),
			},
			// Use the previous response ID to continue the conversation
			PreviousResponseID: resp4.ID,
			// Change the instructions for this turn
			Instructions: "You are a travel guide that provides interesting and unusual facts about cities.",
		},
	)
	if err != nil {
		fmt.Printf("Error creating response: %v\n", err)
		os.Exit(1)
	}

	// Print the response
	fmt.Printf("Response: %s\n", resp5.Choices[0].Message.Content)
	printUsage(resp5)
}

// printUsage prints usage information for a response
func printUsage(resp *openairesponses.ResponseResponse) {
	if resp.Usage != nil {
		fmt.Printf("\nUsage information:\n")
		fmt.Printf("  Prompt tokens: %d\n", resp.Usage.PromptTokens)
		fmt.Printf("  Completion tokens: %d\n", resp.Usage.CompletionTokens)
		fmt.Printf("  Total tokens: %d\n", resp.Usage.TotalTokens)
	}
}
81 examples/file_search/main.go Normal file
@@ -0,0 +1,81 @@
package main

import (
	"context"
	"fmt"
	"os"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

func main() {
	// Get API key from environment variable
	apiKey := os.Getenv("OPENAI_API_KEY")
	if apiKey == "" {
		fmt.Println("OPENAI_API_KEY environment variable is not set")
		os.Exit(1)
	}

	// Create a new client
	client := openairesponses.NewClient(apiKey)

	// Example 1: Using NewFileSearchTool with vector store IDs and max results
	fmt.Println("Example 1: Using NewFileSearchTool with vector store IDs and max results")
	resp1, err := client.Responses.Create(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.UserInputMessage("What are the attributes of an ancient brown dragon?"),
			},
			Tools: []openairesponses.ResponseTool{
				// Specify vector store IDs and max results for file search
				openairesponses.NewFileSearchTool([]string{"vs_1234567890"}, 20),
			},
		},
	)
	if err != nil {
		fmt.Printf("Error creating response: %v\n", err)
		os.Exit(1)
	}

	// Print the response
	fmt.Printf("Response: %s\n", resp1.Choices[0].Message.Content)
	printUsage(resp1)

	fmt.Println("\n---\n")

	// Example 2: Using NewFileSearchToolWithIDs for simpler cases
	fmt.Println("Example 2: Using NewFileSearchToolWithIDs for simpler cases")
	resp2, err := client.Responses.Create(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.UserInputMessage("Find information about climate change in my documents."),
			},
			Tools: []openairesponses.ResponseTool{
				// Just specify vector store IDs
				openairesponses.NewFileSearchToolWithIDs("vs_1234567890", "vs_0987654321"),
			},
		},
	)
	if err != nil {
		fmt.Printf("Error creating response: %v\n", err)
		os.Exit(1)
	}

	// Print the response
	fmt.Printf("Response: %s\n", resp2.Choices[0].Message.Content)
	printUsage(resp2)
}

// printUsage prints usage information for a response
func printUsage(resp *openairesponses.ResponseResponse) {
	if resp.Usage != nil {
		fmt.Printf("\nUsage information:\n")
		fmt.Printf("  Prompt tokens: %d\n", resp.Usage.PromptTokens)
		fmt.Printf("  Completion tokens: %d\n", resp.Usage.CompletionTokens)
		fmt.Printf("  Total tokens: %d\n", resp.Usage.TotalTokens)
	}
}
160 examples/function_call_output/main.go Normal file
@@ -0,0 +1,160 @@
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

// WeatherParams represents the parameters for the weather function
type WeatherParams struct {
	Location string `json:"location"`
	Unit     string `json:"unit,omitempty"`
}

// getWeather is a mock function to get the weather
func getWeather(location, unit string) string {
	// In a real application, this would call a weather API
	return fmt.Sprintf("The weather in %s is sunny and 25 degrees %s.", location, unit)
}

func main() {
	// Get API key from environment variable
	apiKey := os.Getenv("OPENAI_API_KEY")
	if apiKey == "" {
		fmt.Println("OPENAI_API_KEY environment variable is not set")
		os.Exit(1)
	}

	// Create a new client
	client := openairesponses.NewClient(apiKey)

	// Define the weather function parameters schema
	weatherParamsSchema := map[string]interface{}{
		"type": "object",
		"properties": map[string]interface{}{
			"location": map[string]interface{}{
				"type":        "string",
				"description": "The city and state, e.g. San Francisco, CA",
			},
			"unit": map[string]interface{}{
				"type": "string",
				"enum": []string{"celsius", "fahrenheit"},
			},
		},
		"required": []string{"location"},
	}

	// Define tools
	tools := []openairesponses.ResponseTool{
		openairesponses.NewFunctionTool(
			"get_weather",
			"Get the current weather in a given location",
			weatherParamsSchema,
		),
	}

	// Create initial messages
	input := []openairesponses.ResponseInputMessage{
		openairesponses.DeveloperInputMessage("You are a helpful assistant with access to weather information."),
		openairesponses.UserInputMessage("What's the weather like in San Francisco and how does it compare to New York?"),
	}

	// Create a new response with a function tool
	fmt.Println("Creating initial response...")
	resp1, err := client.Responses.Create(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: input,
			Tools: tools,
			Store: true,
		},
	)
	if err != nil {
		fmt.Printf("Error creating response: %v\n", err)
		os.Exit(1)
	}

	// Print the response
	fmt.Printf("Response: %s\n", resp1.GetOutputText())

	// Check if the model wants to call a function
	if len(resp1.Choices) > 0 && len(resp1.Choices[0].ToolCalls) > 0 {
		fmt.Println("\nProcessing tool calls...")

		// Create a new input array that includes the previous conversation
		newInput := make([]openairesponses.ResponseInputMessage, len(input))
		copy(newInput, input)

		// Process each tool call
		for _, toolCall := range resp1.Choices[0].ToolCalls {
			if toolCall.Function.Name == "get_weather" {
				// Parse the function arguments
				var params WeatherParams
				if err := json.Unmarshal([]byte(toolCall.Function.Arguments), &params); err != nil {
					fmt.Printf("Error parsing function arguments: %v\n", err)
					continue
				}

				// Call the function
				unit := params.Unit
				if unit == "" {
					unit = "celsius"
				}
				result := getWeather(params.Location, unit)
				fmt.Printf("Function %s returned: %s\n", toolCall.Function.Name, result)

				// Append the model's function call to the input
				newInput = append(newInput, openairesponses.ResponseInputMessage{
					Role:    "assistant",
					Content: fmt.Sprintf("I need to call the %s function to get weather information for %s.", toolCall.Function.Name, params.Location),
				})

				// Append the function call result to the input using the new format
				newInput = append(newInput, openairesponses.FunctionCallOutputMessage(
					toolCall.GetCallID(),
					result,
				))
			}
		}

		// Create a follow-up response with the function results
		fmt.Println("\nCreating follow-up response with function results...")
		resp2, err := client.Responses.Create(
			context.Background(),
			openairesponses.ResponseRequest{
				Model: "gpt-4o",
				Input: newInput,
				Tools: tools,
				Store: true,
			},
		)
		if err != nil {
			fmt.Printf("Error creating follow-up response: %v\n", err)
			os.Exit(1)
		}

		// Print the follow-up response
		fmt.Printf("\nFollow-up response: %s\n", resp2.GetOutputText())

		// Print usage information if available
		if resp2.Usage != nil {
			fmt.Printf("\nFollow-up usage information:\n")
			fmt.Printf("  Prompt tokens: %d\n", resp2.Usage.PromptTokens)
			fmt.Printf("  Completion tokens: %d\n", resp2.Usage.CompletionTokens)
			fmt.Printf("  Total tokens: %d\n", resp2.Usage.TotalTokens)
		}
	}

	// Print usage information if available
	if resp1.Usage != nil {
		fmt.Printf("\nInitial usage information:\n")
		fmt.Printf("  Prompt tokens: %d\n", resp1.Usage.PromptTokens)
		fmt.Printf("  Completion tokens: %d\n", resp1.Usage.CompletionTokens)
		fmt.Printf("  Total tokens: %d\n", resp1.Usage.TotalTokens)
	}
}
72 examples/function_fix_test/main.go Normal file
@@ -0,0 +1,72 @@
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

func main() {
	// Get API key from environment variable
	apiKey := os.Getenv("OPENAI_API_KEY")
	if apiKey == "" {
		fmt.Println("OPENAI_API_KEY environment variable is not set")
		os.Exit(1)
	}

	// Create a new client
	client := openairesponses.NewClient(apiKey)

	// Define a simple function with parameters
	weatherParamsSchema := map[string]interface{}{
		"type": "object",
		"properties": map[string]interface{}{
			"location": map[string]interface{}{
				"type":        "string",
				"description": "The city and state, e.g. San Francisco, CA",
			},
		},
		"required": []string{"location"},
	}

	// Create a request with a function tool
	req := openairesponses.ResponseRequest{
		Model: "gpt-4o",
		Input: []openairesponses.ResponseInputMessage{
			openairesponses.UserInputMessage("What's the weather in New York?"),
		},
		Tools: []openairesponses.ResponseTool{
			openairesponses.NewFunctionTool(
				"get_weather",
				"Get the current weather in a given location",
				weatherParamsSchema,
			),
		},
	}

	// Print the request as JSON for debugging
	reqJSON, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println("Request JSON:")
	fmt.Println(string(reqJSON))

	// Try to send the request
	fmt.Println("\nSending request to OpenAI API...")
	resp, err := client.Responses.Create(context.Background(), req)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		os.Exit(1)
	}

	fmt.Println("Response received successfully!")
	fmt.Printf("Response ID: %s\n", resp.ID)
	if len(resp.Choices) > 0 && len(resp.Choices[0].ToolCalls) > 0 {
		fmt.Println("Tool calls:")
		for _, tc := range resp.Choices[0].ToolCalls {
			fmt.Printf("  Function: %s\n", tc.Function.Name)
			fmt.Printf("  Arguments: %s\n", tc.Function.Arguments)
		}
	}
}
196 examples/function_streaming/main.go Normal file
@@ -0,0 +1,196 @@
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/gosticks/openai-responses-api-go/client"
|
||||
"github.com/gosticks/openai-responses-api-go/models"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Create a new client
|
||||
	c := client.NewClient(client.WithAPIKey(os.Getenv("OPENAI_API_KEY")))

	// Create a responses client
	responsesClient := client.NewResponses(c)

	// Create a weather function parameter schema
	weatherParamSchema := map[string]interface{}{
		"type": "object",
		"properties": map[string]interface{}{
			"location": map[string]interface{}{
				"type":        "string",
				"description": "The city and state, e.g. San Francisco, CA",
			},
		},
		"required": []string{"location"},
	}

	// Create a weather function tool
	weatherTool := models.ResponseTool{
		Type:        "function",
		Name:        "get_weather",
		Description: "Get the current weather in a given location",
		Parameters:  weatherParamSchema,
	}

	// Create a file search tool
	fileSearchTool := models.ResponseTool{
		Type:           "file_search",
		Description:    "Search through files to find relevant information",
		VectorStoreIDs: []string{"default_store"},
		MaxNumResults:  3,
	}

	// Define the query prompt - specifically designed to trigger both tool types
	userPrompt := "What's the weather like in New York, and can you find information about weather patterns in my documents?"

	// Print what we're doing
	fmt.Println("Testing OpenAI Responses API with streaming function calls and file search")
	fmt.Printf("Prompt: %s\n\n", userPrompt)

	// Create a request
	req := models.ResponseRequest{
		Model: "gpt-4o",
		Input: []models.ResponseInputMessage{
			{
				Role:    "user",
				Content: userPrompt,
			},
		},
		Tools:  []models.ResponseTool{weatherTool, fileSearchTool},
		Stream: true,
	}

	// Print the request as JSON for debugging
	reqJSON, _ := json.MarshalIndent(req, "", " ")
	fmt.Printf("Request: %s\n\n", reqJSON)

	// Create a stream
	ctx := context.Background()
	stream, err := responsesClient.CreateStream(ctx, req)
	if err != nil {
		fmt.Printf("Error creating stream: %v\n", err)
		return
	}
	defer stream.Close()

	// Create a new accumulator
	accumulator := &client.ResponsesStreamAccumulator{}

	// Read from the stream
	fmt.Println("Streaming response:")
	for {
		resp, err := stream.Recv()
		if err != nil {
			// Note: a normal end-of-stream (io.EOF) also lands here and is
			// reported as an error.
			fmt.Printf("\nStream closed with error: %v\n", err)
			break
		}

		// Add the chunk to our accumulator
		accumulator.AddChunk(resp)

		// Print the streaming update
		printStreamingUpdate(resp)
	}

	// Get the final result
	finalResponse := accumulator.ToResponse()

	// Pretty print the final response
	fmt.Println("\n\n--- Final Accumulated Response ---")
	finalJSON, _ := json.MarshalIndent(finalResponse, "", " ")
	fmt.Printf("%s\n", finalJSON)

	// Check if the response contains tool calls
	if len(finalResponse.Choices) > 0 && len(finalResponse.Choices[0].ToolCalls) > 0 {
		fmt.Println("\n--- Tool Call Details ---")
		for i, toolCall := range finalResponse.Choices[0].ToolCalls {
			fmt.Printf("Tool Call #%d:\n", i+1)
			fmt.Printf(" ID: %s\n", toolCall.ID)
			fmt.Printf(" Type: %s\n", toolCall.Type)

			if toolCall.Type == "function" {
				fmt.Printf(" Function: %s\n", toolCall.Function.Name)
				fmt.Printf(" Arguments: %s\n", toolCall.Function.Arguments)

				// Parse the arguments for display
				var args map[string]interface{}
				if err := json.Unmarshal([]byte(toolCall.Function.Arguments), &args); err == nil {
					fmt.Printf(" Parsed Arguments: %v\n", args)
				}
			}
		}
	}
}

// printStreamingUpdate prints relevant information from a streaming chunk
func printStreamingUpdate(chunk *models.ResponseStreamResponse) {
	// Print ID and model information when available
	if chunk.ID != "" {
		fmt.Printf("\n[Response ID: %s, Model: %s]", chunk.ID, chunk.Model)
	}

	// Print usage information when available
	if chunk.Usage != nil {
		fmt.Printf("\n[Usage - Prompt: %d, Completion: %d, Total: %d]",
			chunk.Usage.PromptTokens,
			chunk.Usage.CompletionTokens,
			chunk.Usage.TotalTokens)
	}

	for _, choice := range chunk.Choices {
		// Print content delta
		if choice.Delta.Content != "" {
			fmt.Print(choice.Delta.Content)
		}

		// Print tool call information
		for _, toolCall := range choice.Delta.ToolCalls {
			// Print basic tool call info
			if toolCall.ID != "" {
				fmt.Printf("\n[Tool Call ID: %s]", toolCall.ID)
			}

			if toolCall.Type != "" {
				// Handle different tool call types
				switch toolCall.Type {
				case "function":
					fmt.Printf("\n[Function Call]")
				case "file_search":
					fmt.Printf("\n[File Search Call]")
				default:
					fmt.Printf("\n[Tool Call Type: %s]", toolCall.Type)
				}
			}

			// Print function details
			if toolCall.Function.Name != "" {
				fmt.Printf("\n[Function Name: %s]", toolCall.Function.Name)
			}

			if toolCall.Function.Arguments != "" {
				// Check if it's a complete JSON or a delta
				if isValidJSON(toolCall.Function.Arguments) {
					fmt.Printf("\n[Complete Arguments: %s]", toolCall.Function.Arguments)
				} else {
					fmt.Printf("\n[Argument Delta: %s]", toolCall.Function.Arguments)
				}
			}
		}

		// Print finish reason
		if choice.FinishReason != "" {
			fmt.Printf("\n[Finished: %s]", choice.FinishReason)
		}
	}
}

// isValidJSON checks if a string is valid JSON
func isValidJSON(s string) bool {
	var js json.RawMessage
	return json.Unmarshal([]byte(s), &js) == nil
}
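The `[Argument Delta: ...]` branch above shows that function-call arguments can arrive as JSON fragments rather than complete documents. A minimal sketch of stitching fragments together per call, built only from types in this commit; it assumes each delta repeats the originating call ID, which a given backend may or may not do:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/gosticks/openai-responses-api-go/models"
)

// argAccumulator collects streamed function-call argument fragments,
// keyed by tool-call ID (falling back to call_id via GetCallID).
type argAccumulator map[string]*strings.Builder

// add appends every argument fragment in a chunk to its call's buffer.
func (a argAccumulator) add(chunk *models.ResponseStreamResponse) {
	for _, choice := range chunk.Choices {
		for _, tc := range choice.Delta.ToolCalls {
			if tc.Function.Arguments == "" {
				continue
			}
			key := tc.GetCallID()
			if a[key] == nil {
				a[key] = &strings.Builder{}
			}
			a[key].WriteString(tc.Function.Arguments)
		}
	}
}

func main() {
	// Feed a fabricated chunk through the accumulator to show the mechanics.
	acc := argAccumulator{}
	chunk := &models.ResponseStreamResponse{
		Choices: []models.ResponseStreamChoice{{
			Delta: models.ResponseStreamDelta{
				ToolCalls: []models.ResponseToolCall{{ID: "call_1"}},
			},
		}},
	}
	chunk.Choices[0].Delta.ToolCalls[0].Function.Arguments = `{"location":"New York`
	acc.add(chunk)
	fmt.Println(acc["call_1"].String()) // prints the partial JSON so far
}
```

Once the stream finishes, the same `isValidJSON` check used above can confirm each buffer has become complete JSON before unmarshalling.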
48
examples/simple/main.go
Normal file
@@ -0,0 +1,48 @@
package main

import (
	"context"
	"fmt"
	"os"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

func main() {
	// Get API key from environment variable
	apiKey := os.Getenv("OPENAI_API_KEY")
	if apiKey == "" {
		fmt.Println("OPENAI_API_KEY environment variable is not set")
		os.Exit(1)
	}

	// Create a new client
	client := openairesponses.NewClient(apiKey)

	// Create a new response
	resp, err := client.Responses.Create(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.DeveloperInputMessage("You are a helpful assistant."),
				openairesponses.UserInputMessage("Hello, how are you today?"),
			},
		},
	)
	if err != nil {
		fmt.Printf("Error creating response: %v\n", err)
		os.Exit(1)
	}

	// Print the response
	fmt.Printf("Response: %s\n", resp.Choices[0].Message.Content)

	// Print the usage information if available
	if resp.Usage != nil {
		fmt.Printf("\nUsage information:\n")
		fmt.Printf(" Prompt tokens: %d\n", resp.Usage.PromptTokens)
		fmt.Printf(" Completion tokens: %d\n", resp.Usage.CompletionTokens)
		fmt.Printf(" Total tokens: %d\n", resp.Usage.TotalTokens)
	}
}
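One caveat in the example above: `resp.Choices[0].Message.Content` panics if the API returns an empty `Choices` slice. The `GetOutputText` helper defined later in this commit (models/models.go) guards that case, so the print line can be swapped for:

```go
// Equivalent, but returns "" instead of panicking on an empty Choices slice.
fmt.Printf("Response: %s\n", resp.GetOutputText())
```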
90
examples/streaming/main.go
Normal file
@@ -0,0 +1,90 @@
package main

import (
	"context"
	"fmt"
	"io"
	"os"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

func main() {
	// Get API key from environment variable
	apiKey := os.Getenv("OPENAI_API_KEY")
	if apiKey == "" {
		fmt.Println("OPENAI_API_KEY environment variable is not set")
		os.Exit(1)
	}

	// Create a new client
	client := openairesponses.NewClient(apiKey)

	// Create a new streaming response
	stream, err := client.Responses.CreateStream(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.DeveloperInputMessage("You are a helpful assistant."),
				openairesponses.UserInputMessage("Write a short poem about programming."),
			},
		},
	)
	if err != nil {
		fmt.Printf("Error creating streaming response: %v\n", err)
		os.Exit(1)
	}
	defer stream.Close()

	// Create an accumulator to accumulate the streaming responses
	accumulator := &openairesponses.ResponsesStreamAccumulator{}

	// Print the streaming response
	fmt.Println("Streaming response:")
	contentReceived := false
	for {
		chunk, err := stream.Recv()
		if err == io.EOF {
			fmt.Println("\nStream closed")
			break
		}
		if err != nil {
			fmt.Printf("Error receiving chunk: %v\n", err)
			os.Exit(1)
		}

		// Add the chunk to the accumulator
		accumulator.AddChunk(chunk)

		// Print the chunk content if available
		for _, choice := range chunk.Choices {
			if choice.Delta.Content != "" {
				fmt.Print(choice.Delta.Content)
				contentReceived = true
			}
		}
	}

	if !contentReceived {
		fmt.Println("No content was streamed.")
	}

	// Convert the accumulator to a response
	resp := accumulator.ToResponse()

	// Print the accumulated response
	if len(resp.Choices) > 0 && resp.Choices[0].Message.Content != "" {
		fmt.Printf("\nAccumulated response: %s\n", resp.Choices[0].Message.Content)
	} else {
		fmt.Println("\nNo content received in the response.")
	}

	// Print the usage information if available
	if resp.Usage != nil {
		fmt.Printf("\nUsage information:\n")
		fmt.Printf(" Prompt tokens: %d\n", resp.Usage.PromptTokens)
		fmt.Printf(" Completion tokens: %d\n", resp.Usage.CompletionTokens)
		fmt.Printf(" Total tokens: %d\n", resp.Usage.TotalTokens)
	}
}
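The Recv/AddChunk loop above is repeated verbatim in every streaming example in this commit. A hedged sketch of factoring it out, assuming only the `Recv` and `AddChunk` signatures used here (the interface and package name are illustrative, not part of the library):

```go
package streamutil

import (
	"io"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

// chunkSource abstracts the one method these examples need from a stream;
// the value returned by CreateStream is assumed to satisfy it.
type chunkSource interface {
	Recv() (*openairesponses.ResponseStreamResponse, error)
}

// drain reads a stream to completion, feeding every chunk into the
// accumulator and invoking onDelta for each content fragment.
func drain(s chunkSource, acc *openairesponses.ResponsesStreamAccumulator, onDelta func(string)) error {
	for {
		chunk, err := s.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		acc.AddChunk(chunk)
		for _, choice := range chunk.Choices {
			if choice.Delta.Content != "" {
				onDelta(choice.Delta.Content)
			}
		}
	}
}
```

With such a helper, the read loop in the example collapses to `drain(stream, accumulator, func(s string) { fmt.Print(s) })`.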
366
examples/streaming_tools/main.go
Normal file
@@ -0,0 +1,366 @@
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

// WeatherParams represents the parameters for the weather function
type WeatherParams struct {
	Location string `json:"location"`
	Unit     string `json:"unit,omitempty"`
}

// getWeather is a mock function to get the weather
func getWeather(location, unit string) string {
	// In a real application, this would call a weather API
	return fmt.Sprintf("The weather in %s is sunny and 25 degrees %s.", location, unit)
}

func main() {
	// Get API key from environment variable
	apiKey := os.Getenv("OPENAI_API_KEY")
	if apiKey == "" {
		fmt.Println("OPENAI_API_KEY environment variable is not set")
		os.Exit(1)
	}

	// Create a new client
	client := openairesponses.NewClient(apiKey)

	// Define the weather function parameters schema
	weatherParamsSchema := map[string]interface{}{
		"type": "object",
		"properties": map[string]interface{}{
			"location": map[string]interface{}{
				"type":        "string",
				"description": "The city and state, e.g. San Francisco, CA",
			},
			"unit": map[string]interface{}{
				"type": "string",
				"enum": []string{"celsius", "fahrenheit"},
			},
		},
		"required": []string{"location"},
	}

	// Create a new streaming response with a function tool
	fmt.Println("Creating streaming response with function tool...")
	stream, err := client.Responses.CreateStream(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.DeveloperInputMessage("You are a helpful assistant with access to weather information."),
				openairesponses.UserInputMessage("What's the weather like in San Francisco?"),
			},
			Tools: []openairesponses.ResponseTool{
				openairesponses.NewFunctionTool(
					"get_weather",
					"Get the current weather in a given location",
					weatherParamsSchema,
				),
			},
			// Set instructions for the model
			Instructions: "You are a weather assistant. Always use the get_weather function to retrieve weather information.",
		},
	)
	if err != nil {
		fmt.Printf("Error creating streaming response: %v\n", err)
		os.Exit(1)
	}
	defer stream.Close()

	// Create an accumulator to accumulate the streaming responses
	accumulator := &openairesponses.ResponsesStreamAccumulator{}

	// Process the streaming response
	fmt.Println("Streaming response:")
	for {
		chunk, err := stream.Recv()
		if err == io.EOF {
			fmt.Println("\nStream closed")
			break
		}
		if err != nil {
			fmt.Printf("Error receiving chunk: %v\n", err)
			os.Exit(1)
		}

		// Add the chunk to the accumulator
		accumulator.AddChunk(chunk)

		// Print the chunk content if available
		for _, choice := range chunk.Choices {
			if choice.Delta.Content != "" {
				fmt.Print(choice.Delta.Content)
			}

			// Print tool calls if available
			if len(choice.Delta.ToolCalls) > 0 {
				for _, toolCall := range choice.Delta.ToolCalls {
					fmt.Printf("\nTool call received - Function: %s\n", toolCall.Function.Name)
					if toolCall.Function.Arguments != "" {
						fmt.Printf("Arguments: %s\n", toolCall.Function.Arguments)
					}
				}
			}
		}
	}

	// Convert the accumulator to a response
	resp := accumulator.ToResponse()

	// Check if the model wants to call a function
	if len(resp.Choices) > 0 && len(resp.Choices[0].ToolCalls) > 0 {
		fmt.Println("\n\nProcessing tool calls...")

		// Process each tool call
		for _, toolCall := range resp.Choices[0].ToolCalls {
			if toolCall.Function.Name == "get_weather" {
				// Parse the function arguments
				var params WeatherParams
				if err := json.Unmarshal([]byte(toolCall.Function.Arguments), &params); err != nil {
					fmt.Printf("Error parsing function arguments: %v\n", err)
					continue
				}

				// Call the function
				unit := params.Unit
				if unit == "" {
					unit = "celsius"
				}
				result := getWeather(params.Location, unit)
				fmt.Printf("\nFunction result: %s\n", result)

				// Create a new streaming response with the function result using previous_response_id
				fmt.Println("\nCreating follow-up streaming response with function result...")
				followUpStream, err := client.Responses.CreateStream(
					context.Background(),
					openairesponses.ResponseRequest{
						Model: "gpt-4o",
						Input: []openairesponses.ResponseInputMessage{
							// Only need to provide the new user message and tool result
							openairesponses.SystemInputMessage(fmt.Sprintf("Function get_weather returned: %s", result)),
						},
						// Use the previous response ID to continue the conversation
						PreviousResponseID: resp.ID,
					},
				)
				if err != nil {
					fmt.Printf("Error creating follow-up streaming response: %v\n", err)
					os.Exit(1)
				}
				defer followUpStream.Close()

				// Create a new accumulator for the follow-up response
				followUpAccumulator := &openairesponses.ResponsesStreamAccumulator{}

				// Process the follow-up streaming response
				fmt.Println("Follow-up streaming response:")
				for {
					chunk, err := followUpStream.Recv()
					if err == io.EOF {
						fmt.Println("\nFollow-up stream closed")
						break
					}
					if err != nil {
						fmt.Printf("Error receiving follow-up chunk: %v\n", err)
						os.Exit(1)
					}

					// Add the chunk to the accumulator
					followUpAccumulator.AddChunk(chunk)

					// Print the chunk content if available
					for _, choice := range chunk.Choices {
						if choice.Delta.Content != "" {
							fmt.Print(choice.Delta.Content)
						}
					}
				}

				// Convert the follow-up accumulator to a response
				followUpResp := followUpAccumulator.ToResponse()

				// Ask a follow-up question using previous_response_id
				fmt.Println("\n\nAsking a follow-up question...")
				followUpQuestion, err := client.Responses.CreateStream(
					context.Background(),
					openairesponses.ResponseRequest{
						Model: "gpt-4o",
						Input: []openairesponses.ResponseInputMessage{
							openairesponses.UserInputMessage("How does that compare to the weather in New York?"),
						},
						// Use the previous response ID to continue the conversation
						PreviousResponseID: followUpResp.ID,
						// Tools are still available from the previous response
						Tools: []openairesponses.ResponseTool{
							openairesponses.NewFunctionTool(
								"get_weather",
								"Get the current weather in a given location",
								weatherParamsSchema,
							),
						},
					},
				)
				if err != nil {
					fmt.Printf("Error creating follow-up question: %v\n", err)
					os.Exit(1)
				}
				defer followUpQuestion.Close()

				// Create a new accumulator for the follow-up question
				questionAccumulator := &openairesponses.ResponsesStreamAccumulator{}

				// Process the follow-up question
				fmt.Println("Follow-up question response:")
				for {
					chunk, err := followUpQuestion.Recv()
					if err == io.EOF {
						fmt.Println("\nFollow-up question stream closed")
						break
					}
					if err != nil {
						fmt.Printf("Error receiving follow-up question chunk: %v\n", err)
						os.Exit(1)
					}

					// Add the chunk to the accumulator
					questionAccumulator.AddChunk(chunk)

					// Print the chunk content if available
					for _, choice := range chunk.Choices {
						if choice.Delta.Content != "" {
							fmt.Print(choice.Delta.Content)
						}

						// Print tool calls if available
						if len(choice.Delta.ToolCalls) > 0 {
							for _, toolCall := range choice.Delta.ToolCalls {
								fmt.Printf("\nTool call received - Function: %s\n", toolCall.Function.Name)
								if toolCall.Function.Arguments != "" {
									fmt.Printf("Arguments: %s\n", toolCall.Function.Arguments)
								}
							}
						}
					}
				}

				// Convert the question accumulator to a response
				questionResp := questionAccumulator.ToResponse()

				// Process any tool calls from the follow-up question
				if len(questionResp.Choices) > 0 && len(questionResp.Choices[0].ToolCalls) > 0 {
					fmt.Println("\n\nProcessing follow-up tool calls...")

					// Process each tool call
					for _, toolCall := range questionResp.Choices[0].ToolCalls {
						if toolCall.Function.Name == "get_weather" {
							// Parse the function arguments
							var params WeatherParams
							if err := json.Unmarshal([]byte(toolCall.Function.Arguments), &params); err != nil {
								fmt.Printf("Error parsing function arguments: %v\n", err)
								continue
							}

							// Call the function
							unit := params.Unit
							if unit == "" {
								unit = "celsius"
							}
							result := getWeather(params.Location, unit)
							fmt.Printf("\nFunction result: %s\n", result)

							// Create a final response with the function result
							fmt.Println("\nCreating final response with function result...")
							finalStream, err := client.Responses.CreateStream(
								context.Background(),
								openairesponses.ResponseRequest{
									Model: "gpt-4o",
									Input: []openairesponses.ResponseInputMessage{
										openairesponses.SystemInputMessage(fmt.Sprintf("Function get_weather returned: %s", result)),
									},
									// Use the previous response ID to continue the conversation
									PreviousResponseID: questionResp.ID,
								},
							)
							if err != nil {
								fmt.Printf("Error creating final response: %v\n", err)
								os.Exit(1)
							}
							defer finalStream.Close()

							// Create a new accumulator for the final response
							finalAccumulator := &openairesponses.ResponsesStreamAccumulator{}

							// Process the final response
							fmt.Println("Final response:")
							for {
								chunk, err := finalStream.Recv()
								if err == io.EOF {
									fmt.Println("\nFinal stream closed")
									break
								}
								if err != nil {
									fmt.Printf("Error receiving final chunk: %v\n", err)
									os.Exit(1)
								}

								// Add the chunk to the accumulator
								finalAccumulator.AddChunk(chunk)

								// Print the chunk content if available
								for _, choice := range chunk.Choices {
									if choice.Delta.Content != "" {
										fmt.Print(choice.Delta.Content)
									}
								}
							}

							// Convert the final accumulator to a response
							finalResp := finalAccumulator.ToResponse()

							// Print usage information if available
							if finalResp.Usage != nil {
								fmt.Printf("\n\nFinal usage information:\n")
								fmt.Printf(" Prompt tokens: %d\n", finalResp.Usage.PromptTokens)
								fmt.Printf(" Completion tokens: %d\n", finalResp.Usage.CompletionTokens)
								fmt.Printf(" Total tokens: %d\n", finalResp.Usage.TotalTokens)
							}
						}
					}
				}

				// Print usage information if available
				if questionResp.Usage != nil {
					fmt.Printf("\nFollow-up question usage information:\n")
					fmt.Printf(" Prompt tokens: %d\n", questionResp.Usage.PromptTokens)
					fmt.Printf(" Completion tokens: %d\n", questionResp.Usage.CompletionTokens)
					fmt.Printf(" Total tokens: %d\n", questionResp.Usage.TotalTokens)
				}

				// Print usage information if available
				if followUpResp.Usage != nil {
					fmt.Printf("\nFollow-up usage information:\n")
					fmt.Printf(" Prompt tokens: %d\n", followUpResp.Usage.PromptTokens)
					fmt.Printf(" Completion tokens: %d\n", followUpResp.Usage.CompletionTokens)
					fmt.Printf(" Total tokens: %d\n", followUpResp.Usage.TotalTokens)
				}
			}
		}
	}

	// Print usage information if available
	if resp.Usage != nil {
		fmt.Printf("\nInitial usage information:\n")
		fmt.Printf(" Prompt tokens: %d\n", resp.Usage.PromptTokens)
		fmt.Printf(" Completion tokens: %d\n", resp.Usage.CompletionTokens)
		fmt.Printf(" Total tokens: %d\n", resp.Usage.TotalTokens)
	}
}
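The follow-up requests above hand the tool result back as a system message. The models package in this commit also defines a dedicated `function_call_output` input item; a sketch of that alternative shape, slotted in place of the first follow-up request (whether the backend prefers it over a system message is not established by this commit):

```go
// Pair the result with the originating call instead of describing it in prose.
followUpReq := openairesponses.ResponseRequest{
	Model: "gpt-4o",
	Input: []openairesponses.ResponseInputMessage{
		openairesponses.FunctionCallOutputMessage(toolCall.GetCallID(), result),
	},
	// Continue the same conversation.
	PreviousResponseID: resp.ID,
}
```

`FunctionCallOutputMessage` and `GetCallID` are both defined in models/models.go later in this diff.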
125
examples/tools/main.go
Normal file
@@ -0,0 +1,125 @@
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

// WeatherParams represents the parameters for the weather function
type WeatherParams struct {
	Location string `json:"location"`
	Unit     string `json:"unit,omitempty"`
}

// getWeather is a mock function to get the weather
func getWeather(location, unit string) string {
	// In a real application, this would call a weather API
	return fmt.Sprintf("The weather in %s is sunny and 25 degrees %s.", location, unit)
}

func main() {
	// Get API key from environment variable
	apiKey := os.Getenv("OPENAI_API_KEY")
	if apiKey == "" {
		fmt.Println("OPENAI_API_KEY environment variable is not set")
		os.Exit(1)
	}

	// Create a new client
	client := openairesponses.NewClient(apiKey)

	// Define the weather function parameters schema
	weatherParamsSchema := map[string]interface{}{
		"type": "object",
		"properties": map[string]interface{}{
			"location": map[string]interface{}{
				"type":        "string",
				"description": "The city and state, e.g. San Francisco, CA",
			},
			"unit": map[string]interface{}{
				"type": "string",
				"enum": []string{"celsius", "fahrenheit"},
			},
		},
		"required": []string{"location"},
	}

	// Create a new response with a function tool
	resp, err := client.Responses.Create(
		context.Background(),
		openairesponses.ResponseRequest{
			Model: "gpt-4o",
			Input: []openairesponses.ResponseInputMessage{
				openairesponses.DeveloperInputMessage("You are a helpful assistant."),
				openairesponses.UserInputMessage("What's the weather like in San Francisco?"),
			},
			Tools: []openairesponses.ResponseTool{
				openairesponses.NewFunctionTool(
					"get_weather",
					"Get the current weather in a given location",
					weatherParamsSchema,
				),
			},
		},
	)
	if err != nil {
		fmt.Printf("Error creating response: %v\n", err)
		os.Exit(1)
	}

	// Check if the model wants to call a function
	if len(resp.Choices) > 0 && len(resp.Choices[0].ToolCalls) > 0 {
		// Get the function call
		toolCall := resp.Choices[0].ToolCalls[0]
		if toolCall.Function.Name == "get_weather" {
			// Parse the function arguments
			var params WeatherParams
			if err := json.Unmarshal([]byte(toolCall.Function.Arguments), &params); err != nil {
				fmt.Printf("Error parsing function arguments: %v\n", err)
				os.Exit(1)
			}

			// Call the function
			unit := params.Unit
			if unit == "" {
				unit = "celsius"
			}
			result := getWeather(params.Location, unit)

			// Create a new response with the function result
			resp, err = client.Responses.Create(
				context.Background(),
				openairesponses.ResponseRequest{
					Model: "gpt-4o",
					Input: []openairesponses.ResponseInputMessage{
						openairesponses.DeveloperInputMessage("You are a helpful assistant."),
						openairesponses.UserInputMessage("What's the weather like in San Francisco?"),
					},
					// In a real application, you would include the tool calls in the messages field
				},
			)
			if err != nil {
				fmt.Printf("Error creating response: %v\n", err)
				os.Exit(1)
			}

			// Print the function result
			fmt.Printf("Function result: %s\n", result)
		}
	}

	// Print the response
	fmt.Printf("Response: %s\n", resp.Choices[0].Message.Content)

	// Print the usage information if available
	if resp.Usage != nil {
		fmt.Printf("\nUsage information:\n")
		fmt.Printf(" Prompt tokens: %d\n", resp.Usage.PromptTokens)
		fmt.Printf(" Completion tokens: %d\n", resp.Usage.CompletionTokens)
		fmt.Printf(" Total tokens: %d\n", resp.Usage.TotalTokens)
	}
}
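The wire format of the function tool registered above follows directly from the struct tags in models/models.go; a small self-contained check (the one-key schema is a stand-in for the full weather schema):

```go
package main

import (
	"encoding/json"
	"fmt"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

func main() {
	tool := openairesponses.NewFunctionTool(
		"get_weather",
		"Get the current weather in a given location",
		map[string]interface{}{"type": "object"},
	)
	b, _ := json.MarshalIndent(tool, "", "  ")
	// Empty fields (function, vector_store_ids, max_num_results) are omitted:
	// {
	//   "type": "function",
	//   "name": "get_weather",
	//   "description": "Get the current weather in a given location",
	//   "parameters": {
	//     "type": "object"
	//   }
	// }
	fmt.Println(string(b))
}
```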
3
go.mod
Normal file
@@ -0,0 +1,3 @@
module github.com/gosticks/openai-responses-api-go

go 1.24.1
292
models/models.go
Normal file
@@ -0,0 +1,292 @@
package models

import "time"

// Usage represents the usage statistics for an API request
type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

// ResponseMessage represents a message in a response
type ResponseMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// ResponseTool represents a tool that can be used in a response
type ResponseTool struct {
	Type           string                `json:"type"`
	Name           string                `json:"name,omitempty"`
	Description    string                `json:"description,omitempty"`
	Parameters     any                   `json:"parameters,omitempty"`
	Function       *ResponseToolFunction `json:"function,omitempty"`
	VectorStoreIDs []string              `json:"vector_store_ids,omitempty"`
	MaxNumResults  int                   `json:"max_num_results,omitempty"`
}

// ResponseToolFunction represents a function definition for a tool
type ResponseToolFunction struct {
	Name           string   `json:"name"`
	Description    string   `json:"description"`
	Parameters     any      `json:"parameters"`
	VectorStoreIDs []string `json:"vector_store_ids,omitempty"`
}

// ResponseToolCall represents a tool call in a response
type ResponseToolCall struct {
	ID     string `json:"id"`
	CallID string `json:"call_id,omitempty"` // Alias for ID, for compatibility
	Type   string `json:"type"`
	Function struct {
		Name      string `json:"name"`
		Arguments string `json:"arguments"`
	} `json:"function"`
}

// ResponseChoice represents a choice in a response
type ResponseChoice struct {
	Index        int                `json:"index"`
	Message      ResponseMessage    `json:"message"`
	FinishReason string             `json:"finish_reason"`
	ToolCalls    []ResponseToolCall `json:"tool_calls,omitempty"`
}

// ResponseInputMessage represents a message in the input field
type ResponseInputMessage struct {
	Role    string `json:"role,omitempty"`
	Content string `json:"content,omitempty"`
	Type    string `json:"type,omitempty"`
	CallID  string `json:"call_id,omitempty"`
	Output  string `json:"output,omitempty"`
}

// ResponseRequest represents a request to the Responses API
type ResponseRequest struct {
	// Model is the model to use for the response
	Model string `json:"model"`
	// Messages is the list of messages to send to the model (deprecated, use Input instead)
	Messages []ResponseMessage `json:"messages,omitempty"`
	// Input is the list of messages to send to the model
	Input []ResponseInputMessage `json:"input"`
	// Tools is the list of tools the model can use
	Tools []ResponseTool `json:"tools,omitempty"`
	// ToolChoice is the tool choice for the model
	ToolChoice any `json:"tool_choice,omitempty"`
	// Temperature is the sampling temperature to use
	Temperature float32 `json:"temperature,omitempty"`
	// TopP is the nucleus sampling parameter
	TopP float32 `json:"top_p,omitempty"`
	// N is the number of responses to generate
	N int `json:"n,omitempty"`
	// Stream indicates whether to stream the response
	Stream bool `json:"stream,omitempty"`
	// MaxTokens is the maximum number of tokens to generate (deprecated, use MaxOutputTokens instead)
	MaxTokens int `json:"max_tokens,omitempty"`
	// MaxOutputTokens is an upper bound for the number of tokens that can be generated for a response
	MaxOutputTokens int `json:"max_output_tokens,omitempty"`
	// PreviousResponseID is the unique ID of the previous response to the model, used for multi-turn conversations
	PreviousResponseID string `json:"previous_response_id,omitempty"`
	// Instructions inserts a system (or developer) message as the first item in the model's context
	Instructions string `json:"instructions,omitempty"`
	// User is the user ID for the request
	User string `json:"user,omitempty"`
	// Store indicates whether to store the response in the system
	Store bool `json:"store,omitempty"`
}

// ResponseResponse represents a response from the Responses API
type ResponseResponse struct {
	ID         string           `json:"id"`
	Object     string           `json:"object"`
	Created    int64            `json:"created"`
	Model      string           `json:"model"`
	Choices    []ResponseChoice `json:"choices"`
	Usage      *Usage           `json:"usage,omitempty"`
	OutputText string           `json:"output_text,omitempty"` // Alias for first choice's content
}

// GetOutputText returns the content of the first choice's message
func (r ResponseResponse) GetOutputText() string {
	if len(r.Choices) == 0 || r.Choices[0].Message.Content == "" {
		return ""
	}
	return r.Choices[0].Message.Content
}

// ResponseStreamChoice represents a choice in a streaming response
type ResponseStreamChoice struct {
	Index        int                 `json:"index"`
	Delta        ResponseStreamDelta `json:"delta"`
	FinishReason string              `json:"finish_reason,omitempty"`
}

// ResponseStreamDelta represents a delta in a streaming response
type ResponseStreamDelta struct {
	Role      string             `json:"role,omitempty"`
	Content   string             `json:"content,omitempty"`
	ToolCalls []ResponseToolCall `json:"tool_calls,omitempty"`
}

// ResponseStreamResponse represents a streaming response from the Responses API
type ResponseStreamResponse struct {
	ID      string                 `json:"id"`
	Object  string                 `json:"object"`
	Created int64                  `json:"created"`
	Model   string                 `json:"model"`
	Choices []ResponseStreamChoice `json:"choices"`
	Usage   *Usage                 `json:"usage,omitempty"`
}

// ResponseState represents the state of a response
type ResponseState struct {
	ID        string            `json:"id"`
	Object    string            `json:"object"`
	CreatedAt time.Time         `json:"created_at"`
	Messages  []ResponseMessage `json:"messages"`
}

// ResponseStateRequest represents a request to create a response state
type ResponseStateRequest struct {
	Messages []ResponseMessage `json:"messages"`
}

// ResponseStateResponse represents a response from creating a response state
type ResponseStateResponse struct {
	ID        string            `json:"id"`
	Object    string            `json:"object"`
	CreatedAt time.Time         `json:"created_at"`
	Messages  []ResponseMessage `json:"messages"`
}

// WebSearchTool represents the web search tool
type WebSearchTool struct {
	Type string `json:"type"`
}

// FileSearchTool represents the file search tool
type FileSearchTool struct {
	Type           string   `json:"type"`
	VectorStoreIDs []string `json:"vector_store_ids,omitempty"`
	MaxNumResults  int      `json:"max_num_results,omitempty"`
}

// ComputerUseTool represents the computer use tool
type ComputerUseTool struct {
	Type string `json:"type"`
}

// NewWebSearchTool creates a new web search tool
func NewWebSearchTool() ResponseTool {
	return ResponseTool{
		Type: "web_search",
	}
}

// NewFileSearchTool creates a new file search tool
func NewFileSearchTool(vectorStoreIDs []string, maxNumResults int) ResponseTool {
	return ResponseTool{
		Type:           "file_search",
		VectorStoreIDs: vectorStoreIDs,
		MaxNumResults:  maxNumResults,
	}
}

// NewFileSearchToolWithIDs creates a new file search tool with just vector store IDs
func NewFileSearchToolWithIDs(vectorStoreIDs ...string) ResponseTool {
	return ResponseTool{
		Type:           "file_search",
		VectorStoreIDs: vectorStoreIDs,
	}
}

// NewComputerUseTool creates a new computer use tool
func NewComputerUseTool() ResponseTool {
	return ResponseTool{
		Type: "computer_use",
	}
}

// NewFunctionTool creates a new function tool
func NewFunctionTool(name, description string, parameters any) ResponseTool {
	return ResponseTool{
		Type:        "function",
		Name:        name,
		Description: description,
		Parameters:  parameters,
	}
}

// UserMessage creates a new user message
func UserMessage(content string) ResponseMessage {
	return ResponseMessage{
		Role:    "user",
		Content: content,
	}
}

// SystemMessage creates a new system message
func SystemMessage(content string) ResponseMessage {
	return ResponseMessage{
		Role:    "system",
		Content: content,
	}
}

// AssistantMessage creates a new assistant message
func AssistantMessage(content string) ResponseMessage {
	return ResponseMessage{
		Role:    "assistant",
		Content: content,
	}
}

// ToolMessage creates a new tool message.
// Note: toolCallID is currently unused, since ResponseMessage has no field
// to carry it.
func ToolMessage(content string, toolCallID string) ResponseMessage {
	return ResponseMessage{
		Role:    "tool",
		Content: content,
	}
}

// UserInputMessage creates a new user input message
func UserInputMessage(content string) ResponseInputMessage {
	return ResponseInputMessage{
		Role:    "user",
		Content: content,
	}
}

// DeveloperInputMessage creates a new developer input message
func DeveloperInputMessage(content string) ResponseInputMessage {
	return ResponseInputMessage{
		Role:    "developer",
		Content: content,
	}
}

// SystemInputMessage creates a new system input message
func SystemInputMessage(content string) ResponseInputMessage {
	return ResponseInputMessage{
		Role:    "system",
		Content: content,
	}
}

// FunctionCallOutputMessage creates a new function call output message
func FunctionCallOutputMessage(callID string, output string) ResponseInputMessage {
	return ResponseInputMessage{
		Type:   "function_call_output",
		CallID: callID,
		Output: output,
	}
}

// GetCallID returns the call_id, using ID if CallID is empty
func (tc ResponseToolCall) GetCallID() string {
	if tc.CallID != "" {
		return tc.CallID
	}
	return tc.ID
}
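One consequence of `omitempty` on the numeric and boolean request fields above: an explicit `temperature` of 0 (or `store: false`) can never be serialized, because Go drops zero values under that tag, while `Input` (no `omitempty`) always appears, as `null` when unset. A self-contained check:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/gosticks/openai-responses-api-go/models"
)

func main() {
	req := models.ResponseRequest{
		Model:       "gpt-4o",
		Temperature: 0, // intended as an explicit 0...
	}
	b, _ := json.Marshal(req)
	// ...but omitempty drops the zero value; Input stays as null:
	// {"model":"gpt-4o","input":null}
	fmt.Println(string(b))
}
```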
113
openai.go
Normal file
@@ -0,0 +1,113 @@
package openairesponses

import (
	"net/http"

	"github.com/gosticks/openai-responses-api-go/client"
	"github.com/gosticks/openai-responses-api-go/models"
)

// Client is the client for the OpenAI Responses API
type Client struct {
	// Responses is the client for the Responses API
	Responses *client.Responses
}

// NewClient creates a new OpenAI Responses API client
func NewClient(apiKey string, options ...client.ClientOption) *Client {
	// Create the base client
	baseClient := client.NewClient(append([]client.ClientOption{
		client.WithAPIKey(apiKey),
	}, options...)...)

	// Create the responses client
	responsesClient := client.NewResponses(baseClient)

	return &Client{
		Responses: responsesClient,
	}
}

// WithBaseURL sets the base URL for the client
func WithBaseURL(baseURL string) client.ClientOption {
	return client.WithBaseURL(baseURL)
}

// WithHTTPClient sets the HTTP client for the client
func WithHTTPClient(httpClient *http.Client) client.ClientOption {
	return client.WithHTTPClient(httpClient)
}

// WithUserAgent sets the user agent for the client
func WithUserAgent(userAgent string) client.ClientOption {
	return client.WithUserAgent(userAgent)
}

// WithOrganization sets the organization ID for the client
func WithOrganization(organization string) client.ClientOption {
	return client.WithOrganization(organization)
}

// Export models
type (
	// ResponseMessage represents a message in a response
	ResponseMessage = models.ResponseMessage
	// ResponseTool represents a tool that can be used in a response
	ResponseTool = models.ResponseTool
	// ResponseToolFunction represents a function definition for a tool
	ResponseToolFunction = models.ResponseToolFunction
	// ResponseToolCall represents a tool call in a response
	ResponseToolCall = models.ResponseToolCall
	// ResponseChoice represents a choice in a response
	ResponseChoice = models.ResponseChoice
	// ResponseRequest represents a request to the Responses API
	ResponseRequest = models.ResponseRequest
	// ResponseResponse represents a response from the Responses API
	ResponseResponse = models.ResponseResponse
	// ResponseStreamResponse represents a streaming response from the Responses API
	ResponseStreamResponse = models.ResponseStreamResponse
	// ResponseState represents the state of a response
	ResponseState = models.ResponseState
	// ResponseStateRequest represents a request to create a response state
	ResponseStateRequest = models.ResponseStateRequest
	// ResponseStateResponse represents a response from creating a response state
	ResponseStateResponse = models.ResponseStateResponse
	// Usage represents the usage statistics for an API request
	Usage = models.Usage
	// ResponsesStream is a stream of responses
	ResponsesStream = client.ResponsesStream
	// ResponsesStreamAccumulator accumulates streaming responses
	ResponsesStreamAccumulator = client.ResponsesStreamAccumulator
	// ResponseInputMessage represents a message in the input field
	ResponseInputMessage = models.ResponseInputMessage
)

// Export helper functions
var (
	// UserMessage creates a new user message
	UserMessage = models.UserMessage
	// SystemMessage creates a new system message
	SystemMessage = models.SystemMessage
	// AssistantMessage creates a new assistant message
	AssistantMessage = models.AssistantMessage
	// ToolMessage creates a new tool message
	ToolMessage = models.ToolMessage
	// NewWebSearchTool creates a new web search tool
	NewWebSearchTool = models.NewWebSearchTool
	// NewFileSearchTool creates a new file search tool with vector store IDs and max results
	NewFileSearchTool = models.NewFileSearchTool
	// NewFileSearchToolWithIDs creates a new file search tool with just vector store IDs
	NewFileSearchToolWithIDs = models.NewFileSearchToolWithIDs
	// NewComputerUseTool creates a new computer use tool
	NewComputerUseTool = models.NewComputerUseTool
	// NewFunctionTool creates a new function tool
	NewFunctionTool = models.NewFunctionTool
	// UserInputMessage creates a new user input message
	UserInputMessage = models.UserInputMessage
	// DeveloperInputMessage creates a new developer input message
	DeveloperInputMessage = models.DeveloperInputMessage
	// SystemInputMessage creates a new system input message
	SystemInputMessage = models.SystemInputMessage
	// FunctionCallOutputMessage creates a new function call output message
	FunctionCallOutputMessage = models.FunctionCallOutputMessage
)
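The option wrappers above let callers configure the underlying transport without importing the internal client package. A minimal sketch of putting them together (the key and gateway URL are placeholders, not real endpoints):

```go
package main

import (
	"net/http"
	"time"

	openairesponses "github.com/gosticks/openai-responses-api-go"
)

func main() {
	// All configuration flows through the re-exported option wrappers.
	c := openairesponses.NewClient(
		"sk-placeholder", // placeholder key
		openairesponses.WithBaseURL("https://gateway.example.com/v1"), // hypothetical proxy
		openairesponses.WithHTTPClient(&http.Client{Timeout: 30 * time.Second}),
		openairesponses.WithUserAgent("my-app/0.1"),
	)
	_ = c.Responses // ready for Create / CreateStream calls
}
```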