claude

package module
v0.0.0-...-dd22fb7 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Nov 16, 2024 License: MIT Imports: 3 Imported by: 3

README

Go client library for Anthropic's Claude

This is an unofficial client library for Anthropic's Claude. This project is not affiliated with Anthropic PBC.

Package Layout

The github.com/psanford/claude package contains the API request and response message definitions. These are shared across the different API providers (Anthropic, AWS/Bedrock, GCP/Vertex).

  • github.com/psanford/claude/anthropic contains an API client for using Anthropic's API.
  • github.com/psanford/claude/bedrock contains an API client for using Claude in AWS Bedrock.
  • github.com/psanford/claude/vertex contains an API client for using Claude in GCP Vertex.

Examples:

Design

The goal of this package is to give a consistent client experience across the different model hosting providers and across streaming vs non-streaming responses.

The Bedrock API mostly takes requests in the same shape as Anthropic's first party API, but not fully. The model IDs are different and need to be passed to bedrock differently, for example.

This package converts requests in the shape of the Anthropic first party API to the correct form for Bedrock and Vertex.

Likewise, the streaming vs non-streaming APIs are similar but not exactly the same. This package unifies streaming and non-streaming into a single interface so you can have one code path that can handle either.

Example

package main

import (
	"context"
	"flag"
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
	"github.com/psanford/claude"
	"github.com/psanford/claude/anthropic"
	"github.com/psanford/claude/bedrock"
	"github.com/psanford/claude/clientiface"
)

var apiProvider = flag.String("api", "anthropic", "API provider (anthropic|bedrock")
var stream = flag.Bool("stream", true, "Stream results")

// main selects an API client based on the -api flag and runs one
// request/response round trip, exiting non-zero on any error.
func main() {
	flag.Parse()

	var client clientiface.Client
	switch *apiProvider {
	case "anthropic":
		client = newAnthropicClient()
	case "bedrock":
		client = newBedrockClient()
	default:
		log.Fatalf("Invalid api provider. Valid options are anthropic or bedrock")
	}

	if err := makeRequestAndHandleResponse(client); err != nil {
		log.Fatalf("Error: %v", err)
	}
}

// newAnthropicClient builds a client for Anthropic's first-party API,
// reading the key from the CLAUDE_API_KEY environment variable.
// It exits the program if the key is missing or empty.
func newAnthropicClient() clientiface.Client {
	key := os.Getenv("CLAUDE_API_KEY")
	if len(key) == 0 {
		log.Fatal("CLAUDE_API_KEY environment variable is not set")
	}
	return anthropic.NewClient(key)
}

// newBedrockClient builds a client backed by AWS Bedrock, using the
// default AWS SDK credential/config chain. It exits the program if the
// AWS configuration cannot be loaded.
func newBedrockClient() clientiface.Client {
	awsCfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatalf("Unable to load AWS SDK config: %v", err)
	}
	return bedrock.NewClient(bedrockruntime.NewFromConfig(awsCfg))
}

// makeRequestAndHandleResponse sends a single user prompt to Claude via
// the given client and prints the reply text to stdout. The same event
// loop handles both streaming and non-streaming responses.
func makeRequestAndHandleResponse(client clientiface.Client) error {
	userTurn := claude.MessageTurn{
		Role: "user",
		Content: []claude.TurnContent{
			claude.TextContent("What are three interesting facts about Go programming?"),
		},
	}

	req := claude.MessageRequest{
		Model:     claude.Claude3Haiku,
		MaxTokens: 1000,
		Stream:    *stream, // Toggle streaming. No other code changes required
		Messages:  []claude.MessageTurn{userTurn},
	}

	resp, err := client.Message(context.Background(), &req)
	if err != nil {
		return fmt.Errorf("error calling Claude API: %w", err)
	}

	// Events arrive on a channel; an event whose Data is an error aborts
	// the loop, otherwise its text is printed as it arrives.
	for event := range resp.Responses() {
		if evErr, ok := event.Data.(error); ok {
			return evErr
		}
		fmt.Print(event.Data.Text())
	}

	return nil
}

Documentation

Overview

Client APIs for Anthropic's Claude

Index

Constants

View Source
// Turn content type discriminators; these match the "type" values used
// by TurnContent implementations (see TextContent, ImageContent,
// ToolResultContent, TurnContentToolUse).
const (
	TurnText       = "text"
	TurnImage      = "image"
	TurnToolUse    = "tool_use"
	TurnToolResult = "tool_result"
)
View Source
// Claude model identifiers accepted in MessageRequest.Model.
// The "-latest" aliases track the newest release of a model family;
// the dated identifiers pin a specific model snapshot.
const (
	Claude3Dot5SonnetLatest = "claude-3-5-sonnet-latest"
	Claude3Dot5HaikuLatest  = "claude-3-5-haiku-latest"
	Claude3OpusLatest       = "claude-3-opus-latest"

	Claude3Dot5Sonnet2410 = "claude-3-5-sonnet-20241022"
	Claude3Dot5Sonnet     = "claude-3-5-sonnet-20240620"
	Claude3Dot5Haiku      = "claude-3-5-haiku-20241022"
	Claude3Opus           = "claude-3-opus-20240229"
	Claude3Sonnet         = "claude-3-sonnet-20240229"
	Claude3Haiku          = "claude-3-haiku-20240307"
	Claude2Dot1           = "claude-2.1"
	// NOTE(review): "Clause" is a typo in this exported identifier; it is
	// kept as-is because renaming it would break the public API.
	Clause2Dot0           = "claude-2.0"
	Claude1Dot2Instant    = "claude-instant-1.2"
)
View Source
// Conversation roles for MessageTurn.Role.
const (
	RoleUser      = "user"
	RoleAssistant = "assistant"
)

Variables

This section is empty.

Functions

func CurrentModels

func CurrentModels() []string

func Models

func Models() []string

Types

type ClaudeError

// ClaudeError is an error response from the API, decoded from the
// "error" object in the response body.
type ClaudeError struct {
	// Err holds the structured error payload returned by the service.
	Err struct {
		Type    string `json:"type"`
		Message string `json:"message"`
		Details string `json:"details"`
	} `json:"error"`
}

func (ClaudeError) Error

func (c ClaudeError) Error() string

func (*ClaudeError) Text

func (c *ClaudeError) Text() string

type ClientError

// ClientError wraps an underlying error so it satisfies both the error
// and MessageContent interfaces; construct one with NewClientError.
type ClientError struct {
	// contains filtered or unexported fields
}

func NewClientError

func NewClientError(err error) *ClientError

func (*ClientError) Error

func (c *ClientError) Error() string

func (*ClientError) Text

func (c *ClientError) Text() string

type ContentBlockDelta

// ContentBlockDelta carries an incremental update to the content block
// at Index: either plain text or a fragment of partial JSON, with
// Delta.Type discriminating between them.
type ContentBlockDelta struct {
	Delta struct {
		Text        string `json:"text"`
		PartialJson string `json:"partial_json"`
		Type        string `json:"type"`
	} `json:"delta"`
	// Index identifies which content block this delta applies to.
	Index int64 `json:"index"`
}

func (*ContentBlockDelta) Text

func (c *ContentBlockDelta) Text() string

type ContentBlockStart

// ContentBlockStart announces the beginning of a new content block at
// Index, carrying the block's type and, for tool use, its name and ID.
type ContentBlockStart struct {
	ContentBlock struct {
		Text string `json:"text"`
		Type string `json:"type"`
		Name string `json:"name"`
		ID   string `json:"id"`
	} `json:"content_block"`
	Index int `json:"index"`
}

func (*ContentBlockStart) Text

func (c *ContentBlockStart) Text() string

type ContentBlockStop

// ContentBlockStop marks the end of the content block at Index.
type ContentBlockStop struct {
	Index int64 `json:"index"`
}

func (*ContentBlockStop) Text

func (c *ContentBlockStop) Text() string

type MessageContent

// MessageContent is implemented by every event payload delivered in a
// MessageEvent; Text returns the payload's textual content.
type MessageContent interface {
	Text() string
}

type MessageDelta

// MessageDelta carries top-level updates to a message: the stop reason,
// the matched stop sequence (if any), and output token usage.
type MessageDelta struct {
	Delta struct {
		StopReason   string  `json:"stop_reason"`
		StopSequence *string `json:"stop_sequence"`
	} `json:"delta"`
	Usage struct {
		OutputTokens int64 `json:"output_tokens"`
	} `json:"usage"`
}

func (*MessageDelta) Text

func (c *MessageDelta) Text() string

type MessageEvent

// MessageEvent is a single event delivered on the channel returned by
// MessageResponse.Responses. Type names the event kind and Data holds
// its payload (which may itself be an error — see the package example).
type MessageEvent struct {
	Type string
	Data MessageContent
}

type MessagePing

// MessagePing is a keep-alive event with no content.
type MessagePing struct {
}

func (*MessagePing) Text

func (c *MessagePing) Text() string

type MessageRequest

// MessageRequest is a request to the messages API. See
// https://docs.anthropic.com/claude/reference/messages_post for details.
type MessageRequest struct {
	// The model that will complete your prompt.
	// Required field except for AWS Bedrock where it must be empty.
	Model string `json:"model,omitempty"`
	// Input messages.
	// Models are trained to operate on alternating user and assistant conversational turns.
	// When creating a new Message, you specify the prior conversational turns with the messages parameter,
	// and the model then generates the next Message in the conversation.
	// Each input message must be an object with a role and content. You can specify a single user-role message,
	// or you can include multiple user and assistant messages. The first message must always use the user role.
	// If the final message uses the assistant role, the response content will continue immediately from the
	// content in that message. This can be used to constrain part of the model's response.
	Messages []MessageTurn `json:"messages"`
	// System prompt.
	// A system prompt is a way of providing context and instructions to Claude, such as specifying a particular goal or role.
	System string `json:"system,omitempty"`
	// The maximum number of tokens to generate before stopping.
	// Note that models may stop before reaching this maximum.
	// This parameter only specifies the absolute maximum number of tokens to generate.
	// Different models have different maximum values for this parameter.
	MaxTokens int              `json:"max_tokens"`
	Metadata  *RequestMetadata `json:"metadata,omitempty"`
	// Custom text sequences that will cause the model to stop generating.
	// Models will normally stop when they have naturally completed their turn,
	// which will result in a response stop_reason of "end_turn".
	// If you want the model to stop generating when it encounters custom strings of text,
	// you can use the stop_sequences parameter. If the model encounters one of the custom sequences,
	// the response stop_reason value will be "stop_sequence" and the response stop_sequence value will contain the matched stop sequence.
	StopSequences []string `json:"stop_sequences,omitempty"`
	// Whether to incrementally stream the response using server-sent events.
	Stream bool `json:"stream,omitempty"`
	// Amount of randomness injected into the response.
	// Defaults to 1.0. Ranges from 0.0 to 1.0. Use temperature closer to 0.0 for analytical / multiple choice, and closer to 1.0 for creative and generative tasks.
	// Note that even with temperature of 0.0, the results will not be fully deterministic.
	Temperature *float64 `json:"temperature,omitempty"`
	// Use nucleus sampling.
	// In nucleus sampling, we compute the cumulative distribution over all the options for each subsequent
	// token in decreasing probability order and cut it off once it reaches a particular probability specified by top_p.
	// You should either alter temperature or top_p, but not both.
	// Recommended for advanced use cases only. You usually only need to use temperature.
	TopP *float64 `json:"top_p,omitempty"`
	// Only sample from the top K options for each subsequent token.
	// Used to remove "long tail" low probability responses.
	// Recommended for advanced use cases only. You usually only need to use temperature.
	TopK *int `json:"top_k,omitempty"`
	// AnthropicVersion is used for AWS Bedrock and GCP Vertex.
	// The client implementations in this library will set this for you so you can leave it blank.
	AnthropicVersion string `json:"anthropic_version,omitempty"`
	// How the model should use the provided tools.
	ToolChoice *ToolChoice `json:"tool_choice,omitempty"`
	// Definitions of tools that the model may use.
	Tools []Tool `json:"tools,omitempty"`
}

MessageRequest is a request struct for the messages API. See https://docs.anthropic.com/claude/reference/messages_post for details

type MessageResponse

// MessageResponse is the unified result of a Message call. Responses
// returns a channel of events; the same loop over that channel handles
// both streaming and non-streaming requests (see the package example).
type MessageResponse interface {
	Responses() <-chan MessageEvent
}

type MessageStart

// MessageStart is the initial message event, carrying the message's
// identity, role, model, any initial content, and input/output token
// usage counts.
type MessageStart struct {
	ID           string        `json:"id"`
	Type         string        `json:"type"`
	Role         string        `json:"role"`
	Content      []TurnContent `json:"content"`
	Model        string        `json:"model"`
	StopReason   string        `json:"stop_reason"`
	StopSequence *string       `json:"stop_sequence"`
	Usage        struct {
		InputTokens  int `json:"input_tokens"`
		OutputTokens int `json:"output_tokens"`
	} `json:"usage"`
}

func (*MessageStart) Text

func (c *MessageStart) Text() string

func (*MessageStart) UnmarshalJSON

func (m *MessageStart) UnmarshalJSON(b []byte) error

type MessageStop

// MessageStop marks the end of a message; it carries no content.
type MessageStop struct {
}

func (*MessageStop) Text

func (c *MessageStop) Text() string

type MessageTurn

// MessageTurn is one conversational turn: a role (see RoleUser,
// RoleAssistant) and its content blocks.
type MessageTurn struct {
	Role    string        `json:"role"`
	Content []TurnContent `json:"content"`
}

func (*MessageTurn) UnmarshalJSON

func (m *MessageTurn) UnmarshalJSON(data []byte) error

type RequestMetadata

// RequestMetadata describes metadata about a request.
type RequestMetadata struct {
	// An external identifier for the user who is associated with the request.
	// This should be a uuid, hash value, or other opaque identifier.
	// Anthropic may use this id to help detect abuse.
	// Do not include any identifying information such as name, email address, or phone number.
	UserID string `json:"user_id,omitempty"`
}

type TextCompletion

// TextCompletion represents a request to the legacy text completions
// API. See https://docs.anthropic.com/claude/reference/complete_post.
//
// Deprecated: use the messages API via MessageRequest instead.
type TextCompletion struct {
	// The model that will complete your prompt.
	// Required field except for AWS Bedrock where it must be empty.
	Model string `json:"model,omitempty"`
	// The prompt that you want Claude to complete.
	// For proper response generation you will need to format your prompt using alternating
	// \n\nHuman: and \n\nAssistant: conversational turns.
	Prompt string `json:"prompt"`
	// The maximum number of tokens to generate before stopping.
	// Note that models may stop before reaching this maximum.
	// This parameter only specifies the absolute maximum number of tokens to generate.
	MaxTokensToSample int `json:"max_tokens_to_sample"`
	// Sequences that will cause the model to stop generating.
	// Models stop on "\n\nHuman:", and may include additional built-in stop sequences in the future.
	// By providing the stop_sequences parameter, you may include additional strings that will cause the model to stop generating.
	StopSequences []string `json:"stop_sequences,omitempty"`
	// Amount of randomness injected into the response.
	// Defaults to 1.0. Ranges from 0.0 to 1.0. Use temperature closer to 0.0 for analytical / multiple choice,
	// and closer to 1.0 for creative and generative tasks.
	// Note that even with temperature of 0.0, the results will not be fully deterministic.
	Temperature *float64 `json:"temperature,omitempty"`
	// Use nucleus sampling.
	// In nucleus sampling, we compute the cumulative distribution over all the options for each subsequent
	// token in decreasing probability order and cut it off once it reaches a particular probability specified by top_p.
	// You should either alter temperature or top_p, but not both.
	// Recommended for advanced use cases only. You usually only need to use temperature.
	TopP *float64 `json:"top_p,omitempty"`
	// Only sample from the top K options for each subsequent token.
	// Used to remove "long tail" low probability responses.
	// Recommended for advanced use cases only. You usually only need to use temperature
	TopK *int `json:"top_k,omitempty"`
	// An object describing metadata about the request.
	Metadata *RequestMetadata `json:"metadata"`
	// Whether to incrementally stream the response using server-sent events.
	Stream bool `json:"stream,omitempty"`
}

TextCompletion represents the request to the legacy text completions API. This is deprecated; you should use the messages API via MessageRequest instead. See https://docs.anthropic.com/claude/reference/complete_post for details

type TextCompletionResponse

// TextCompletionResponse is the response to a legacy TextCompletion
// request, carrying the completion text and why generation stopped.
type TextCompletionResponse struct {
	Type       string `json:"type"`
	ID         string `json:"id"`
	Completion string `json:"completion"`
	StopReason string `json:"stop_reason"`
	Model      string `json:"model"`
}

type Tool

// Tool defines a tool that the model may use.
type Tool struct {
	// Name of the tool.
	Name string `json:"name"`
	// Optional description of the tool.
	Description string `json:"description,omitempty"`
	// JSON schema for the tool input shape that the model will produce in tool_use output content blocks.
	InputSchema any `json:"input_schema"`
}

Tool defines a tool that the model may use.

type ToolChoice

// ToolChoice defines how the model should use the provided tools.
// The three fields are mutually exclusive selection modes.
type ToolChoice struct {
	// Specifies that the model should use a specific tool.
	Tool string `json:"tool,omitempty"`
	// Specifies that the model should use any available tool.
	Any bool `json:"any,omitempty"`
	// Specifies that the model should decide which tool to use.
	Auto bool `json:"auto,omitempty"`
}

ToolChoice defines how the model should use the provided tools.

type TurnContent

// TurnContent is one content block within a MessageTurn. Type returns
// the block's type discriminator (see TurnText, TurnImage, TurnToolUse,
// TurnToolResult) and TextContent returns its textual content.
// Construct values with TextContent, ImageContent, or ToolResultContent.
type TurnContent interface {
	Type() string
	TextContent() string
}

func ImageContent

func ImageContent(mediaType string, image []byte) TurnContent

func TextContent

func TextContent(msg string) TurnContent

func ToolResultContent

func ToolResultContent(toolUseID, content string) TurnContent

type TurnContentToolUse

// TurnContentToolUse is a tool_use content block: a request by the model
// to invoke the named tool with the given Input.
type TurnContentToolUse struct {
	Typ   string      `json:"type"`
	ID    string      `json:"id"`
	Name  string      `json:"name"`
	Input interface{} `json:"input"`
}

func (*TurnContentToolUse) TextContent

func (t *TurnContentToolUse) TextContent() string

func (*TurnContentToolUse) Type

func (t *TurnContentToolUse) Type() string

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL