Documentation ¶
Index ¶
- Constants
- type Connector
- func (c *Connector) CreateExecution(sysVars map[string]any, connection *structpb.Struct, task string) (*base.ExecutionWrapper, error)
- func (c *Connector) Test(sysVars map[string]any, connection *structpb.Struct) error
- func (c *Connector) UsageHandlerCreator() base.UsageHandlerCreator
- func (c *Connector) WithSecrets(s map[string]any) *Connector
- func (c *Connector) WithUsageHandlerCreator(newUH base.UsageHandlerCreator) *Connector
- type Engine
- type Image
- type ImageTaskRes
- type ImageToImageInput
- type ImageToImageOutput
- type ImageToImageReq
- type TextPrompt
- type TextToImageInput
- type TextToImageOutput
- type TextToImageReq
Constants ¶
const ( TextToImageTask = "TASK_TEXT_TO_IMAGE" ImageToImageTask = "TASK_IMAGE_TO_IMAGE" )
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Connector ¶
Connector executes queries against StabilityAI.
func (*Connector) CreateExecution ¶
func (c *Connector) CreateExecution(sysVars map[string]any, connection *structpb.Struct, task string) (*base.ExecutionWrapper, error)
CreateExecution initializes a connector executor that can be used in a pipeline trigger.
func (*Connector) UsageHandlerCreator ¶
func (c *Connector) UsageHandlerCreator() base.UsageHandlerCreator
UsageHandlerCreator returns a function to initialize a UsageHandler.
func (*Connector) WithSecrets ¶
WithSecrets loads secrets into the connector, which can be used to configure it with globally defined parameters.
func (*Connector) WithUsageHandlerCreator ¶
func (c *Connector) WithUsageHandlerCreator(newUH base.UsageHandlerCreator) *Connector
WithUsageHandlerCreator overrides the UsageHandlerCreator method.
type Engine ¶
type Engine struct { Description string `json:"description"` ID string `json:"id"` Name string `json:"name"` Type string `json:"type"` }
Engine represents a Stability AI Engine.
type Image ¶
type Image struct { Base64 string `json:"base64"` Seed uint32 `json:"seed"` FinishReason string `json:"finishReason"` }
Image represents a single image.
type ImageTaskRes ¶
type ImageTaskRes struct {
Images []Image `json:"artifacts"`
}
ImageTaskRes represents the response body for the text-to-image API.
type ImageToImageInput ¶
type ImageToImageInput struct { Task string `json:"task"` Engine string `json:"engine"` Prompts []string `json:"prompts"` InitImage string `json:"init_image"` Weights *[]float64 `json:"weights,omitempty"` InitImageMode *string `json:"init_image_mode,omitempty"` ImageStrength *float64 `json:"image_strength,omitempty"` StepScheduleStart *float64 `json:"step_schedule_start,omitempty"` StepScheduleEnd *float64 `json:"step_schedule_end,omitempty"` CfgScale *float64 `json:"cfg_scale,omitempty"` ClipGuidancePreset *string `json:"clip_guidance_preset,omitempty"` Sampler *string `json:"sampler,omitempty"` Samples *uint32 `json:"samples,omitempty"` Seed *uint32 `json:"seed,omitempty"` Steps *uint32 `json:"steps,omitempty"` StylePreset *string `json:"style_preset,omitempty"` }
type ImageToImageOutput ¶
type ImageToImageReq ¶
type ImageToImageReq struct { TextPrompts []TextPrompt `json:"text_prompts" om:"texts[:]"` InitImage string `json:"init_image" om:"images[0]"` CFGScale *float64 `json:"cfg_scale,omitempty" om:"metadata.cfg_scale"` ClipGuidancePreset *string `json:"clip_guidance_preset,omitempty" om:"metadata.clip_guidance_preset"` Sampler *string `json:"sampler,omitempty" om:"metadata.sampler"` Samples *uint32 `json:"samples,omitempty" om:"metadata.samples"` Seed *uint32 `json:"seed,omitempty" om:"metadata.seed"` Steps *uint32 `json:"steps,omitempty" om:"metadata.steps"` StylePreset *string `json:"style_preset,omitempty" om:"metadata.style_preset"` InitImageMode *string `json:"init_image_mode,omitempty" om:"metadata.init_image_mode"` ImageStrength *float64 `json:"image_strength,omitempty" om:"metadata.image_strength"` StepScheduleStart *float64 `json:"step_schedule_start,omitempty" om:"metadata.step_schedule_start"` StepScheduleEnd *float64 `json:"step_schedule_end,omitempty" om:"metadata.step_schedule_end"` // contains filtered or unexported fields }
ImageToImageReq represents the request body for the image-to-image API.
type TextPrompt ¶
TextPrompt holds a prompt's text and its weight.
type TextToImageInput ¶
type TextToImageInput struct { Task string `json:"task"` Prompts []string `json:"prompts"` Engine string `json:"engine"` Weights *[]float64 `json:"weights,omitempty"` Height *uint32 `json:"height,omitempty"` Width *uint32 `json:"width,omitempty"` CfgScale *float64 `json:"cfg_scale,omitempty"` ClipGuidancePreset *string `json:"clip_guidance_preset,omitempty"` Sampler *string `json:"sampler,omitempty"` Samples *uint32 `json:"samples,omitempty"` Seed *uint32 `json:"seed,omitempty"` Steps *uint32 `json:"steps,omitempty"` StylePreset *string `json:"style_preset,omitempty"` }
type TextToImageOutput ¶
type TextToImageReq ¶
type TextToImageReq struct { TextPrompts []TextPrompt `json:"text_prompts" om:"texts[:]"` CFGScale *float64 `json:"cfg_scale,omitempty" om:"metadata.cfg_scale"` ClipGuidancePreset *string `json:"clip_guidance_preset,omitempty" om:"metadata.clip_guidance_preset"` Sampler *string `json:"sampler,omitempty" om:"metadata.sampler"` Samples *uint32 `json:"samples,omitempty" om:"metadata.samples"` Seed *uint32 `json:"seed,omitempty" om:"metadata.seed"` Steps *uint32 `json:"steps,omitempty" om:"metadata.steps"` StylePreset *string `json:"style_preset,omitempty" om:"metadata.style_preset"` Height *uint32 `json:"height,omitempty" om:"metadata.height"` Width *uint32 `json:"width,omitempty" om:"metadata.width"` // contains filtered or unexported fields }
TextToImageReq represents the request body for the text-to-image API.