model

package
v1.1.5
Published: Nov 16, 2023 License: Apache-2.0 Imports: 31 Imported by: 0

Documentation

Index

Constants

View Source
const (
	JobTypeService = "service"
	JobTypeBatch   = "batch"
	JobTypeOps     = "ops" // TODO: revisit the job naming
	JobTypeSystem  = "system"
)
View Source
const (
	EngineKeyImageDocker                = "Image"
	EngineKeyEntrypointDocker           = "Entrypoint"
	EngineKeyParametersDocker           = "Parameters"
	EngineKeyEnvironmentVariablesDocker = "EnvironmentVariables"
	EngineKeyWorkingDirectoryDocker     = "WorkingDirectory"
)
View Source
const (
	TracerAttributeNameNodeID = "nodeid"
	TracerAttributeNameJobID  = "jobid"
)
View Source
const (
	EngineKeyEntryModuleWasm          = "EntryModule"
	EngineKeyEntrypointWasm           = "Entrypoint"
	EngineKeyParametersWasm           = "Parameters"
	EngineKeyEnvironmentVariablesWasm = "EnvironmentVariables"
	EngineKeyImportModulesWasm        = "ImportModules"
)
View Source
const DefaultJobTimeout time.Duration = time.Duration(0)

DefaultJobTimeout is used when the user did not specify a timeout, or explicitly requested that the job should receive the longest possible timeout. The node will replace this value with whatever it has configured as the maximum timeout.

View Source
const (
	// DefaultNamespace is the default namespace.
	DefaultNamespace = "default"
)
View Source
const MaxNumberOfObjectsToSerialize = 1000

Arbitrarily choosing 1000 jobs to serialize - this is a pretty high limit.

View Source
const MaxSerializedStringInput = int(10 * datasize.MB)
View Source
const Unknown = "unknown"

Unknown is a constant representing unknown engines, publishers, storage sources and any other unknowns in this world!

Variables

View Source
var (
	IncludeAny  = make([]string, 0)
	ExcludeNone = make([]string, 0)
)

Set of annotations that will not do any filtering of jobs.

View Source
var NoJobTimeout time.Duration = time.Duration(math.MaxInt64).Truncate(time.Second)

NoJobTimeout specifies that the job should not be subject to timeouts. This value is the largest possible time.Duration that is a whole number of seconds so conversions into an int64 number of seconds and back again are bijective.
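
As a minimal sketch of that round trip (the import path github.com/bacalhau-project/bacalhau/pkg/model is an assumption, not confirmed by this page):

package main

import (
	"fmt"
	"time"

	"github.com/bacalhau-project/bacalhau/pkg/model" // assumed import path
)

func main() {
	// Store the duration as a whole number of seconds, as a job spec would...
	seconds := int64(model.NoJobTimeout / time.Second)
	// ...and convert back. Because NoJobTimeout is truncated to a whole
	// number of seconds, the round trip is lossless.
	restored := time.Duration(seconds) * time.Second
	fmt.Println(restored == model.NoJobTimeout) // true
}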

Functions

func ConfirmMaxSliceSize

func ConfirmMaxSliceSize[T any](t T, maxSize int) error

func ConvertBytesString added in v1.0.4

func ConvertBytesString(val string) uint64

func ConvertCPUString added in v1.0.4

func ConvertCPUString(val string) float64

func ConvertGPUString added in v1.0.4

func ConvertGPUString(val string) uint64

func DecodeEngineSpec added in v1.0.4

func DecodeEngineSpec[T any](spec EngineSpec) (*T, error)

DecodeEngineSpec is a generic function that accepts an EngineSpec object. It marshals the EngineSpec Params into JSON and then unmarshals the JSON into a new object of type T. The function returns a pointer to the new object and an error object. If there is any issue during the JSON marshaling or unmarshaling, the function will return an error. TODO the double json marshaling here is inefficient, we can implement explicit per field decoding if required.
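
A hedged sketch of recovering a typed DockerEngineSpec from a generic EngineSpec (the import path and the "ubuntu:latest" image are illustrative assumptions):

package main

import (
	"fmt"

	"github.com/bacalhau-project/bacalhau/pkg/model" // assumed import path
)

func main() {
	spec := model.EngineSpec{
		Type: model.EngineDocker.String(),
		Params: map[string]interface{}{
			model.EngineKeyImageDocker:      "ubuntu:latest",
			model.EngineKeyEntrypointDocker: []string{"echo", "hello"},
		},
	}

	// Params is marshaled to JSON and unmarshaled into the typed struct.
	docker, err := model.DecodeEngineSpec[model.DockerEngineSpec](spec)
	if err != nil {
		panic(err)
	}
	fmt.Println(docker.Image, docker.Entrypoint)
}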

func EngineNames

func EngineNames() []string

func FromLabelSelectorRequirements

func FromLabelSelectorRequirements(requirements ...LabelSelectorRequirement) ([]labels.Requirement, error)

func IsValidEngine

func IsValidEngine(e Engine) bool

func IsValidPublisher

func IsValidPublisher(publisherType Publisher) bool

func IsValidStorageSourceType

func IsValidStorageSourceType(sourceType StorageSourceType) bool

func JSONMarshalIndentWithMax

func JSONMarshalIndentWithMax[T any](t T, indentSpaces int) ([]byte, error)

func JSONMarshalWithMax

func JSONMarshalWithMax[T any](t T) ([]byte, error)

func JSONUnmarshalWithMax

func JSONUnmarshalWithMax[T any](b []byte, t *T) error

func PublisherNames

func PublisherNames() []string

func Reinterpret

func Reinterpret[T any](node datamodel.Node, schema *Schema) (*T, error)

Reinterpret re-parses the datamodel.Node as an object of the defined type.

func StorageSourceNames

func StorageSourceNames() []string

func UnmarshalIPLD

func UnmarshalIPLD[T any](b []byte, decoder codec.Decoder, schema *Schema) (*T, error)

UnmarshalIPLD parses the given bytes as a Go object using the passed decoder. Returns an error if the object cannot be parsed.

func YAMLMarshalWithMax

func YAMLMarshalWithMax[T any](t T) ([]byte, error)

func YAMLUnmarshalWithMax

func YAMLUnmarshalWithMax[T any](b []byte, t *T) error

Types

type APIVersion

type APIVersion int
const (

	// V1alpha1 Deprecated but left here to preserve enum ordering
	V1alpha1 APIVersion
	// V1beta1 is Deprecated but left here to preserve enum ordering
	V1beta1
	// V1beta2 is the current API version
	V1beta2
)

func APIVersionLatest

func APIVersionLatest() APIVersion

func ParseAPIVersion

func ParseAPIVersion(str string) (APIVersion, error)

func (APIVersion) String

func (i APIVersion) String() string

type BacalhauConfig

type BacalhauConfig struct {
	Publisher   Publisher
	Timeout     time.Duration
	Resources   ResourceSpec
	Annotations []string
	Dnt         bool
}

type BuildVersionInfo

type BuildVersionInfo struct {
	Major      string    `json:"major,omitempty" example:"0"`
	Minor      string    `json:"minor,omitempty" example:"3"`
	GitVersion string    `json:"gitversion" example:"v0.3.12"`
	GitCommit  string    `json:"gitcommit" example:"d612b63108f2b5ce1ab2b9e02444eb1dac1d922d"`
	BuildDate  time.Time `json:"builddate" example:"2022-11-16T14:03:31Z"`
	GOOS       string    `json:"goos" example:"linux"`
	GOARCH     string    `json:"goarch" example:"amd64"`
}

BuildVersionInfo is the version of a Bacalhau binary (either client or server)

type ComputeNodeInfo

type ComputeNodeInfo struct {
	ExecutionEngines   []string          `json:"ExecutionEngines"`
	Publishers         []string          `json:"Publishers"`
	StorageSources     []string          `json:"StorageSources"`
	MaxCapacity        ResourceUsageData `json:"MaxCapacity"`
	AvailableCapacity  ResourceUsageData `json:"AvailableCapacity"`
	MaxJobRequirements ResourceUsageData `json:"MaxJobRequirements"`
	RunningExecutions  int               `json:"RunningExecutions"`
	EnqueuedExecutions int               `json:"EnqueuedExecutions"`
}

type ComputeNodeInfoProvider

type ComputeNodeInfoProvider interface {
	GetComputeInfo(ctx context.Context) ComputeNodeInfo
}

type Deal

type Deal struct {
	// Whether the job should be run on any matching node (false) or all
	// matching nodes (true). If true, other fields in this struct are ignored.
	TargetingMode TargetingMode `json:"TargetingMode,omitempty"`
	// The maximum number of concurrent compute node bids that will be
	// accepted by the requester node on behalf of the client.
	Concurrency int `json:"Concurrency,omitempty"`
}

The deal the client has made with the bacalhau network. This is updateable by the client who submitted the job.

func (Deal) GetConcurrency added in v0.3.25

func (d Deal) GetConcurrency() int

GetConcurrency returns the concurrency value from the deal

func (Deal) IsValid added in v1.0.4

func (d Deal) IsValid() error

type DebugInfo

type DebugInfo struct {
	Component string      `json:"component"`
	Info      interface{} `json:"info"`
}

type DebugInfoProvider

type DebugInfoProvider interface {
	GetDebugInfo(ctx context.Context) (DebugInfo, error)
}

type DockerEngineBuilder added in v1.0.4

type DockerEngineBuilder struct {
	// contains filtered or unexported fields
}

DockerEngineBuilder is a struct that is used for constructing an EngineSpec object specifically for Docker engines using the Builder pattern. It embeds an EngineBuilder object for handling the common builder methods.

func NewDockerEngineBuilder added in v1.0.4

func NewDockerEngineBuilder(image string) *DockerEngineBuilder

NewDockerEngineBuilder function initializes a new DockerEngineBuilder instance. It sets the engine type to model.EngineDocker.String() and image as per the input argument.

func (*DockerEngineBuilder) Build added in v1.0.4

func (b *DockerEngineBuilder) Build() EngineSpec

Build method constructs the final EngineSpec object by calling the embedded EngineBuilder's Build method.

func (*DockerEngineBuilder) WithEntrypoint added in v1.0.4

func (b *DockerEngineBuilder) WithEntrypoint(e ...string) *DockerEngineBuilder

WithEntrypoint is a builder method that sets the Docker engine entrypoint. It returns the DockerEngineBuilder for further chaining of builder methods.

func (*DockerEngineBuilder) WithEnvironmentVariables added in v1.0.4

func (b *DockerEngineBuilder) WithEnvironmentVariables(e ...string) *DockerEngineBuilder

WithEnvironmentVariables is a builder method that sets the Docker engine's environment variables. It returns the DockerEngineBuilder for further chaining of builder methods.

func (*DockerEngineBuilder) WithParameters added in v1.0.4

func (b *DockerEngineBuilder) WithParameters(e ...string) *DockerEngineBuilder

WithParameters is a builder method that sets the Docker engine's parameters. It returns the DockerEngineBuilder for further chaining of builder methods.

func (*DockerEngineBuilder) WithWorkingDirectory added in v1.0.4

func (b *DockerEngineBuilder) WithWorkingDirectory(e string) *DockerEngineBuilder

WithWorkingDirectory is a builder method that sets the Docker engine's working directory. It returns the DockerEngineBuilder for further chaining of builder methods.
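
Putting the builder methods above together, a sketch of constructing a Docker EngineSpec (image, command and values are illustrative; the import path is assumed):

package main

import (
	"github.com/bacalhau-project/bacalhau/pkg/model" // assumed import path
)

func main() {
	engineSpec := model.NewDockerEngineBuilder("ubuntu:latest").
		WithEntrypoint("bash", "-c").
		WithParameters("echo hello").
		WithEnvironmentVariables("FOO=bar").
		WithWorkingDirectory("/work").
		Build()

	_ = engineSpec // typically assigned to Spec.EngineSpec
}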

type DockerEngineSpec added in v1.0.4

type DockerEngineSpec struct {
	// Image this should be pullable by docker
	Image string `json:"Image,omitempty"`
	// Entrypoint optionally override the default entrypoint
	Entrypoint []string `json:"Entrypoint,omitempty"`
	// Parameters holds additional commandline arguments
	Parameters []string `json:"Parameters,omitempty"`
	// EnvironmentVariables is a slice of env to run the container with
	EnvironmentVariables []string `json:"EnvironmentVariables,omitempty"`
	// WorkingDirectory inside the container
	WorkingDirectory string `json:"WorkingDirectory,omitempty"`
}

DockerEngineSpec contains necessary parameters to execute a docker job.

type DockerInputs

type DockerInputs struct {
	Entrypoint []string
	Workdir    string
	Mounts     IPLDMap[string, Resource]
	Outputs    IPLDMap[string, datamodel.Node]
	Env        IPLDMap[string, string]
}

func (DockerInputs) UnmarshalInto

func (docker DockerInputs) UnmarshalInto(with string, spec *Spec) error

type Engine

type Engine int
const (
	EngineNoop Engine
	EngineDocker
	EngineWasm
)

func EngineTypes

func EngineTypes() []Engine

func ParseEngine

func ParseEngine(str string) Engine

ParseEngine will either return a valid engine type or `engineUnknown`

func (Engine) MarshalText

func (e Engine) MarshalText() ([]byte, error)

func (Engine) String

func (e Engine) String() string

String returns string representation of the engine type. Don't use stringer tool as it doesn't generate camel-case strings

func (*Engine) UnmarshalText

func (e *Engine) UnmarshalText(text []byte) (err error)

type EngineBuilder added in v1.0.4

type EngineBuilder struct {
	// contains filtered or unexported fields
}

EngineBuilder is a struct used for constructing an EngineSpec object using the Builder pattern. The options field is a slice of functions that modify the EngineSpec object.

func NewEngineBuilder added in v1.0.4

func NewEngineBuilder() *EngineBuilder

NewEngineBuilder function creates a new instance of the EngineBuilder.

func (*EngineBuilder) Build added in v1.0.4

func (b *EngineBuilder) Build() EngineSpec

Build method constructs the final EngineSpec object. It applies all the functions stored in the options slice to the EngineSpec and returns it.

func (*EngineBuilder) WithParam added in v1.0.4

func (b *EngineBuilder) WithParam(key string, value interface{}) *EngineBuilder

WithParam is a builder method that sets a key-value pair in the Params field of the EngineSpec. It returns the EngineBuilder for further chaining of builder methods.

func (*EngineBuilder) WithType added in v1.0.4

func (b *EngineBuilder) WithType(t string) *EngineBuilder

WithType is a builder method that sets the Type field of the EngineSpec. It returns the EngineBuilder for further chaining of builder methods.

type EngineSpec added in v1.0.4

type EngineSpec struct {
	Type   string
	Params map[string]interface{}
}

func DeserializeEngineSpec added in v1.0.4

func DeserializeEngineSpec(in []byte) (EngineSpec, error)

DeserializeEngineSpec takes a byte slice as input, attempts to unmarshal it into an EngineSpec struct. If the unmarshalling is successful, it returns the populated EngineSpec and a nil error. In case of any error during the unmarshalling process, it returns an empty EngineSpec and the error.

func (EngineSpec) Engine added in v1.0.4

func (e EngineSpec) Engine() Engine

func (EngineSpec) Format added in v1.0.4

func (e EngineSpec) Format(f fmt.State, c rune)

func (EngineSpec) Serialize added in v1.0.4

func (e EngineSpec) Serialize() ([]byte, error)

Serialize method for the EngineSpec struct takes no arguments. If the Params field of the EngineSpec is nil, it returns an empty byte slice and a nil error. Otherwise, it attempts to convert the EngineSpec instance into a JSON-formatted byte slice. If successful, it returns the byte slice and a nil error. If an error occurs during the JSON marshaling process, it returns an empty byte slice and the error.
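
A short sketch of a Serialize/DeserializeEngineSpec round trip (import path assumed):

package main

import (
	"fmt"

	"github.com/bacalhau-project/bacalhau/pkg/model" // assumed import path
)

func main() {
	original := model.NewDockerEngineBuilder("ubuntu:latest").Build()

	data, err := original.Serialize() // JSON-encode the spec
	if err != nil {
		panic(err)
	}

	restored, err := model.DeserializeEngineSpec(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(restored.Type == original.Type) // true
}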

func (EngineSpec) String added in v1.0.4

func (e EngineSpec) String() string

type ExcludedTag

type ExcludedTag string

We use these types to make it harder to accidentally mix up passing the wrong annotations to the wrong argument, e.g. avoid Excluded = []string{"included"}

type ExecutionDesiredState added in v1.0.4

type ExecutionDesiredState int
const (
	ExecutionDesiredStatePending ExecutionDesiredState = iota
	ExecutionDesiredStateRunning
	ExecutionDesiredStateStopped
)

type ExecutionID

type ExecutionID struct {
	JobID       string `json:"JobID,omitempty"`
	NodeID      string `json:"NodeID,omitempty"`
	ExecutionID string `json:"ExecutionID,omitempty"`
}

ExecutionID a globally unique identifier for an execution

func (ExecutionID) String

func (e ExecutionID) String() string

String returns a string representation of the execution id

type ExecutionState

type ExecutionState struct {
	// JobID the job id
	JobID string `json:"JobID"`
	// which node is running this execution
	NodeID string `json:"NodeId"`
	// Compute node reference for this job execution
	ComputeReference string `json:"ComputeReference"`
	// State is the current state of the execution
	State ExecutionStateType `json:"State"`
	// an arbitrary status message
	Status string `json:"Status,omitempty"`
	// DesiredState is the desired state of the execution
	DesiredState ExecutionDesiredState `json:"DesiredState,omitempty"`
	// the published results for this execution
	PublishedResult StorageSpec `json:"PublishedResults,omitempty"`

	// RunOutput of the job
	RunOutput *RunCommandResult `json:"RunOutput,omitempty"`
	// Version is the version of the job state. It is incremented every time the job state is updated.
	Version int `json:"Version"`
	// CreateTime is the time when the job was created.
	CreateTime time.Time `json:"CreateTime"`
	// UpdateTime is the time when the job state was last updated.
	UpdateTime time.Time `json:"UpdateTime"`
}

func (ExecutionState) ID

func (e ExecutionState) ID() ExecutionID

ID returns the ID for this execution

func (ExecutionState) String

func (e ExecutionState) String() string

String returns a string representation of the execution

type ExecutionStateType

type ExecutionStateType int

ExecutionStateType The state of an execution. An execution represents a single attempt to execute a job on a node. A compute node can have multiple executions for the same job due to retries, but there can only be a single active execution per node at any given time.

const (
	ExecutionStateUndefined ExecutionStateType = iota
	// ExecutionStateNew The execution has been created, but not pushed to a compute node yet.
	ExecutionStateNew ExecutionStateType = iota
	// ExecutionStateAskForBid A node has been selected to execute a job, and is being asked to bid on the job.
	ExecutionStateAskForBid
	// ExecutionStateAskForBidAccepted compute node has accepted the ask for bid.
	ExecutionStateAskForBidAccepted
	// ExecutionStateAskForBidRejected compute node has rejected the ask for bid.
	ExecutionStateAskForBidRejected
	// ExecutionStateBidAccepted requester has accepted the bid, and the execution is expected to be running on the compute node.
	ExecutionStateBidAccepted // aka running
	// ExecutionStateBidRejected requester has rejected the bid.
	ExecutionStateBidRejected
	// ExecutionStateCompleted The execution has been completed, and the result has been published.
	ExecutionStateCompleted
	// ExecutionStateFailed The execution has failed.
	ExecutionStateFailed
	// ExecutionStateCancelled The execution has been canceled by the user
	ExecutionStateCancelled
)

func ExecutionStateTypes

func ExecutionStateTypes() []ExecutionStateType

func (ExecutionStateType) IsActive

func (s ExecutionStateType) IsActive() bool

IsActive returns true if the execution is running or has completed

func (ExecutionStateType) IsDiscarded

func (s ExecutionStateType) IsDiscarded() bool

IsDiscarded returns true if the execution has been discarded due to a failure, rejection or cancellation

func (ExecutionStateType) IsPending added in v1.0.4

func (s ExecutionStateType) IsPending() bool

IsPending returns true if the execution is still pending approval and did not yet start running or has been discarded

func (ExecutionStateType) IsTerminal

func (s ExecutionStateType) IsTerminal() bool

IsTerminal returns true if the execution is in a terminal state where no further state changes are possible

func (ExecutionStateType) IsUndefined added in v1.0.4

func (s ExecutionStateType) IsUndefined() bool

IsUndefined returns true if the execution state is undefined

func (ExecutionStateType) MarshalText

func (s ExecutionStateType) MarshalText() ([]byte, error)

func (ExecutionStateType) String

func (i ExecutionStateType) String() string

func (*ExecutionStateType) UnmarshalText

func (s *ExecutionStateType) UnmarshalText(text []byte) (err error)

type FailureInjectionComputeConfig added in v1.0.4

type FailureInjectionComputeConfig struct {
	IsBadActor bool
}

type FailureInjectionConfig added in v1.0.4

type FailureInjectionConfig struct {
	Compute   FailureInjectionComputeConfig
	Requester FailureInjectionRequesterConfig
}

type FailureInjectionRequesterConfig added in v1.0.4

type FailureInjectionRequesterConfig struct {
	IsBadActor bool `yaml:"IsBadActor"`
}

type HTTPResource

type HTTPResource string

type IPFSResource

type IPFSResource string

type IPLDMap

type IPLDMap[K comparable, V any] struct {
	Keys   []K
	Values map[K]V
}

IPLD Maps are parsed by the ipld library into structures of this type rather than just plain Go maps.

type IncludedTag

type IncludedTag string

We use these types to make it harder to accidentally mix up passing the wrong annotations to the wrong argument, e.g. avoid Excluded = []string{"included"}

type Job

type Job struct {
	APIVersion string `json:"APIVersion" example:"V1beta1"`

	// TODO this doesn't seem like it should be a part of the job as it cannot be known by a client ahead of time.
	Metadata Metadata `json:"Metadata,omitempty"`

	// The specification of this job.
	Spec Spec `json:"Spec,omitempty"`
}

Job contains data about a job request in the bacalhau network.

func NewJob

func NewJob() *Job

TODO: There's probably a better way we want to globally version APIs

func NewJobWithSaneProductionDefaults

func NewJobWithSaneProductionDefaults() (*Job, error)

func (Job) ID added in v0.3.24

func (j Job) ID() string

ID returns the ID of the job.

func (Job) String added in v0.3.24

func (j Job) String() string

String returns the id of the job.

func (Job) Type added in v1.0.4

func (j Job) Type() string

Type returns the type of the job.

type JobCancelPayload

type JobCancelPayload struct {
	// the id of the client that is submitting the job
	ClientID string `json:"ClientID,omitempty" validate:"required"`

	// the job id of the job to be canceled
	JobID string `json:"JobID,omitempty" validate:"required"`

	// The reason that the job is being canceled
	Reason string `json:"Reason,omitempty"`
}

func (JobCancelPayload) GetClientID added in v0.3.24

func (j JobCancelPayload) GetClientID() string

type JobCreatePayload

type JobCreatePayload struct {
	// the id of the client that is submitting the job
	ClientID string `json:"ClientID,omitempty" validate:"required"`

	APIVersion string `json:"APIVersion,omitempty" example:"V1beta1" validate:"required"`

	// The specification of this job.
	Spec *Spec `json:"Spec,omitempty" validate:"required"`
}

func (JobCreatePayload) GetClientID added in v0.3.24

func (j JobCreatePayload) GetClientID() string

type JobEvent

type JobEvent struct {
	JobID string `json:"JobID,omitempty" example:"9304c616-291f-41ad-b862-54e133c0149e"`
	// compute execution identifier
	ExecutionID string `json:"ExecutionID,omitempty" example:"9304c616-291f-41ad-b862-54e133c0149e"`
	// the node that emitted this event
	SourceNodeID string `json:"SourceNodeID,omitempty" example:"QmXaXu9N5GNetatsvwnTfQqNtSeKAD6uCmarbh3LMRYAcF"`
	// the node that this event is for
	// e.g. "AcceptJobBid" was emitted by the requester node but it targets the compute node
	TargetNodeID string `json:"TargetNodeID,omitempty" example:"QmdZQ7ZbhnvWY1J12XYKGHApJ6aufKyLNSvf8jZBrBaAVL"`

	EventName JobEventType `json:"EventName,omitempty"`
	Status    string       `json:"Status,omitempty" example:"Got results proposal of length: 0"`

	EventTime time.Time `json:"EventTime,omitempty" example:"2022-11-17T13:32:55.756658941Z"`
}

type JobEventType

type JobEventType int
const (

	// Job has been created on the requester node
	JobEventCreated JobEventType

	// a compute node bid on a job
	JobEventBid

	// a requester node accepted or rejected a job bid
	JobEventBidAccepted
	JobEventBidRejected

	// a compute node had an error running a job
	JobEventComputeError

	// a compute node completed running a job
	JobEventResultsProposed

	// a Requester node accepted the results from a node for a job
	JobEventResultsAccepted

	// a Requester node rejected the results from a node for a job
	JobEventResultsRejected

	// once the results have been accepted or rejected
	// the compute node will publish them and issue this event
	JobEventResultsPublished

	// a requester node declared an error running a job
	JobEventError

	// a user canceled a job
	JobEventCanceled

	// a job has been completed
	JobEventCompleted
)

func JobEventTypes

func JobEventTypes() []JobEventType

func ParseJobEventType

func ParseJobEventType(str string) (JobEventType, error)

func (JobEventType) IsTerminal

func (je JobEventType) IsTerminal() bool

IsTerminal returns true if the given event type signals the end of the lifecycle of a job. After this, all nodes can safely ignore the job.

func (JobEventType) IsUndefined added in v1.0.4

func (je JobEventType) IsUndefined() bool

func (JobEventType) MarshalText

func (je JobEventType) MarshalText() ([]byte, error)

func (JobEventType) String

func (i JobEventType) String() string

func (*JobEventType) UnmarshalText

func (je *JobEventType) UnmarshalText(text []byte) (err error)

type JobHistory

type JobHistory struct {
	Type             JobHistoryType                   `json:"Type"`
	JobID            string                           `json:"JobID"`
	NodeID           string                           `json:"NodeID,omitempty"`
	ComputeReference string                           `json:"ComputeReference,omitempty"`
	JobState         *StateChange[JobStateType]       `json:"JobState,omitempty"`
	ExecutionState   *StateChange[ExecutionStateType] `json:"ExecutionState,omitempty"`
	NewVersion       int                              `json:"NewVersion"`
	Comment          string                           `json:"Comment,omitempty"`
	Time             time.Time                        `json:"Time"`
}

JobHistory represents a single event in the history of a job. An event can be at the job level, or execution (node) level.

{Job,Execution}State fields will only be present if the Type field is of the matching type.

type JobHistoryType

type JobHistoryType int
const (
	JobHistoryTypeJobLevel JobHistoryType
	JobHistoryTypeExecutionLevel
)

func (JobHistoryType) MarshalText

func (s JobHistoryType) MarshalText() ([]byte, error)

func (JobHistoryType) String

func (i JobHistoryType) String() string

func (*JobHistoryType) UnmarshalText

func (s *JobHistoryType) UnmarshalText(text []byte) (err error)

type JobRequester

type JobRequester struct {
	// The ID of the requester node that owns this job.
	RequesterNodeID string `json:"RequesterNodeID,omitempty" example:"QmXaXu9N5GNetatsvwnTfQqNtSeKAD6uCmarbh3LMRYAcF"`

	// The public key of the Requester node that created this job
	// This can be used to encrypt messages back to the creator
	RequesterPublicKey PublicKey `json:"RequesterPublicKey,omitempty"`
}

type JobSelectionDataLocality

type JobSelectionDataLocality int64

Job selection policy configuration

const (
	Local    JobSelectionDataLocality = 0 // local
	Anywhere JobSelectionDataLocality = 1 // anywhere
)

func ParseJobSelectionDataLocality added in v0.3.26

func ParseJobSelectionDataLocality(s string) (ret JobSelectionDataLocality, err error)

func (JobSelectionDataLocality) MarshalYAML added in v1.0.4

func (i JobSelectionDataLocality) MarshalYAML() (interface{}, error)

func (JobSelectionDataLocality) String added in v0.3.26

func (i JobSelectionDataLocality) String() string

func (*JobSelectionDataLocality) UnmarshalText added in v1.0.4

func (i *JobSelectionDataLocality) UnmarshalText(text []byte) error

func (*JobSelectionDataLocality) UnmarshalYAML added in v1.0.4

func (i *JobSelectionDataLocality) UnmarshalYAML(value *yaml.Node) error

type JobSelectionPolicy

type JobSelectionPolicy struct {
	// this describes if we should run a job based on
	// where the data is located - i.e. if the data is "local"
	// or if the data is "anywhere"
	Locality JobSelectionDataLocality `json:"locality" yaml:"Locality"`
	// should we reject jobs that don't specify any data
	// the default is "accept"
	RejectStatelessJobs bool `json:"reject_stateless_jobs" yaml:"RejectStatelessJobs"`
	// should we accept jobs that specify networking
	// the default is "reject"
	AcceptNetworkedJobs bool `json:"accept_networked_jobs" yaml:"AcceptNetworkedJobs"`
	// external hooks that decide if we should take on the job or not
	// if either of these are given they will override the data locality settings
	ProbeHTTP string `json:"probe_http,omitempty" yaml:"ProbeHTTP"`
	ProbeExec string `json:"probe_exec,omitempty" yaml:"ProbeExec"`
}

describe the rules for how a compute node selects an incoming job

func NewDefaultJobSelectionPolicy

func NewDefaultJobSelectionPolicy() JobSelectionPolicy

generate a default empty job selection policy

type JobSpecDocker

type JobSpecDocker struct {
	// this should be pullable by docker
	Image string `json:"Image,omitempty"`
	// optionally override the default entrypoint
	Entrypoint []string `json:"Entrypoint,omitempty"`
	// Parameters holds additional commandline arguments
	Parameters []string `json:"Parameters,omitempty"`
	// a map of env to run the container with
	EnvironmentVariables []string `json:"EnvironmentVariables,omitempty"`
	// working directory inside the container
	WorkingDirectory string `json:"WorkingDirectory,omitempty"`
}

for VM style executors

type JobSpecWasm

type JobSpecWasm struct {
	// The module that contains the WASM code to start running.
	EntryModule StorageSpec `json:"EntryModule,omitempty"`

	// The name of the function in the EntryModule to call to run the job. For
	// WASI jobs, this will always be `_start`, but jobs can choose to call
	// other WASM functions instead. The EntryPoint must be a zero-parameter
	// zero-result function.
	EntryPoint string `json:"EntryPoint,omitempty"`

	// The arguments supplied to the program (i.e. as ARGV).
	Parameters []string `json:"Parameters,omitempty"`

	// The variables available in the environment of the running program.
	EnvironmentVariables map[string]string `json:"EnvironmentVariables,omitempty"`

	// TODO #880: Other WASM modules whose exports will be available as imports
	// to the EntryModule.
	ImportModules []StorageSpec `json:"ImportModules,omitempty"`
}

Describes a raw WASM job

type JobState

type JobState struct {
	// JobID is the unique identifier for the job
	JobID string `json:"JobID"`
	// Executions is a list of executions of the job across the nodes.
	// A new execution is created when a node is selected to execute the job, and a node can have multiple executions for the same
	// job due to retries, but there can only be a single active execution per node at any given time.
	Executions []ExecutionState `json:"Executions"`
	// State is the current state of the job
	State JobStateType `json:"State"`
	// Version is the version of the job state. It is incremented every time the job state is updated.
	Version int `json:"Version"`
	// CreateTime is the time when the job was created.
	CreateTime time.Time `json:"CreateTime"`
	// UpdateTime is the time when the job state was last updated.
	UpdateTime time.Time `json:"UpdateTime"`
	// TimeoutAt is the time when the job will be timed out if it is not completed.
	TimeoutAt time.Time `json:"TimeoutAt,omitempty"`
}

JobState The state of a job across the whole network that represents an aggregate view across the executions and nodes.

func (*JobState) ActiveCount added in v1.0.4

func (s *JobState) ActiveCount() int

func (*JobState) CompletedCount added in v1.0.4

func (s *JobState) CompletedCount() int

func (*JobState) GroupExecutionsByState added in v0.3.25

func (s *JobState) GroupExecutionsByState() map[ExecutionStateType][]ExecutionState

GroupExecutionsByState groups the executions by state
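
For example, a sketch that summarizes executions per state (the job ID, node IDs and states are made up for illustration; import path assumed):

package main

import (
	"fmt"

	"github.com/bacalhau-project/bacalhau/pkg/model" // assumed import path
)

func main() {
	state := model.JobState{
		JobID: "example-job",
		Executions: []model.ExecutionState{
			{NodeID: "node-1", State: model.ExecutionStateBidAccepted},
			{NodeID: "node-2", State: model.ExecutionStateCompleted},
			{NodeID: "node-3", State: model.ExecutionStateCompleted},
		},
	}

	for stateType, executions := range state.GroupExecutionsByState() {
		fmt.Printf("%s: %d execution(s)\n", stateType, len(executions))
	}
}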

func (*JobState) NonDiscardedCount added in v1.0.4

func (s *JobState) NonDiscardedCount() int

func (*JobState) NonTerminalExecutions added in v1.0.4

func (s *JobState) NonTerminalExecutions() []*ExecutionState

NonTerminalExecutions returns the executions that are not in a terminal state.

type JobStateType

type JobStateType int

JobStateType The state of a job across the whole network that represents an aggregate view across the executions and nodes.

const (
	JobStateUndefined JobStateType = iota

	JobStateNew

	JobStateInProgress

	// Job is canceled by the user.
	JobStateCancelled

	// Job has failed
	JobStateError

	// Job completed successfully
	JobStateCompleted

	// Job is waiting to be scheduled.
	JobStateQueued
)

these are the states a job can be in

func JobStateTypes added in v0.3.26

func JobStateTypes() []JobStateType

func (JobStateType) IsTerminal

func (s JobStateType) IsTerminal() bool

IsTerminal returns true if the given job type signals the end of the lifecycle of that job and that no change in the state can be expected.

func (JobStateType) IsUndefined added in v1.0.4

func (s JobStateType) IsUndefined() bool

IsUndefined returns true if the job state is undefined

func (JobStateType) MarshalText

func (s JobStateType) MarshalText() ([]byte, error)

func (JobStateType) String

func (i JobStateType) String() string

func (*JobStateType) UnmarshalText

func (s *JobStateType) UnmarshalText(text []byte) (err error)

type JobType

type JobType interface {
	UnmarshalInto(with string, spec *Spec) error
}

type JobWithInfo

type JobWithInfo struct {
	// Job info
	Job Job `json:"Job"`
	// The current state of the job
	State JobState `json:"State"`
	// History of changes to the job state. Not always populated in the job description
	History []JobHistory `json:"History,omitempty"`
}

JobWithInfo is the job request + the result of attempting to run it on the network

type KeyInt

type KeyInt int

type KeyString

type KeyString string

type LabelSelectorRequirement

type LabelSelectorRequirement struct {
	// key is the label key that the selector applies to.
	Key string `json:"Key"`
	// operator represents a key's relationship to a set of values.
	// Valid operators are In, NotIn, Exists and DoesNotExist.
	Operator selection.Operator `json:"Operator"`
	// values is an array of string values. If the operator is In or NotIn,
	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
	// the values array must be empty. This array is replaced during a strategic merge patch.
	Values []string `json:"Values,omitempty"`
}

LabelSelectorRequirement A selector that contains values, a key, and an operator that relates the key and values. These are based on labels library from kubernetes package. While we use labels.Requirement to represent the label selector requirements in the command line arguments as the library supports multiple parsing formats, and we also use it when matching selectors to labels as that's what the library expects, labels.Requirements are not serializable, so we need to convert them to LabelSelectorRequirements.
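
A hedged sketch of that conversion in both directions, using the kubernetes labels library referenced above (import paths assumed):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"

	"github.com/bacalhau-project/bacalhau/pkg/model" // assumed import path
)

func main() {
	// Build a requirement with the kubernetes labels library...
	req, err := labels.NewRequirement("region", selection.In, []string{"eu", "us"})
	if err != nil {
		panic(err)
	}

	// ...convert it to the serializable form stored in job specs...
	serializable := model.ToLabelSelectorRequirements(*req)

	// ...and back again when it is time to match against node labels.
	restored, err := model.FromLabelSelectorRequirements(serializable...)
	if err != nil {
		panic(err)
	}
	fmt.Println(restored[0].String())
}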

func ToLabelSelectorRequirements

func ToLabelSelectorRequirements(requirements ...labels.Requirement) []LabelSelectorRequirement

func (LabelSelectorRequirement) String added in v1.0.4

func (r LabelSelectorRequirement) String() string

type LogsPayload added in v0.3.26

type LogsPayload struct {
	// the id of the client that is requesting the logs
	ClientID string `json:"ClientID,omitempty" validate:"required"`

	// the job id of the job to be shown
	JobID string `json:"JobID,omitempty" validate:"required"`

	// the execution to be shown
	ExecutionID string `json:"ExecutionID,omitempty" validate:"required"`

	// whether the logs history is required
	WithHistory bool `json:"WithHistory,omitempty"`

	// whether the logs should be followed after the current logs are shown
	Follow bool `json:"Follow,omitempty"`
}

func (LogsPayload) GetClientID added in v0.3.26

func (j LogsPayload) GetClientID() string

type Metadata

type Metadata struct {
	// The unique global ID of this job in the bacalhau network.
	ID string `json:"ID,omitempty" example:"92d5d4ee-3765-4f78-8353-623f5f26df08"`

	// Time the job was submitted to the bacalhau network.
	CreatedAt time.Time `json:"CreatedAt,omitempty" example:"2022-11-17T13:29:01.871140291Z"`

	// The ID of the client that created this job.
	ClientID string `json:"ClientID,omitempty" example:"ac13188e93c97a9c2e7cf8e86c7313156a73436036f30da1ececc2ce79f9ea51"`

	Requester JobRequester `json:"Requester,omitempty"`
}

type Millicores

type Millicores int

A Millicores represents a thousandth of a CPU core, which is a unit of measure used by Kubernetes. See also https://github.com/BTBurke/k8sresource.

const (
	Millicore Millicores = 1
	Core      Millicores = 1000
)

func (Millicores) String

func (m Millicores) String() string

String returns a string representation of this Millicore, which is either an integer if this Millicore represents a whole number of cores or the number of Millicores suffixed with "m".
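
Based on that documented behaviour, a small sketch (import path assumed):

package main

import (
	"fmt"

	"github.com/bacalhau-project/bacalhau/pkg/model" // assumed import path
)

func main() {
	fmt.Println((2 * model.Core).String())       // "2"     - a whole number of cores
	fmt.Println(model.Millicores(1500).String()) // "1500m" - fractional cores keep the suffix
}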

type Network

type Network int
const (
	// NetworkNone specifies that the job does not require networking.
	NetworkNone Network = iota

	// NetworkFull specifies that the job requires unfiltered raw IP networking.
	NetworkFull

	// NetworkHTTP specifies that the job requires HTTP networking to certain domains.
	//
	// The model is: the job specifier submits a job with the domain(s) it will
	// need to communicate with, the compute provider uses this to make some
	// decision about the risk of the job and bids accordingly, and then at run
	// time the traffic is limited to only the domain(s) specified.
	//
	// As a command, something like:
	//
	//  bacalhau docker run --network=http --domain=crates.io --domain=github.com -i ipfs://Qmy1234myd4t4,dst=/code rust/compile
	//
	// The “risk” for the compute provider is that the job does something that
	// violates its terms, the terms of its hosting provider or ISP, or even the
	// law in its jurisdiction (e.g. accessing and spreading illegal content,
	// performing cyberattacks). So the same sort of risk as operating a Tor
	// exit node.
	//
	// The risk for the job specifier is that we are operating in an environment
	// they are paying for, so there is an incentive to hijack that environment
	// (e.g. via a compromised package download that runs a crypto miner on
	// install, and uses up all the paid-for job time). Having the traffic
	// enforced to only domains specified makes those sorts of attacks much
	// trickier and less valuable.
	//
	// The compute provider might well enforce its limits by other means, but
	// having the domains specified up front allows it to skip bidding on jobs
	// it knows will fail in its executor. So this is hopefully a better UX for
	// job specifiers who can have their job picked up only by someone who will
	// run it successfully.
	NetworkHTTP
)

func ParseNetwork

func ParseNetwork(s string) (Network, error)

func (Network) MarshalText

func (n Network) MarshalText() ([]byte, error)

func (Network) String

func (i Network) String() string

func (*Network) UnmarshalText

func (n *Network) UnmarshalText(text []byte) (err error)

type NetworkConfig

type NetworkConfig struct {
	Type    Network  `json:"Type"`
	Domains []string `json:"Domains,omitempty"`
}

func (NetworkConfig) Disabled

func (n NetworkConfig) Disabled() bool

Disabled returns whether network connections should be completely disabled according to this config.

func (NetworkConfig) DomainSet

func (n NetworkConfig) DomainSet() []string

DomainSet returns the "unique set" of domains from the network config. Domains listed multiple times and any subdomain that is also matched by a wildcard is removed.

This is something of an implementation detail – it matches the behavior expected by our Docker HTTP gateway, which complains and/or fails to start if these requirements are not met.

func (NetworkConfig) IsValid

func (n NetworkConfig) IsValid() (err error)

IsValid returns an error if any of the fields do not pass validation, or nil otherwise.

type NodeInfo

type NodeInfo struct {
	BacalhauVersion BuildVersionInfo  `json:"BacalhauVersion"`
	PeerInfo        peer.AddrInfo     `json:"PeerInfo"`
	NodeType        NodeType          `json:"NodeType"`
	Labels          map[string]string `json:"Labels"`
	ComputeNodeInfo *ComputeNodeInfo  `json:"ComputeNodeInfo"`
}

func (NodeInfo) IsComputeNode

func (n NodeInfo) IsComputeNode() bool

IsComputeNode returns true if the node is a compute node

type NodeInfoProvider

type NodeInfoProvider interface {
	GetNodeInfo(ctx context.Context) NodeInfo
}

type NodeType

type NodeType int
const (
	NodeTypeRequester NodeType = iota
	NodeTypeCompute
)

func (NodeType) String

func (i NodeType) String() string

type NoopTask

type NoopTask struct{}

func (NoopTask) UnmarshalInto

func (n NoopTask) UnmarshalInto(with string, spec *Spec) error

type PublicKey

type PublicKey []byte

func (PublicKey) MarshalText

func (pk PublicKey) MarshalText() ([]byte, error)

func (PublicKey) String added in v1.0.4

func (pk PublicKey) String() string

func (*PublicKey) UnmarshalText

func (pk *PublicKey) UnmarshalText(text []byte) error

type PublishedResult

type PublishedResult struct {
	NodeID string      `json:"NodeID,omitempty"`
	Data   StorageSpec `json:"Data,omitempty"`
}

PublishedResult is a wrapper for a StorageSpec that has been published by a compute provider - it keeps info about the host job that led to the given storage spec being published

type Publisher

type Publisher int
const (
	PublisherNoop Publisher
	PublisherIpfs
	PublisherEstuary
	PublisherS3
)

func ParsePublisher

func ParsePublisher(str string) (Publisher, error)

func PublisherTypes

func PublisherTypes() []Publisher

func (Publisher) MarshalText

func (p Publisher) MarshalText() ([]byte, error)

func (Publisher) String

func (p Publisher) String() string

func (*Publisher) UnmarshalText

func (p *Publisher) UnmarshalText(text []byte) (err error)

type PublisherSpec added in v0.3.26

type PublisherSpec struct {
	Type   Publisher              `json:"Type,omitempty"`
	Params map[string]interface{} `json:"Params,omitempty"`
}

type Resource

type Resource struct {
	IPFS *IPFSResource
	HTTP *HTTPResource
}

type ResourceSpec

type ResourceSpec struct {
	Cpu    Millicores //nolint:stylecheck // name required by IPLD
	Disk   datasize.ByteSize
	Memory datasize.ByteSize
	Gpu    int
}

type ResourceUsageConfig

type ResourceUsageConfig struct {
	// https://github.com/BTBurke/k8sresource string
	CPU string `json:"CPU,omitempty" yaml:"CPU"`
	// github.com/c2h5oh/datasize string
	Memory string `json:"Memory,omitempty" yaml:"Memory"`

	Disk string `json:"Disk,omitempty" yaml:"Disk"`
	GPU  string `json:"GPU" yaml:"GPU"` // unsigned integer string

}

type ResourceUsageData

type ResourceUsageData struct {
	// cpu units
	CPU float64 `json:"CPU,omitempty" example:"9.600000000000001"`
	// bytes
	Memory uint64 `json:"Memory,omitempty" example:"27487790694"`
	// bytes
	Disk uint64 `json:"Disk,omitempty" example:"212663867801"`
	GPU  uint64 `json:"GPU,omitempty" example:"1"` //nolint:lll // Support whole GPUs only, like https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/
}

these are the parsed numeric values for ResourceUsageConfig - CPU in cores, Memory and Disk in bytes, and GPU as a whole-unit count

func ParseResourceUsageConfig added in v1.0.4

func ParseResourceUsageConfig(usage ResourceUsageConfig) ResourceUsageData
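
ParseResourceUsageConfig turns the human-readable strings of a ResourceUsageConfig into the numeric ResourceUsageData values. A sketch with illustrative values (import path assumed; the string formats follow the k8sresource and datasize libraries linked above):

package main

import (
	"fmt"

	"github.com/bacalhau-project/bacalhau/pkg/model" // assumed import path
)

func main() {
	usage := model.ParseResourceUsageConfig(model.ResourceUsageConfig{
		CPU:    "500m", // half a core
		Memory: "1GB",
		Disk:   "10GB",
		GPU:    "1",
	})
	// CPU in cores, Memory/Disk in bytes, GPU as a whole-unit count.
	fmt.Println(usage.CPU, usage.Memory, usage.Disk, usage.GPU)
}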

func (ResourceUsageData) Add

func (ResourceUsageData) Intersect

func (ResourceUsageData) IsZero

func (r ResourceUsageData) IsZero() bool

func (ResourceUsageData) LessThan

func (r ResourceUsageData) LessThan(other ResourceUsageData) bool

func (ResourceUsageData) LessThanEq

func (r ResourceUsageData) LessThanEq(other ResourceUsageData) bool

func (ResourceUsageData) Max

func (ResourceUsageData) Multi

func (ResourceUsageData) String

func (r ResourceUsageData) String() string

return string representation of ResourceUsageData

func (ResourceUsageData) Sub

type ResourceUsageProfile

type ResourceUsageProfile struct {
	// how many resources does the job want to consume
	Job ResourceUsageData `json:"Job,omitempty"`
	// how many resources is the system currently using
	SystemUsing ResourceUsageData `json:"SystemUsing,omitempty"`
	// what is the total amount of resources available to the system
	SystemTotal ResourceUsageData `json:"SystemTotal,omitempty"`
}

type RunCommandResult

type RunCommandResult struct {
	// stdout of the run. Yaml provided for `describe` output
	STDOUT string `json:"stdout"`

	// bool describing if stdout was truncated
	StdoutTruncated bool `json:"stdouttruncated"`

	// stderr of the run.
	STDERR string `json:"stderr"`

	// bool describing if stderr was truncated
	StderrTruncated bool `json:"stderrtruncated"`

	// exit code of the run.
	ExitCode int `json:"exitCode"`

	// Runner error
	ErrorMsg string `json:"runnerError"`
}

func NewRunCommandResult

func NewRunCommandResult() *RunCommandResult

type S3StorageSpec added in v0.3.26

type S3StorageSpec struct {
	Bucket         string `json:"Bucket,omitempty"`
	Key            string `json:"Key,omitempty"`
	ChecksumSHA256 string `json:"Checksum,omitempty"`
	VersionID      string `json:"VersionID,omitempty"`
	Endpoint       string `json:"Endpoint,omitempty"`
	Region         string `json:"Region,omitempty"`
}

type Schema

type Schema schema.TypeSystem
var (
	// The UCAN Task schema is the standardized Invocation IPLD schema, defined
	// by https://github.com/ucan-wg/invocation.
	UCANTaskSchema *Schema = load(ucanTaskSchemaPath)

	// The Bacalhau schema includes the Bacalhau specific extensions to the UCAN
	// Task IPLD spec, i.e. input structures for specific job types.
	BacalhauTaskSchema *Schema = load(bacalhauTaskSchemaPath)
)

func (*Schema) GetSchemaType

func (s *Schema) GetSchemaType(obj interface{}) schema.Type

GetSchemaType returns the IPLD type from the schema for the passed Go object. If the type is not in the schema, it returns nil.

func (*Schema) GetSchemaTypeName

func (s *Schema) GetSchemaTypeName(obj interface{}) string

GetSchemaTypeName returns the name of the corresponding IPLD type in the schema for the passed Go object. If a name cannot be determined for the type, it returns an empty string. Note that it may return a non-empty string even if the type is not actually present in the schema.

type Spec

type Spec struct {
	// Deprecated: use EngineSpec.
	Engine Engine `json:"Engine,omitempty"`

	EngineSpec EngineSpec `json:"EngineSpec,omitempty"`

	// Deprecated: use PublisherSpec instead
	Publisher     Publisher     `json:"Publisher,omitempty"`
	PublisherSpec PublisherSpec `json:"PublisherSpec,omitempty"`

	// Deprecated: use EngineSpec.
	Docker JobSpecDocker `json:"Docker,omitempty"`
	// Deprecated: use EngineSpec.
	Wasm JobSpecWasm `json:"Wasm,omitempty"`

	// the compute (cpu, ram) resources this job requires
	Resources ResourceUsageConfig `json:"Resources,omitempty"`

	// The type of networking access that the job needs
	Network NetworkConfig `json:"Network,omitempty"`

	// How long a job can run in seconds before it is killed.
	// This includes the time required to run, verify and publish results
	Timeout int64 `json:"Timeout,omitempty"`

	// the data volumes we will read in the job
	// for example "read this ipfs cid"
	Inputs []StorageSpec `json:"Inputs,omitempty"`

	// the data volumes we will write in the job
	// for example "write the results to ipfs"
	Outputs []StorageSpec `json:"Outputs,omitempty"`

	// Annotations on the job - could be user or machine assigned
	Annotations []string `json:"Annotations,omitempty"`

	// NodeSelectors is a selector which must be true for the compute node to run this job.
	NodeSelectors []LabelSelectorRequirement `json:"NodeSelectors,omitempty"`

	// Do not track specified by the client
	DoNotTrack bool `json:"DoNotTrack,omitempty"`

	// The deal the client has made, such as which job bids they have accepted.
	Deal Deal `json:"Deal,omitempty"`
}

Spec is a complete specification of a job that can be run on some execution provider.

func (*Spec) AllStorageSpecs

func (s *Spec) AllStorageSpecs() []*StorageSpec

Return pointers to all the storage specs in the spec.

func (*Spec) GetTimeout

func (s *Spec) GetTimeout() time.Duration

Return timeout duration

type StateChange added in v0.3.24

type StateChange[StateType any] struct {
	Previous StateType `json:"Previous,omitempty"`
	New      StateType `json:"New,omitempty"`
}

StateChange represents a change in state of one of the state types.

type StorageSourceType

type StorageSourceType int

StorageSourceType identifies somewhere we can get data from; e.g. IPFS and S3 are storage sources. There can be multiple drivers for the same source, e.g. ipfs fuse vs ipfs api copy.

const (
	StorageSourceIPFS StorageSourceType
	StorageSourceRepoClone
	StorageSourceRepoCloneLFS
	StorageSourceURLDownload
	StorageSourceEstuary
	StorageSourceInline
	StorageSourceLocalDirectory
	StorageSourceS3
)

func ParseStorageSourceType

func ParseStorageSourceType(str string) (StorageSourceType, error)

func StorageSourceTypes

func StorageSourceTypes() []StorageSourceType

func (StorageSourceType) MarshalText

func (ss StorageSourceType) MarshalText() ([]byte, error)

func (StorageSourceType) String

func (ss StorageSourceType) String() string

func (*StorageSourceType) UnmarshalText

func (ss *StorageSourceType) UnmarshalText(text []byte) (err error)

type StorageSpec

type StorageSpec struct {
	// StorageSource is the abstract source of the data. E.g. a storage source
	// might be a URL download, but doesn't specify how the execution engine
	// does the download or what it will do with the downloaded data.
	StorageSource StorageSourceType `json:"StorageSource,omitempty"`

	// Name of the spec's data, for reference.
	Name string `json:"Name,omitempty" example:"job-9304c616-291f-41ad-b862-54e133c0149e-host-QmdZQ7ZbhnvWY1J12XYKGHApJ6aufKyLNSvf8jZBrBaAVL"` //nolint:lll

	// The unique ID of the data, where it makes sense (for example, in an
	// IPFS storage spec this will be the data's CID).
	// NOTE: The below is capitalized to match IPFS & IPLD (even though it's out of golang fmt)
	CID string `json:"CID,omitempty" example:"QmTVmC7JBD2ES2qGPqBNVWnX1KeEPNrPGb7rJ8cpFgtefe"`

	// Source URL of the data
	URL string `json:"URL,omitempty"`

	S3 *S3StorageSpec `json:"S3,omitempty"`

	// URL of the git Repo to clone
	Repo string `json:"Repo,omitempty"`

	// The path of the host data if we are using local directory paths
	SourcePath string `json:"SourcePath,omitempty"`

	// Allow write access for locally mounted inputs
	ReadWrite bool `json:"ReadWrite,omitempty"`

	// The path that the spec's data should be mounted on, where it makes
	// sense (for example, in a Docker storage spec this will be a filesystem
	// path).
	Path string `json:"Path,omitempty"`

	// Additional properties specific to each driver
	Metadata map[string]string `json:"Metadata,omitempty"`
}

StorageSpec represents some data on a storage engine. Storage engines are specific to particular execution engines, as different execution engines will mount data in different ways.
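
For illustration, a sketch of an IPFS input volume and an output volume as they might appear in Spec.Inputs and Spec.Outputs (the CID is the example value shown above; import path assumed):

package main

import (
	"github.com/bacalhau-project/bacalhau/pkg/model" // assumed import path
)

func main() {
	// Data fetched from IPFS and mounted into the job at /inputs.
	input := model.StorageSpec{
		StorageSource: model.StorageSourceIPFS,
		Name:          "my-dataset",
		CID:           "QmTVmC7JBD2ES2qGPqBNVWnX1KeEPNrPGb7rJ8cpFgtefe",
		Path:          "/inputs",
	}

	// Results collected from /outputs after the job completes.
	output := model.StorageSpec{
		Name: "outputs",
		Path: "/outputs",
	}

	_ = []model.StorageSpec{input, output}
}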

type TargetingMode added in v1.0.4

type TargetingMode bool
const (
	TargetAny TargetingMode = false
	TargetAll TargetingMode = true
)

func ParseTargetingMode added in v1.0.4

func ParseTargetingMode(s string) (TargetingMode, error)

func (TargetingMode) String added in v1.0.4

func (t TargetingMode) String() string

type Task

type Task struct {
	With   string
	Do     TaskType
	Inputs datamodel.Node
	Meta   IPLDMap[string, datamodel.Node]
}

func (*Task) ToSpec

func (task *Task) ToSpec() (*Spec, error)

type TaskType

type TaskType string
const (
	TaskTypeDocker TaskType = "docker/run"
	TaskTypeWasm   TaskType = "wasm32-wasi/run"
	TaskTypeNoop   TaskType = "noop"
)

type TestFatalErrorHandlerContents

type TestFatalErrorHandlerContents struct {
	Message string
	Code    int
}

type VerificationResult

type VerificationResult struct {
	Complete bool `json:"Complete,omitempty"`
	Result   bool `json:"Result,omitempty"`
}

we need to use a struct for the result because: a) otherwise we don't know if VerificationResult==false means "I've not verified yet" or "verification failed" b) we might want to add further fields to the result later

type WasmEngineBuilder added in v1.0.4

type WasmEngineBuilder struct {
	// contains filtered or unexported fields
}

WasmEngineBuilder is a struct used for constructing an EngineSpec object specifically for WebAssembly (Wasm) engines using the Builder pattern. It embeds an EngineBuilder object for handling the common builder methods.

func NewWasmEngineBuilder added in v1.0.4

func NewWasmEngineBuilder(entryModule StorageSpec) *WasmEngineBuilder

NewWasmEngineBuilder function initializes a new WasmEngineBuilder instance. It sets the engine type to engine.EngineWasm.String() and entry module as per the input argument.

func (*WasmEngineBuilder) Build added in v1.0.4

func (b *WasmEngineBuilder) Build() EngineSpec

Build method constructs the final EngineSpec object by calling the embedded EngineBuilder's Build method.

func (*WasmEngineBuilder) WithEntrypoint added in v1.0.4

func (b *WasmEngineBuilder) WithEntrypoint(e string) *WasmEngineBuilder

WithEntrypoint is a builder method that sets the WebAssembly engine's entrypoint. It returns the WasmEngineBuilder for further chaining of builder methods.

func (*WasmEngineBuilder) WithEnvironmentVariables added in v1.0.4

func (b *WasmEngineBuilder) WithEnvironmentVariables(e map[string]string) *WasmEngineBuilder

WithEnvironmentVariables is a builder method that sets the WebAssembly engine's environment variables. It returns the WasmEngineBuilder for further chaining of builder methods.

func (*WasmEngineBuilder) WithImportModules added in v1.0.4

func (b *WasmEngineBuilder) WithImportModules(e ...StorageSpec) *WasmEngineBuilder

WithImportModules is a builder method that sets the WebAssembly engine's import modules. It returns the WasmEngineBuilder for further chaining of builder methods.

func (*WasmEngineBuilder) WithParameters added in v1.0.4

func (b *WasmEngineBuilder) WithParameters(e ...string) *WasmEngineBuilder

WithParameters is a builder method that sets the WebAssembly engine's parameters. It returns the WasmEngineBuilder for further chaining of builder methods.
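
Putting the builder methods above together, a sketch of constructing a Wasm EngineSpec (the CID and values are illustrative; import path assumed):

package main

import (
	"github.com/bacalhau-project/bacalhau/pkg/model" // assumed import path
)

func main() {
	entryModule := model.StorageSpec{
		StorageSource: model.StorageSourceIPFS,
		CID:           "QmTVmC7JBD2ES2qGPqBNVWnX1KeEPNrPGb7rJ8cpFgtefe", // the compiled .wasm module
	}

	engineSpec := model.NewWasmEngineBuilder(entryModule).
		WithEntrypoint("_start").
		WithParameters("input.txt").
		WithEnvironmentVariables(map[string]string{"FOO": "bar"}).
		Build()

	_ = engineSpec // typically assigned to Spec.EngineSpec
}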

type WasmEngineSpec added in v1.0.4

type WasmEngineSpec struct {
	// EntryModule is a Spec containing the WASM code to start running.
	EntryModule StorageSpec `json:"EntryModule,omitempty"`

	// Entrypoint is the name of the function in the EntryModule to call to run the job.
	// For WASI jobs, this should be `_start`, but jobs can choose to call other WASM functions instead.
	// Entrypoint must be a zero-parameter zero-result function.
	Entrypoint string `json:"EntryPoint,omitempty"`

	// Parameters contains arguments supplied to the program (i.e. as ARGV).
	Parameters []string `json:"Parameters,omitempty"`

	// EnvironmentVariables contains variables available in the environment of the running program.
	EnvironmentVariables map[string]string `json:"EnvironmentVariables,omitempty"`

	// ImportModules is a slice of StorageSpec's containing WASM modules whose exports will be available as imports
	// to the EntryModule.
	ImportModules []StorageSpec `json:"ImportModules,omitempty"`
}

WasmEngineSpec contains necessary parameters to execute a wasm job.

type WasmInputs

type WasmInputs struct {
	Entrypoint string
	Parameters []string
	Modules    []Resource
	Mounts     IPLDMap[string, Resource] // Resource
	Outputs    IPLDMap[string, datamodel.Node]
	Env        IPLDMap[string, string]
}

func (*WasmInputs) UnmarshalInto

func (wasm *WasmInputs) UnmarshalInto(with string, spec *Spec) error

UnmarshalInto implements taskUnmarshal
