Documentation ¶
Index ¶
- Constants
- func ConfirmMaxSliceSize[T any](t T, maxSize int) error
- func EngineNames() []string
- func GetShardID(jobID string, shardIndex int) string
- func IsValidEngine(e Engine) bool
- func IsValidJobState(s JobStateType) bool
- func IsValidPublisher(publisherType Publisher) bool
- func IsValidStorageSourceType(sourceType StorageSourceType) bool
- func IsValidVerifier(verifierType Verifier) bool
- func JSONMarshalIndentWithMax[T any](t T, indentSpaces int) ([]byte, error)
- func JSONMarshalWithMax[T any](t T) ([]byte, error)
- func JSONUnmarshalWithMax[T any](b []byte, t *T) error
- func JobStateTypeNames() []string
- func PublisherNames() []string
- func StorageSourceNames() []string
- func VerifierNames() []string
- func YAMLMarshalWithMax[T any](t T) ([]byte, error)
- func YAMLUnmarshalWithMax[T any](b []byte, t *T) error
- type APIVersion
- type BuildVersionInfo
- type Deal
- type DebugInfo
- type DebugInfoProvider
- type Engine
- type Job
- type JobCreatePayload
- type JobEvent
- type JobEventType
- type JobExecutionPlan
- type JobLocalEvent
- type JobLocalEventType
- type JobNodeState
- type JobSelectionDataLocality
- type JobSelectionPolicy
- type JobShard
- type JobShardState
- type JobShardingConfig
- type JobSpecDocker
- type JobSpecLanguage
- type JobSpecWasm
- type JobState
- type JobStateType
- func (s JobStateType) HasPassedBidAcceptedStage() bool
- func (s JobStateType) IsComplete() bool
- func (s JobStateType) IsError() bool
- func (s JobStateType) IsTerminal() bool
- func (s JobStateType) MarshalText() ([]byte, error)
- func (i JobStateType) String() string
- func (s *JobStateType) UnmarshalText(text []byte) (err error)
- type JobWithInfo
- type KeyInt
- type KeyString
- type PublicKey
- type PublishedResult
- type Publisher
- type ResourceUsageConfig
- type ResourceUsageData
- func (r ResourceUsageData) Add(other ResourceUsageData) ResourceUsageData
- func (r ResourceUsageData) Intersect(other ResourceUsageData) ResourceUsageData
- func (r ResourceUsageData) IsZero() bool
- func (r ResourceUsageData) LessThanEq(other ResourceUsageData) bool
- func (r ResourceUsageData) Max(other ResourceUsageData) ResourceUsageData
- func (r ResourceUsageData) Multi(factor float64) ResourceUsageData
- func (r ResourceUsageData) String() string
- func (r ResourceUsageData) Sub(other ResourceUsageData) ResourceUsageData
- type ResourceUsageProfile
- type RunCommandResult
- type Spec
- type StorageSourceType
- type StorageSpec
- type TestFatalErrorHandlerContents
- type VerificationResult
- type Verifier
Constants ¶
const (
    TracerAttributeNameNodeID = "nodeid"
    TracerAttributeNameJobID  = "jobid"
)
const JSONIndentSpaceNumber = 4
const MaxNumberOfObjectsToSerialize = 1000
Arbitrarily choosing 1000 jobs to serialize - this is a pretty high limit.
const MaxSerializedStringInput = int(10 * datasize.MB)
const MaxSerializedStringOutput = int(10 * datasize.MB)
const ShortIDLength = 8
Variables ¶
This section is empty.
Functions ¶
func ConfirmMaxSliceSize ¶
func EngineNames ¶
func EngineNames() []string
func GetShardID ¶
func IsValidEngine ¶
func IsValidJobState ¶
func IsValidJobState(s JobStateType) bool
tells you if this job state is a valid one
func IsValidPublisher ¶
func IsValidStorageSourceType ¶
func IsValidStorageSourceType(sourceType StorageSourceType) bool
func IsValidVerifier ¶
func JSONMarshalWithMax ¶
func JSONUnmarshalWithMax ¶
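A minimal sketch of a size-guarded round trip using these helpers, assuming the package is imported as model:

func roundTripJob(job model.Job) (model.Job, error) {
    // marshal with the package's serialization size limits applied
    data, err := model.JSONMarshalWithMax(job)
    if err != nil {
        return model.Job{}, err
    }
    // unmarshal with the corresponding input size limit applied
    var out model.Job
    if err := model.JSONUnmarshalWithMax(data, &out); err != nil {
        return model.Job{}, err
    }
    return out, nil
}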
func JobStateTypeNames ¶
func JobStateTypeNames() []string
func PublisherNames ¶
func PublisherNames() []string
func StorageSourceNames ¶
func StorageSourceNames() []string
func VerifierNames ¶
func VerifierNames() []string
func YAMLMarshalWithMax ¶
func YAMLUnmarshalWithMax ¶
Types ¶
type APIVersion ¶
type APIVersion int
const (
    V1alpha1 APIVersion
    V1beta1
)
func APIVersionLatest ¶
func APIVersionLatest() APIVersion
func (APIVersion) String ¶
func (i APIVersion) String() string
type BuildVersionInfo ¶
type BuildVersionInfo struct {
    Major      string    `json:"major,omitempty" example:"0"`
    Minor      string    `json:"minor,omitempty" example:"3"`
    GitVersion string    `json:"gitversion" example:"v0.3.12"`
    GitCommit  string    `json:"gitcommit" example:"d612b63108f2b5ce1ab2b9e02444eb1dac1d922d"`
    BuildDate  time.Time `json:"builddate" example:"2022-11-16T14:03:31Z"`
    GOOS       string    `json:"goos" example:"linux"`
    GOARCH     string    `json:"goarch" example:"amd64"`
}
BuildVersionInfo is the version of a Bacalhau binary (either client or server)
type Deal ¶
type Deal struct {
    // The maximum number of concurrent compute node bids that will be
    // accepted by the requester node on behalf of the client.
    Concurrency int `json:"Concurrency,omitempty"`
    // The number of nodes that must agree on a verification result.
    // This is used by the different verifiers - for example the
    // deterministic verifier requires the winning group size
    // to be at least this size.
    Confidence int `json:"Confidence,omitempty"`
    // The minimum number of bids that must be received before the Requester
    // node will randomly accept concurrency-many of them. This allows the
    // Requester node to get some level of guarantee that the execution of the
    // jobs will be spread evenly across the network (assuming that this value
    // is some large proportion of the size of the network).
    MinBids int `json:"MinBids,omitempty"`
}
The deal the client has made with the bacalhau network. This is updateable by the client who submitted the job
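For illustration, a deal asking for three concurrent executions, at least five bids before any are accepted, and a verification group of at least two (all values are arbitrary) might look like:

deal := model.Deal{
    Concurrency: 3, // run on three compute nodes at once
    Confidence:  2, // at least two nodes must agree on a verification result
    MinBids:     5, // wait for five bids before randomly accepting Concurrency of them
}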
type DebugInfoProvider ¶
type Engine ¶
type Engine int
const (
    EngineNoop Engine
    EngineDocker
    EngineWasm
    EngineLanguage   // wraps python_wasm
    EnginePythonWasm // wraps docker
)
func EngineTypes ¶
func EngineTypes() []Engine
func ParseEngine ¶
func (Engine) MarshalText ¶
func (*Engine) UnmarshalText ¶
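The signature of ParseEngine is not shown above; assuming it mirrors the other Parse* helpers (ParseEngine(str string) (Engine, error)) and that "docker" is an accepted engine name, a hedged sketch of resolving an engine from user input is (uses fmt.Errorf):

func resolveEngine(name string) (model.Engine, error) {
    engine, err := model.ParseEngine(name) // assumed signature: (string) (Engine, error)
    if err != nil {
        return model.EngineNoop, err
    }
    if !model.IsValidEngine(engine) {
        return model.EngineNoop, fmt.Errorf("unsupported engine: %v", engine)
    }
    return engine, nil
}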
type Job ¶
type Job struct {
    APIVersion string `json:"APIVersion" example:"V1beta1"`
    // The unique global ID of this job in the bacalhau network.
    ID string `json:"ID,omitempty" example:"92d5d4ee-3765-4f78-8353-623f5f26df08"`
    // The ID of the requester node that owns this job.
    RequesterNodeID string `json:"RequesterNodeID,omitempty" example:"QmXaXu9N5GNetatsvwnTfQqNtSeKAD6uCmarbh3LMRYAcF"`
    // The public key of the Requester node that created this job.
    // This can be used to encrypt messages back to the creator.
    RequesterPublicKey PublicKey `json:"RequesterPublicKey,omitempty"`
    // The ID of the client that created this job.
    ClientID string `json:"ClientID,omitempty" example:"ac13188e93c97a9c2e7cf8e86c7313156a73436036f30da1ececc2ce79f9ea51"`
    // The specification of this job.
    Spec Spec `json:"Spec,omitempty"`
    // The deal the client has made, such as which job bids they have accepted.
    Deal Deal `json:"Deal,omitempty"`
    // how this job will be executed by nodes on the network
    ExecutionPlan JobExecutionPlan `json:"ExecutionPlan,omitempty"`
    // Time the job was submitted to the bacalhau network.
    CreatedAt time.Time `json:"CreatedAt,omitempty" example:"2022-11-17T13:29:01.871140291Z"`
    // The current state of the job
    State JobState `json:"JobState,omitempty"`
    // All events associated with the job
    Events []JobEvent `json:"JobEvents,omitempty"`
    // All local events associated with the job
    LocalEvents []JobLocalEvent `json:"LocalJobEvents,omitempty"`
}
Job contains data about a job request in the bacalhau network.
func NewJob ¶
func NewJob() *Job
TODO: There's probably a better way we want to globally version APIs
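A minimal sketch of building a job with NewJob (field values are illustrative only):

job := model.NewJob()
job.APIVersion = model.APIVersionLatest().String()
job.Spec = model.Spec{Engine: model.EngineDocker} // see the fuller example under "type Spec" below
job.Deal = model.Deal{Concurrency: 1}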
type JobCreatePayload ¶
type JobCreatePayload struct {
    // the id of the client that is submitting the job
    ClientID string `json:"ClientID,omitempty" validate:"required"`
    // The job specification:
    Job *Job `json:"Job,omitempty" validate:"required"`
    // Optional base64-encoded tar file that will be pinned to IPFS and
    // mounted as storage for the job. Not part of the spec so we don't
    // flood the transport layer with it (potentially very large).
    Context string `json:"Context,omitempty" validate:"optional"`
}
type JobEvent ¶
type JobEvent struct {
    // APIVersion of the Job
    APIVersion string `json:"APIVersion,omitempty" example:"V1beta1"`
    JobID      string `json:"JobID,omitempty" example:"9304c616-291f-41ad-b862-54e133c0149e"`
    // which shard this event is for
    ShardIndex int `json:"ShardIndex,omitempty"`
    // optional clientID if this is an externally triggered event (like create job)
    ClientID string `json:"ClientID,omitempty" example:"ac13188e93c97a9c2e7cf8e86c7313156a73436036f30da1ececc2ce79f9ea51"`
    // the node that emitted this event
    SourceNodeID string `json:"SourceNodeID,omitempty" example:"QmXaXu9N5GNetatsvwnTfQqNtSeKAD6uCmarbh3LMRYAcF"`
    // the node that this event is for
    // e.g. "AcceptJobBid" was emitted by the requester but targets a compute node
    TargetNodeID string       `json:"TargetNodeID,omitempty" example:"QmdZQ7ZbhnvWY1J12XYKGHApJ6aufKyLNSvf8jZBrBaAVL"`
    EventName    JobEventType `json:"EventName,omitempty"`
    // this is only defined in "create" events
    Spec Spec `json:"Spec,omitempty"`
    // this is only defined in "create" events
    JobExecutionPlan JobExecutionPlan `json:"JobExecutionPlan,omitempty"`
    // this is only defined in "update_deal" events
    Deal                 Deal               `json:"Deal,omitempty"`
    Status               string             `json:"Status,omitempty" example:"Got results proposal of length: 0"`
    VerificationProposal []byte             `json:"VerificationProposal,omitempty"`
    VerificationResult   VerificationResult `json:"VerificationResult,omitempty"`
    PublishedResult      StorageSpec        `json:"PublishedResult,omitempty"`
    EventTime            time.Time          `json:"EventTime,omitempty" example:"2022-11-17T13:32:55.756658941Z"`
    SenderPublicKey      PublicKey          `json:"SenderPublicKey,omitempty"`
    // RunOutput of the job
    RunOutput *RunCommandResult `json:"RunOutput,omitempty"`
}
we emit these to other nodes so they update their state locally and can emit events locally
type JobEventType ¶
type JobEventType int
const (
    // Job has been created by the client and is communicating with the requester node
    JobEventInitialSubmission JobEventType
    // Job has been created on the requester node
    JobEventCreated
    // the concurrency or other mutable properties of the job were
    // changed by the client
    JobEventDealUpdated
    // a compute node bid on a job
    JobEventBid
    // a requester node accepted or rejected a job bid
    JobEventBidAccepted
    JobEventBidRejected
    // a compute node canceled a job bid
    JobEventBidCancelled
    // a compute node progressed with running a job
    // this is called periodically for running jobs
    // to give the client confidence the job is still running
    // this is like a heartbeat for running jobs
    JobEventRunning
    // a compute node had an error running a job
    JobEventComputeError
    // a compute node completed running a job
    JobEventResultsProposed
    // a requester node accepted the results from a node for a job
    JobEventResultsAccepted
    // a requester node rejected the results from a node for a job
    JobEventResultsRejected
    // once the results have been accepted or rejected
    // the compute node will publish them and issue this event
    JobEventResultsPublished
    // a requester node declared an error running a job
    JobEventError
    // the requester node gives a compute node permission
    // to forget about the job and free any resources it might
    // currently be reserving - this can happen if a compute node
    // bids when a job has already completed; if the compute node does
    // not hear back it will be stuck reserving the resources for the job
    JobEventInvalidRequest
)
func JobEventTypes ¶
func JobEventTypes() []JobEventType
func ParseJobEventType ¶
func ParseJobEventType(str string) (JobEventType, error)
func (JobEventType) IsIgnorable ¶
func (je JobEventType) IsIgnorable() bool
IsIgnorable returns true if given event type signals that a node can safely ignore the rest of the job's lifecycle. This is the case for events caused by a node's bid being rejected.
func (JobEventType) IsTerminal ¶
func (je JobEventType) IsTerminal() bool
IsTerminal returns true if the given event type signals the end of the lifecycle of a job. After this, all nodes can safely ignore the job.
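A sketch of how a node might use these two predicates while consuming a job's event stream (handleEvent is a hypothetical callback):

func consume(events []model.JobEvent, handleEvent func(model.JobEvent)) {
    for _, ev := range events {
        handleEvent(ev)
        if ev.EventName.IsTerminal() || ev.EventName.IsIgnorable() {
            // nothing further will happen to this job from this node's
            // point of view, so stop tracking it
            break
        }
    }
}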
func (JobEventType) MarshalText ¶
func (je JobEventType) MarshalText() ([]byte, error)
func (JobEventType) String ¶
func (i JobEventType) String() string
func (*JobEventType) UnmarshalText ¶
func (je *JobEventType) UnmarshalText(text []byte) (err error)
type JobExecutionPlan ¶
type JobExecutionPlan struct {
    // how many shards are there in total for this job
    // we are expecting this number x concurrency total
    // JobShardState objects for this job
    TotalShards int `json:"ShardsTotal,omitempty"`
}
type JobLocalEvent ¶
type JobLocalEvent struct {
    EventName    JobLocalEventType `json:"EventName,omitempty"`
    JobID        string            `json:"JobID,omitempty"`
    ShardIndex   int               `json:"ShardIndex,omitempty"`
    TargetNodeID string            `json:"TargetNodeID,omitempty"`
}
gives us a way to keep local data against a job, so the compute node and requester node control loops can keep state about a job without broadcasting it to the rest of the network
type JobLocalEventType ¶
type JobLocalEventType int
const (
    // compute node
    // this means "we have selected this job"
    // used to avoid calling external selection hooks
    // where the capacity manager says we can't quite run
    // the job yet but we will want to bid when there
    // is space
    JobLocalEventSelected JobLocalEventType
    // compute node
    // this means "we have bid" on a job where "we"
    // is the compute node
    JobLocalEventBid
    // requester node
    // used to avoid race conditions with the requester
    // node knowing which bids it's already responded to
    JobLocalEventBidAccepted
    JobLocalEventBidRejected
    // requester node
    // flag a job as having already had its verification done
    JobLocalEventVerified
)
func JobLocalEventTypes ¶
func JobLocalEventTypes() []JobLocalEventType
func ParseJobLocalEventType ¶
func ParseJobLocalEventType(str string) (JobLocalEventType, error)
func (JobLocalEventType) MarshalText ¶
func (jle JobLocalEventType) MarshalText() ([]byte, error)
func (JobLocalEventType) String ¶
func (i JobLocalEventType) String() string
func (*JobLocalEventType) UnmarshalText ¶
func (jle *JobLocalEventType) UnmarshalText(text []byte) (err error)
type JobNodeState ¶
type JobNodeState struct {
Shards map[int]JobShardState `json:"Shards,omitempty"`
}
type JobSelectionDataLocality ¶
type JobSelectionDataLocality int64
Job selection policy configuration
const (
    Local    JobSelectionDataLocality = 0
    Anywhere JobSelectionDataLocality = 1
)
type JobSelectionPolicy ¶
type JobSelectionPolicy struct {
    // this describes if we should run a job based on
    // where the data is located - i.e. if the data is "local"
    // or if the data is "anywhere"
    Locality JobSelectionDataLocality `json:"locality"`
    // should we reject jobs that don't specify any data
    // the default is "accept"
    RejectStatelessJobs bool `json:"reject_stateless_jobs"`
    // external hooks that decide if we should take on the job or not
    // if either of these are given they will override the data locality settings
    ProbeHTTP string `json:"probe_http,omitempty"`
    ProbeExec string `json:"probe_exec,omitempty"`
}
describes the rules for how a compute node selects an incoming job
func NewDefaultJobSelectionPolicy ¶
func NewDefaultJobSelectionPolicy() JobSelectionPolicy
generates a default empty job selection policy
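As an illustrative configuration (the hook URL is hypothetical), a compute node that only bids on jobs whose data is local, rejects stateless jobs, and defers the final decision to an external HTTP hook could use:

policy := model.JobSelectionPolicy{
    Locality:            model.Local,
    RejectStatelessJobs: true,
    // if given, the probe overrides the data locality settings above
    ProbeHTTP: "http://localhost:8080/should-bid",
}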
type JobShardState ¶
type JobShardState struct {
    // which node is running this shard
    NodeID string `json:"NodeId,omitempty"`
    // which shard this is
    ShardIndex int `json:"ShardIndex,omitempty"`
    // the state of the shard on this node
    State JobStateType `json:"State,omitempty"`
    // an arbitrary status message
    Status string `json:"Status,omitempty"`
    // the proposed results for this shard
    // this will be resolved by the verifier somehow
    VerificationProposal []byte             `json:"VerificationProposal,omitempty"`
    VerificationResult   VerificationResult `json:"VerificationResult,omitempty"`
    PublishedResult      StorageSpec        `json:"PublishedResults,omitempty"`
    // RunOutput of the job
    RunOutput *RunCommandResult `json:"RunOutput,omitempty"`
}
type JobShardingConfig ¶
type JobShardingConfig struct {
    // divide the inputs up into the smallest possible unit
    // for example /* would mean "all top level files or folders"
    // this being an empty string means "no sharding"
    GlobPattern string `json:"GlobPattern,omitempty"`
    // how many "items" are to be processed in each shard
    // we first apply the glob pattern which will result in a flat list of items
    // this number decides how to group that flat list into actual shards run by compute nodes
    BatchSize int `json:"BatchSize,omitempty"`
    // when using multiple input volumes
    // what path do we treat as the common mount path to apply the glob pattern to
    BasePath string `json:"GlobPatternBasePath,omitempty"`
}
describes how we chunk a job up into shards
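For example (values are illustrative), sharding over all top-level entries of the input volumes with ten entries per shard:

sharding := model.JobShardingConfig{
    GlobPattern: "/*",      // every top-level file or folder is one item
    BatchSize:   10,        // group ten items into each shard
    BasePath:    "/inputs", // assumed common mount path to apply the glob pattern to
}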
type JobSpecDocker ¶
type JobSpecDocker struct {
    // this should be pullable by docker
    Image string `json:"Image,omitempty"`
    // optionally override the default entrypoint
    Entrypoint []string `json:"Entrypoint,omitempty"`
    // a map of env to run the container with
    EnvironmentVariables []string `json:"EnvironmentVariables,omitempty"`
    // working directory inside the container
    WorkingDirectory string `json:"WorkingDirectory,omitempty"`
}
for VM style executors
type JobSpecLanguage ¶
type JobSpecLanguage struct {
    Language        string `json:"Language,omitempty"`        // e.g. python
    LanguageVersion string `json:"LanguageVersion,omitempty"` // e.g. 3.8
    // must this job be run in a deterministic context?
    Deterministic bool `json:"DeterministicExecution,omitempty"`
    // context is a tar file stored in ipfs, containing e.g. source code and requirements
    Context StorageSpec `json:"JobContext,omitempty"`
    // optional program specified on the command line, like python -c "print(1+1)"
    Command string `json:"Command,omitempty"`
    // optional program path relative to the context dir. one of Command or ProgramPath must be specified
    ProgramPath string `json:"ProgramPath,omitempty"`
    // optional requirements.txt (or equivalent) path relative to the context dir
    RequirementsPath string `json:"RequirementsPath,omitempty"`
}
for language style executors (can target docker or wasm)
type JobSpecWasm ¶
type JobSpecWasm struct {
    // The name of the function in the EntryModule to call to run the job. For
    // WASI jobs, this will always be `_start`, but jobs can choose to call
    // other WASM functions instead. The EntryPoint must be a zero-parameter
    // zero-result function.
    EntryPoint string `json:"EntryPoint,omitempty"`
    // The arguments supplied to the program (i.e. as ARGV).
    Parameters []string `json:"Parameters,omitempty"`
    // The variables available in the environment of the running program.
    EnvironmentVariables map[string]string `json:"EnvironmentVariables,omitempty"`
    // TODO #880: Other WASM modules whose exports will be available as imports
    // to the EntryModule.
    ImportModules []StorageSpec `json:"ImportModules,omitempty"`
}
Describes a raw WASM job
type JobState ¶
type JobState struct {
Nodes map[string]JobNodeState `json:"Nodes,omitempty"`
}
The state of a job across the whole network. A job will generally be in different states on different nodes - one node may be ignoring a job as its bid was rejected, while another node may be submitting results for the job to the requester node.
Each node will produce an array of JobShardState, one for each shard (jobs without a sharding config will still have sharded job states - just with a shard count of 1). Any code that is determining the current "state" of a job must look at both:
- the TotalShards of the JobExecutionPlan
- the collection of JobShardState
Note: JobState itself is not mutable - the JobExecutionPlan and JobShardState are updatable and the JobState is queried by the rest of the system.
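A sketch of that traversal - collecting shards that have reached a terminal state on any node and comparing the count against the execution plan:

func terminalShards(job model.Job) (int, int) {
    seen := map[int]bool{}
    for _, nodeState := range job.State.Nodes {
        for shardIndex, shardState := range nodeState.Shards {
            if shardState.State.IsTerminal() {
                seen[shardIndex] = true
            }
        }
    }
    // the job is finished once every shard in the execution plan is terminal
    return len(seen), job.ExecutionPlan.TotalShards
}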
type JobStateType ¶
type JobStateType int
JobStateType is the state of a job on a particular node. Note that the job will typically have different states on different nodes.
const (
    // a compute node has selected a job and has bid on it
    // we are currently waiting to hear back from the requester
    // node whether our bid was accepted or not
    JobStateBidding JobStateType
    // the bid has been accepted but we have not yet started the job
    JobStateWaiting
    // the job is in the process of running
    JobStateRunning
    // the compute node has finished execution and has communicated the ResultsProposal
    JobStateVerifying
    // a requester node has either rejected the bid or the compute node has canceled the bid
    // either way - this node will not progress with this job any more
    JobStateCancelled
    // the job had an error - this is an end state
    JobStateError
    // our results have been processed and published
    JobStateCompleted
)
these are the states a job can be in against a single node
func GetStateFromEvent ¶
func GetStateFromEvent(eventType JobEventType) JobStateType
given an event name - return a job state
func JobStateTypes ¶
func JobStateTypes() []JobStateType
func ParseJobStateType ¶
func ParseJobStateType(str string) (JobStateType, error)
func (JobStateType) HasPassedBidAcceptedStage ¶
func (s JobStateType) HasPassedBidAcceptedStage() bool
func (JobStateType) IsComplete ¶
func (s JobStateType) IsComplete() bool
IsComplete returns true if the given job has succeeded at the bid stage and has finished running. This is used to calculate whether a job has completed across all nodes: a cancellation does not count towards actually "running" the job, whereas an error does (even though it failed, it still "ran").
func (JobStateType) IsError ¶
func (s JobStateType) IsError() bool
func (JobStateType) IsTerminal ¶
func (s JobStateType) IsTerminal() bool
IsTerminal returns true if the given job type signals the end of the lifecycle of that job on a particular node. After this, the job can be safely ignored by the node.
func (JobStateType) MarshalText ¶
func (s JobStateType) MarshalText() ([]byte, error)
func (JobStateType) String ¶
func (i JobStateType) String() string
func (*JobStateType) UnmarshalText ¶
func (s *JobStateType) UnmarshalText(text []byte) (err error)
type JobWithInfo ¶
type JobWithInfo struct {
    Job            Job             `json:"Job,omitempty"`
    JobState       JobState        `json:"JobState,omitempty"`
    JobEvents      []JobEvent      `json:"JobEvents,omitempty"`
    JobLocalEvents []JobLocalEvent `json:"JobLocalEvents,omitempty"`
}
JobWithInfo is the job request + the result of attempting to run it on the network
type PublicKey ¶
type PublicKey []byte
func (PublicKey) MarshalText ¶
func (*PublicKey) UnmarshalText ¶
type PublishedResult ¶
type PublishedResult struct {
    NodeID     string      `json:"NodeID,omitempty"`
    ShardIndex int         `json:"ShardIndex,omitempty"`
    Data       StorageSpec `json:"Data,omitempty"`
}
PublishedResult is a wrapper for a StorageSpec that has been published by a compute provider - it keeps info about the host, job and shard that led to the given storage spec being published
type Publisher ¶
type Publisher int
const (
    PublisherNoop Publisher
    PublisherIpfs
    PublisherFilecoin
    PublisherEstuary
)
func ParsePublisher ¶
func PublisherTypes ¶
func PublisherTypes() []Publisher
func (Publisher) MarshalText ¶
func (*Publisher) UnmarshalText ¶
type ResourceUsageConfig ¶
type ResourceUsageConfig struct {
    // https://github.com/BTBurke/k8sresource string
    CPU string `json:"CPU,omitempty"`
    // github.com/c2h5oh/datasize string
    Memory string `json:"Memory,omitempty"`
    Disk   string `json:"Disk,omitempty"`
    GPU    string `json:"GPU"` // unsigned integer string
}
type ResourceUsageData ¶
type ResourceUsageData struct {
    // cpu units
    CPU float64 `json:"CPU,omitempty" example:"9.600000000000001"`
    // bytes
    Memory uint64 `json:"Memory,omitempty" example:"27487790694"`
    // bytes
    Disk uint64 `json:"Disk,omitempty" example:"212663867801"`
    GPU  uint64 `json:"GPU,omitempty" example:"1"` //nolint:lll // Support whole GPUs only, like https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/
}
these are the parsed numeric values for ResourceUsageConfig (CPU units, and bytes for memory and disk)
func (ResourceUsageData) Add ¶
func (r ResourceUsageData) Add(other ResourceUsageData) ResourceUsageData
func (ResourceUsageData) Intersect ¶
func (r ResourceUsageData) Intersect(other ResourceUsageData) ResourceUsageData
func (ResourceUsageData) IsZero ¶
func (r ResourceUsageData) IsZero() bool
func (ResourceUsageData) LessThanEq ¶
func (r ResourceUsageData) LessThanEq(other ResourceUsageData) bool
func (ResourceUsageData) Max ¶
func (r ResourceUsageData) Max(other ResourceUsageData) ResourceUsageData
func (ResourceUsageData) Multi ¶
func (r ResourceUsageData) Multi(factor float64) ResourceUsageData
func (ResourceUsageData) String ¶
func (r ResourceUsageData) String() string
return string representation of ResourceUsageData
func (ResourceUsageData) Sub ¶
func (r ResourceUsageData) Sub(other ResourceUsageData) ResourceUsageData
type ResourceUsageProfile ¶
type ResourceUsageProfile struct {
    // how many resources the job wants to consume
    Job ResourceUsageData `json:"Job,omitempty"`
    // how many resources the system is currently using
    SystemUsing ResourceUsageData `json:"SystemUsing,omitempty"`
    // the total amount of resources available to the system
    SystemTotal ResourceUsageData `json:"SystemTotal,omitempty"`
}
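A sketch of combining the ResourceUsageData helpers with a profile to check whether a job fits into the remaining capacity:

func fits(profile model.ResourceUsageProfile) bool {
    // free capacity = total system resources minus what is currently in use
    free := profile.SystemTotal.Sub(profile.SystemUsing)
    // the job fits if every dimension (CPU, memory, disk, GPU) is within the free capacity
    return profile.Job.LessThanEq(free)
}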
type RunCommandResult ¶
type RunCommandResult struct {
    // stdout of the run. Yaml provided for `describe` output
    STDOUT string `json:"stdout"`
    // bool describing if stdout was truncated
    StdoutTruncated bool `json:"stdouttruncated"`
    // stderr of the run.
    STDERR string `json:"stderr"`
    // bool describing if stderr was truncated
    StderrTruncated bool `json:"stderrtruncated"`
    // exit code of the run.
    ExitCode int `json:"exitCode"`
    // Runner error
    ErrorMsg string `json:"runnerError"`
}
func NewRunCommandResult ¶
func NewRunCommandResult() *RunCommandResult
type Spec ¶
type Spec struct {
    // e.g. docker or language
    Engine   Engine   `json:"Engine,omitempty"`
    Verifier Verifier `json:"Verifier,omitempty"`
    // there can be multiple publishers for the job
    Publisher Publisher `json:"Publisher,omitempty"`
    // executor specific data
    Docker   JobSpecDocker   `json:"Docker,omitempty"`
    Language JobSpecLanguage `json:"Language,omitempty"`
    Wasm     JobSpecWasm     `json:"Wasm,omitempty"`
    // the compute (cpu, ram) resources this job requires
    Resources ResourceUsageConfig `json:"Resources,omitempty"`
    // How long a job can run in seconds before it is killed.
    // This includes the time required to run, verify and publish results.
    Timeout float64 `json:"Timeout,omitempty"`
    // the data volumes we will read in the job
    // for example "read this ipfs cid"
    // TODO: #667 Replace with "Inputs", "Outputs" (note the caps) for yaml/json when we update the n.js file
    Inputs []StorageSpec `json:"inputs,omitempty"`
    // Input volumes that will not be sharded
    // for example to upload code into a base image
    // every shard will get the full range of context volumes
    Contexts []StorageSpec `json:"Contexts,omitempty"`
    // the data volumes we will write in the job
    // for example "write the results to ipfs"
    Outputs []StorageSpec `json:"outputs,omitempty"`
    // Annotations on the job - could be user or machine assigned
    Annotations []string `json:"Annotations,omitempty"`
    // the sharding config for this job
    // describes how the job might be split up into parallel shards
    Sharding JobShardingConfig `json:"Sharding,omitempty"`
    // Do not track specified by the client
    DoNotTrack bool `json:"DoNotTrack,omitempty"`
}
Spec is a complete specification of a job that can be run on some execution provider.
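An illustrative Docker spec reading one IPFS input volume and writing one output volume (the image, CID and paths are placeholders):

spec := model.Spec{
    Engine:    model.EngineDocker,
    Verifier:  model.VerifierNoop,
    Publisher: model.PublisherIpfs,
    Docker: model.JobSpecDocker{
        Image:      "ubuntu:latest",
        Entrypoint: []string{"cat", "/inputs/data.txt"},
    },
    Resources: model.ResourceUsageConfig{CPU: "1", Memory: "1GB"},
    Timeout:   300, // seconds, including verification and publishing
    Inputs: []model.StorageSpec{{
        StorageSource: model.StorageSourceIPFS,
        CID:           "Qm...", // placeholder CID
        Path:          "/inputs",
    }},
    Outputs: []model.StorageSpec{{
        Name: "outputs",
        Path: "/outputs",
    }},
}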
type StorageSourceType ¶
type StorageSourceType int
StorageSourceType is somewhere we can get data from, e.g. ipfs / S3 are storage sources. There can be multiple drivers for the same source, e.g. ipfs fuse vs ipfs api copy.
const (
    StorageSourceIPFS StorageSourceType
    StorageSourceURLDownload
    StorageSourceFilecoinUnsealed
    StorageSourceFilecoin
    StorageSourceEstuary
)
func ParseStorageSourceType ¶
func ParseStorageSourceType(str string) (StorageSourceType, error)
func StorageSourceTypes ¶
func StorageSourceTypes() []StorageSourceType
func (StorageSourceType) MarshalText ¶
func (ss StorageSourceType) MarshalText() ([]byte, error)
func (StorageSourceType) String ¶
func (i StorageSourceType) String() string
func (*StorageSourceType) UnmarshalText ¶
func (ss *StorageSourceType) UnmarshalText(text []byte) (err error)
type StorageSpec ¶
type StorageSpec struct {
    // StorageSource is the abstract source of the data. E.g. a storage source
    // might be a URL download, but doesn't specify how the execution engine
    // does the download or what it will do with the downloaded data.
    StorageSource StorageSourceType `json:"StorageSource,omitempty"`
    // Name of the spec's data, for reference.
    Name string `` //nolint:lll /* 132-byte string literal not displayed */
    // The unique ID of the data, where it makes sense (for example, in an
    // IPFS storage spec this will be the data's CID).
    // NOTE: The below is capitalized to match IPFS & IPLD (even though it's out of golang fmt)
    CID string `json:"CID,omitempty" example:"QmTVmC7JBD2ES2qGPqBNVWnX1KeEPNrPGb7rJ8cpFgtefe"`
    // Source URL of the data
    URL string `json:"URL,omitempty"`
    // The path that the spec's data should be mounted on, where it makes
    // sense (for example, in a Docker storage spec this will be a filesystem
    // path).
    // TODO: #668 Replace with "Path" (note the caps) for yaml/json when we update the n.js file
    Path string `json:"path,omitempty"`
    // Additional properties specific to each driver
    Metadata map[string]string `json:"Metadata,omitempty"`
}
StorageSpec represents some data on a storage engine. Storage engines are specific to particular execution engines, as different execution engines will mount data in different ways.
type VerificationResult ¶
type VerificationResult struct {
    Complete bool `json:"Complete,omitempty"`
    Result   bool `json:"Result,omitempty"`
}
We need to use a struct for the result because:
- otherwise we don't know if VerificationResult==false means "I've not verified yet" or "verification failed"
- we might want to add further fields to the result later
type Verifier ¶
type Verifier int
const (
    VerifierNoop Verifier
    VerifierDeterministic
)
func ParseVerifier ¶
func VerifierTypes ¶
func VerifierTypes() []Verifier
func (Verifier) MarshalText ¶
func (*Verifier) UnmarshalText ¶
Source Files ¶
- apiversion.go
- apiversion_string.go
- buildversion.go
- command.go
- debug.go
- engine.go
- engine_string.go
- job.go
- job_selection.go
- job_state.go
- jobeventtype.go
- jobeventtype_string.go
- joblocaleventtype.go
- joblocaleventtype_string.go
- jobstatetype_string.go
- public_key.go
- publisher.go
- publisher_string.go
- resource_usage.go
- storage_source.go
- storage_spec.go
- storagesourcetype_string.go
- teststructs.go
- tracer.go
- utils.go
- verifier.go
- verifier_string.go