Documentation ¶
Overview ¶
The queue service is responsible for accepting tasks and tracking their state as they are executed by workers, in order to ensure that they are eventually resolved.
This document describes the API end-points offered by the queue. These end-points target the following audiences:
- Schedulers, who create tasks to be executed,
- Workers, who execute tasks, and
- Tools that want to inspect the state of a task.
See:
How to use this package ¶
First create a Queue object:
queue := tcqueue.New(nil)
and then call one or more of queue's methods, e.g.:
err := queue.Ping(.....)
handling any errors...
if err != nil { // handle error... }
Taskcluster Schema ¶
The source code of this go package was auto-generated from the API definition at https://taskcluster-staging.net/references/queue/v1/api.json together with the input and output schemas it references, downloaded on Thu, 27 Jun 2019 at 07:22:00 UTC. The code was generated by https://github.com/taskcluster/taskcluster-client-go/blob/master/build.sh.
Index ¶
- type Action
- type Artifact
- type AzureArtifactRequest
- type AzureArtifactResponse
- type BlobArtifactRequest
- type BlobArtifactResponse
- type ClaimWorkRequest
- type ClaimWorkResponse
- type CompleteArtifactRequest
- type CountPendingTasksResponse
- type ErrorArtifactRequest
- type ErrorArtifactResponse
- type HTTPRequest
- type ListArtifactsResponse
- type ListDependentTasksResponse
- type ListProvisionersResponse
- type ListTaskGroupResponse
- type ListWorkerTypesResponse
- type ListWorkersResponse
- type MultipartPart
- type PostArtifactRequest
- type PostArtifactResponse
- type ProvisionerInformation
- type ProvisionerRequest
- type ProvisionerResponse
- type QuarantineWorkerRequest
- type Queue
- func (queue *Queue) CancelTask(taskId string) (*TaskStatusResponse, error)
- func (queue *Queue) ClaimTask(taskId, runId string, payload *TaskClaimRequest) (*TaskClaimResponse, error)
- func (queue *Queue) ClaimWork(provisionerId, workerType string, payload *ClaimWorkRequest) (*ClaimWorkResponse, error)
- func (queue *Queue) CompleteArtifact(taskId, runId, name string, payload *CompleteArtifactRequest) error
- func (queue *Queue) CreateArtifact(taskId, runId, name string, payload *PostArtifactRequest) (*PostArtifactResponse, error)
- func (queue *Queue) CreateTask(taskId string, payload *TaskDefinitionRequest) (*TaskStatusResponse, error)
- func (queue *Queue) DeclareProvisioner(provisionerId string, payload *ProvisionerRequest) (*ProvisionerResponse, error)
- func (queue *Queue) DeclareWorker(provisionerId, workerType, workerGroup, workerId string, ...) (*WorkerResponse, error)
- func (queue *Queue) DeclareWorkerType(provisionerId, workerType string, payload *WorkerTypeRequest) (*WorkerTypeResponse, error)
- func (queue *Queue) DefineTask(taskId string, payload *TaskDefinitionRequest) (*TaskStatusResponse, error)
- func (queue *Queue) GetArtifact(taskId, runId, name string) error
- func (queue *Queue) GetArtifact_SignedURL(taskId, runId, name string, duration time.Duration) (*url.URL, error)
- func (queue *Queue) GetLatestArtifact(taskId, name string) error
- func (queue *Queue) GetLatestArtifact_SignedURL(taskId, name string, duration time.Duration) (*url.URL, error)
- func (queue *Queue) GetProvisioner(provisionerId string) (*ProvisionerResponse, error)
- func (queue *Queue) GetWorker(provisionerId, workerType, workerGroup, workerId string) (*WorkerResponse, error)
- func (queue *Queue) GetWorkerType(provisionerId, workerType string) (*WorkerTypeResponse, error)
- func (queue *Queue) ListArtifacts(taskId, runId, continuationToken, limit string) (*ListArtifactsResponse, error)
- func (queue *Queue) ListDependentTasks(taskId, continuationToken, limit string) (*ListDependentTasksResponse, error)
- func (queue *Queue) ListLatestArtifacts(taskId, continuationToken, limit string) (*ListArtifactsResponse, error)
- func (queue *Queue) ListProvisioners(continuationToken, limit string) (*ListProvisionersResponse, error)
- func (queue *Queue) ListTaskGroup(taskGroupId, continuationToken, limit string) (*ListTaskGroupResponse, error)
- func (queue *Queue) ListWorkerTypes(provisionerId, continuationToken, limit string) (*ListWorkerTypesResponse, error)
- func (queue *Queue) ListWorkers(provisionerId, workerType, continuationToken, limit, quarantined string) (*ListWorkersResponse, error)
- func (queue *Queue) PendingTasks(provisionerId, workerType string) (*CountPendingTasksResponse, error)
- func (queue *Queue) Ping() error
- func (queue *Queue) QuarantineWorker(provisionerId, workerType, workerGroup, workerId string, ...) (*WorkerResponse, error)
- func (queue *Queue) ReclaimTask(taskId, runId string) (*TaskReclaimResponse, error)
- func (queue *Queue) ReportCompleted(taskId, runId string) (*TaskStatusResponse, error)
- func (queue *Queue) ReportException(taskId, runId string, payload *TaskExceptionRequest) (*TaskStatusResponse, error)
- func (queue *Queue) ReportFailed(taskId, runId string) (*TaskStatusResponse, error)
- func (queue *Queue) RerunTask(taskId string) (*TaskStatusResponse, error)
- func (queue *Queue) ScheduleTask(taskId string) (*TaskStatusResponse, error)
- func (queue *Queue) Status(taskId string) (*TaskStatusResponse, error)
- func (queue *Queue) Task(taskId string) (*TaskDefinitionResponse, error)
- type RedirectArtifactRequest
- type RedirectArtifactResponse
- type RunInformation
- type S3ArtifactRequest
- type S3ArtifactResponse
- type TaskClaim
- type TaskClaimRequest
- type TaskClaimResponse
- type TaskCredentials
- type TaskDefinitionAndStatus
- type TaskDefinitionRequest
- type TaskDefinitionResponse
- type TaskExceptionRequest
- type TaskMetadata
- type TaskReclaimResponse
- type TaskRun
- type TaskStatusResponse
- type TaskStatusStructure
- type Worker
- type WorkerAction
- type WorkerRequest
- type WorkerResponse
- type WorkerType
- type WorkerTypeAction
- type WorkerTypeRequest
- type WorkerTypeResponse
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Action ¶
type Action struct { // Actions have a "context" that is one of provisioner, worker-type, or worker, indicating // which it applies to. `context` is used by the front-end to know where to display the action. // // | `context` | Page displayed | // |-------------|-----------------------| // | provisioner | Provisioner Explorer | // | worker-type | Workers Explorer | // | worker | Worker Explorer | // // Possible values: // * "provisioner" // * "worker-type" // * "worker" // // See https://taskcluster-staging.net/schemas/queue/v1/actions.json#/items/properties/context Context string `json:"context"` // Description of the provisioner. // // See https://taskcluster-staging.net/schemas/queue/v1/actions.json#/items/properties/description Description string `json:"description"` // Method to indicate the desired action to be performed for a given resource. // // Possible values: // * "POST" // * "PUT" // * "DELETE" // * "PATCH" // // See https://taskcluster-staging.net/schemas/queue/v1/actions.json#/items/properties/method Method string `json:"method"` // Short names for things like logging/error messages. // // See https://taskcluster-staging.net/schemas/queue/v1/actions.json#/items/properties/name Name string `json:"name"` // Appropriate title for any sort of Modal prompt. // // See https://taskcluster-staging.net/schemas/queue/v1/actions.json#/items/properties/title Title json.RawMessage `json:"title"` // When an action is triggered, a request is made using the `url` and `method`. 
// Depending on the `context`, the following parameters will be substituted in the url: // // | `context` | Path parameters | // |-------------|----------------------------------------------------------| // | provisioner | <provisionerId> | // | worker-type | <provisionerId>, <workerType> | // | worker | <provisionerId>, <workerType>, <workerGroup>, <workerId> | // // _Note: The request needs to be signed with the user's Taskcluster credentials._ // // See https://taskcluster-staging.net/schemas/queue/v1/actions.json#/items/properties/url URL string `json:"url"` }
Actions provide a generic mechanism to expose additional features of a provisioner, worker type, or worker to Taskcluster clients.
An action is comprised of metadata describing the feature it exposes, together with a webhook for triggering it.
The Taskcluster tools site, for example, retrieves actions when displaying provisioners, worker types and workers. It presents the provisioner/worker type/worker specific actions to the user. When the user triggers an action, the web client takes the registered webhook, substitutes parameters into the URL (see `url`), signs the requests with the Taskcluster credentials of the user operating the web interface, and issues the HTTP request.
The level to which the action relates (provisioner, worker type, worker) is called the action context. All actions, regardless of the action contexts, are registered against the provisioner when calling `queue.declareProvisioner`.
The action context is used by the web client to determine where in the web interface to present the action to the user as follows:
| `context` | Tool where action is displayed | |-------------|--------------------------------| | provisioner | Provisioner Explorer | | worker-type | Workers Explorer | | worker | Worker Explorer |
See [actions docs](/docs/reference/platform/taskcluster-queue/docs/actions) for more information.
See https://taskcluster-staging.net/schemas/queue/v1/actions.json#/items
type Artifact ¶
type Artifact struct { // Mimetype for the artifact that was created. // // Max length: 255 // // See https://taskcluster-staging.net/schemas/queue/v1/list-artifacts-response.json#/properties/artifacts/items/properties/contentType ContentType string `json:"contentType"` // Date and time after which the artifact created will be automatically // deleted by the queue. // // See https://taskcluster-staging.net/schemas/queue/v1/list-artifacts-response.json#/properties/artifacts/items/properties/expires Expires tcclient.Time `json:"expires"` // Name of the artifact that was created, this is useful if you want to // attempt to fetch the artifact. // // Max length: 1024 // // See https://taskcluster-staging.net/schemas/queue/v1/list-artifacts-response.json#/properties/artifacts/items/properties/name Name string `json:"name"` // This is the `storageType` for the request that was used to create // the artifact. // // Possible values: // * "blob" // * "s3" // * "azure" // * "reference" // * "error" // // See https://taskcluster-staging.net/schemas/queue/v1/list-artifacts-response.json#/properties/artifacts/items/properties/storageType StorageType string `json:"storageType"` }
Information about an artifact for the given `taskId` and `runId`.
type AzureArtifactRequest ¶
type AzureArtifactRequest struct { // Artifact mime-type, when uploading artifact please use the same // `Content-Type`, consistently using the correct mime-type make // tooling a lot easier, specifically, always using `application/json` // for JSON artifacts. // // Max length: 255 // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[2]/properties/contentType ContentType string `json:"contentType"` // Date-time after which the artifact should be deleted. // Note, that these will be collected over time, and artifacts may // remain available after expiration. Azure based artifacts are // identified in azure table storage and explicitly deleted in the // azure storage container after expiration. // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[2]/properties/expires Expires tcclient.Time `json:"expires"` // Artifact storage type, in this case `azure` // // Possible values: // * "azure" // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[2]/properties/storageType StorageType string `json:"storageType"` }
Request for an Azure Shared Access Signature (SAS) that will allow you to upload an artifact to an Azure blob storage container managed by the queue.
See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[2]
type AzureArtifactResponse ¶
type AzureArtifactResponse struct { // Artifact mime-type, should be specified with the // `x-ms-blob-content-type` when committing the block. // // Max length: 255 // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[2]/properties/contentType ContentType string `json:"contentType"` // Date-time after which Shared Access Signature (SAS) will // seize to work. // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[2]/properties/expires Expires tcclient.Time `json:"expires"` // Shared Access Signature (SAS) with write permissions, see // [Azure REST API] // (http://msdn.microsoft.com/en-US/library/azure/dn140256.aspx) // reference for details on how to use this. // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[2]/properties/putUrl PutURL string `json:"putUrl"` // Artifact storage type, in this case `azure` // // Possible values: // * "azure" // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[2]/properties/storageType StorageType string `json:"storageType"` }
Response to a request for an Azure Shared Access Signature (SAS) that will allow you to upload an artifact to an Azure blob storage container managed by the queue.
See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[2]
type BlobArtifactRequest ¶
type BlobArtifactRequest struct { // Optionally provide an encoding type which should be set as the HTTP // Content-Encoding header for this artifact. // // Max length: 255 // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[0]/properties/contentEncoding ContentEncoding string `json:"contentEncoding,omitempty"` // The number of bytes of the entire artifact. This must be the number // of bytes in the file to be uploaded. For single part uploads, the // upload will fail if the number of bytes uploaded does not match this // value. A single part upload (e.g. no parts list) may be at most 5GB. // This limit is enforced in the code because it is not possible to // represent all of the restrictions in a json-schema. A multipart // upload may be at most 5TB, with each part other than the last being // between 5MB and 5GB in size. // // Mininum: 0 // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[0]/properties/contentLength ContentLength int64 `json:"contentLength"` // The complete SHA256 value of the entire artifact. This must be the // SHA256 of the file which is to be uploaded. For single part uploads, // the upload will fail if the SHA256 value of what is uploaded does not // match this value // // Syntax: ^[a-fA-F0-9]{64}$ // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[0]/properties/contentSha256 ContentSha256 string `json:"contentSha256"` // Artifact mime-type, when uploading artifact to the signed // `PUT` URL returned from this request this must given with the // `ContentType` header. Please, provide correct mime-type, // this make tooling a lot easier, specifically, // always using `application/json` for JSON artifacts. 
// // Max length: 255 // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[0]/properties/contentType ContentType string `json:"contentType"` // Date-time after which the artifact should be deleted. Note, that // these will be collected over time, and artifacts may remain // available after expiration. S3 based artifacts are identified in // azure table storage and explicitly deleted on S3 after expiration. // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[0]/properties/expires Expires tcclient.Time `json:"expires"` // A list of parts for a multipart upload. The presence of this list is // how a multipart upload is differentiated from a single part upload. // The items in this list represent individual parts for upload. For a // multipart upload, the sha256 values provided here must match the // sha256 value that S3 internally computes for the upload to be // considered a success. The overall sha256 value is not checked // explicitly because the S3 API does not allow for that, but the same // code that is responsible for generating the parts hashes would also // be generating the overall hash, which makes this less of a concern. // The worst case is that we have artifacts which incorrectly do not // validate, which is not as big of a security concern. // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[0]/properties/parts Parts []MultipartPart `json:"parts,omitempty"` // Artifact storage type, in this case `'blob'` // // Possible values: // * "blob" // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[0]/properties/storageType StorageType string `json:"storageType"` // The number of bytes transfered across the wire to the backing // datastore. 
If specified, it represents the post-content-encoding // byte count // // Mininum: 0 // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[0]/properties/transferLength TransferLength int64 `json:"transferLength,omitempty"` // This is the sha256 of the bytes transfered across the wire to the // backing datastore. If specified, it represents the // post-content-encoding sha256 value // // Syntax: ^[a-fA-F0-9]{64}$ // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[0]/properties/transferSha256 TransferSha256 string `json:"transferSha256,omitempty"` }
Request a list of requests in a generalized format which can be run to upload an artifact to storage managed by the queue.
See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[0]
type BlobArtifactResponse ¶
type BlobArtifactResponse struct { // Date-time after which the signed `requests` no longer work // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[0]/properties/expires Expires tcclient.Time `json:"expires"` // A list of generalized HTTP requests which must be run to upload the // artifact. // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[0]/properties/requests Requests []HTTPRequest `json:"requests"` // Artifact storage type, in this case `'blob'` // // Possible values: // * "blob" // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[0]/properties/storageType StorageType string `json:"storageType"` }
Response to a request for creating a new blob artifact
See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[0]
type ClaimWorkRequest ¶
type ClaimWorkRequest struct { // Number of tasks to attempt to claim. // // Default: 1 // Mininum: 1 // Maximum: 32 // // See https://taskcluster-staging.net/schemas/queue/v1/claim-work-request.json#/properties/tasks Tasks int64 `json:"tasks"` // Identifier for group that worker claiming the task is a part of. // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/claim-work-request.json#/properties/workerGroup WorkerGroup string `json:"workerGroup"` // Identifier for worker within the given workerGroup // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/claim-work-request.json#/properties/workerId WorkerID string `json:"workerId"` }
Request to claim a task for a worker to process.
See https://taskcluster-staging.net/schemas/queue/v1/claim-work-request.json#
type ClaimWorkResponse ¶
type ClaimWorkResponse struct { // List of task claims, may be empty if no tasks was claimed, in which case // the worker should sleep a tiny bit before polling again. // // See https://taskcluster-staging.net/schemas/queue/v1/claim-work-response.json#/properties/tasks Tasks []TaskClaim `json:"tasks"` }
Response to an attempt to claim tasks for a worker to process.
See https://taskcluster-staging.net/schemas/queue/v1/claim-work-response.json#
type CompleteArtifactRequest ¶
type CompleteArtifactRequest struct { // string value provided by the API. // // Array items: // See https://taskcluster-staging.net/schemas/queue/v1/put-artifact-request.json#/properties/etags/items // // See https://taskcluster-staging.net/schemas/queue/v1/put-artifact-request.json#/properties/etags Etags []string `json:"etags"` }
Complete an artifact
See https://taskcluster-staging.net/schemas/queue/v1/put-artifact-request.json#
type CountPendingTasksResponse ¶
type CountPendingTasksResponse struct { // An approximate number of pending tasks for the given `provisionerId` and // `workerType`. This is based on Azure Queue Storage metadata API, thus, // number of reported here may be higher than actual number of pending tasks. // But there cannot be more pending tasks reported here. Ie. this is an // **upper-bound** on the number of pending tasks. // // Mininum: 0 // // See https://taskcluster-staging.net/schemas/queue/v1/pending-tasks-response.json#/properties/pendingTasks PendingTasks int64 `json:"pendingTasks"` // Unique identifier for the provisioner // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/pending-tasks-response.json#/properties/provisionerId ProvisionerID string `json:"provisionerId"` // Identifier for worker type within the specified provisioner // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/pending-tasks-response.json#/properties/workerType WorkerType string `json:"workerType"` }
Response to a request for the number of pending tasks for a given `provisionerId` and `workerType`.
See https://taskcluster-staging.net/schemas/queue/v1/pending-tasks-response.json#
type ErrorArtifactRequest ¶
type ErrorArtifactRequest struct { // Date-time after which the queue should stop replying with the error // and forget about the artifact. // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[4]/properties/expires Expires tcclient.Time `json:"expires"` // Human readable explanation of why the artifact is missing // // Max length: 4096 // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[4]/properties/message Message string `json:"message"` // Reason why the artifact doesn't exist. // // Possible values: // * "file-missing-on-worker" // * "invalid-resource-on-worker" // * "too-large-file-on-worker" // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[4]/properties/reason Reason string `json:"reason"` // Artifact storage type, in this case `error` // // Possible values: // * "error" // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[4]/properties/storageType StorageType string `json:"storageType"` }
Request the queue to reply `424` (Failed Dependency) with `reason` and `message` to any `GET` request for this artifact. This is mainly useful as a way for a task to declare that it failed to provide an artifact it wanted to upload.
See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[4]
type ErrorArtifactResponse ¶
type ErrorArtifactResponse struct { // Artifact storage type, in this case `error` // // Possible values: // * "error" // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[4]/properties/storageType StorageType string `json:"storageType"` }
Response to a request for the queue to reply `424` (Failed Dependency) with `reason` and `message` to any `GET` request for this artifact.
See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[4]
type HTTPRequest ¶
type HTTPRequest struct { // Headers of request // // Map entries: // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[0]/properties/requests/items/properties/headers/additionalProperties // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[0]/properties/requests/items/properties/headers Headers map[string]string `json:"headers"` // HTTP 1.1 method of request // // Possible values: // * "GET" // * "POST" // * "PUT" // * "DELETE" // * "OPTIONS" // * "HEAD" // * "PATCH" // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[0]/properties/requests/items/properties/method Method string `json:"method"` // URL of request // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[0]/properties/requests/items/properties/url URL string `json:"url"` }
type ListArtifactsResponse ¶
type ListArtifactsResponse struct { // List of artifacts for given `taskId` and `runId`. // // See https://taskcluster-staging.net/schemas/queue/v1/list-artifacts-response.json#/properties/artifacts Artifacts []Artifact `json:"artifacts"` // Opaque `continuationToken` to be given as query-string option to get the // next set of artifacts. // This property is only present if another request is necessary to fetch all // results. In practice the next request with a `continuationToken` may not // return additional results, but it can. Thus, you can only be sure to have // all the results if you've called with `continuationToken` until you get a // result without a `continuationToken`. // // See https://taskcluster-staging.net/schemas/queue/v1/list-artifacts-response.json#/properties/continuationToken ContinuationToken string `json:"continuationToken,omitempty"` }
List of artifacts for a given `taskId` and `runId`.
See https://taskcluster-staging.net/schemas/queue/v1/list-artifacts-response.json#
type ListDependentTasksResponse ¶
type ListDependentTasksResponse struct { // Opaque `continuationToken` to be given as query-string option to get the // next set of dependent tasks. // This property is only present if another request is necessary to fetch all // results. In practice the next request with a `continuationToken` may not // return additional results, but it can. Thus, you can only be sure to have // all the results if you've called `listDependentTasks` with // `continuationToken` until you get a result without a `continuationToken`. // // See https://taskcluster-staging.net/schemas/queue/v1/list-dependent-tasks-response.json#/properties/continuationToken ContinuationToken string `json:"continuationToken,omitempty"` // Identifier for the task whose dependents are being listed. // // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ // // See https://taskcluster-staging.net/schemas/queue/v1/list-dependent-tasks-response.json#/properties/taskId TaskID string `json:"taskId"` // List of tasks that have `taskId` in the `task.dependencies` property. // // See https://taskcluster-staging.net/schemas/queue/v1/list-dependent-tasks-response.json#/properties/tasks Tasks []TaskDefinitionAndStatus `json:"tasks"` }
Response from a `listDependentTasks` request.
See https://taskcluster-staging.net/schemas/queue/v1/list-dependent-tasks-response.json#
type ListProvisionersResponse ¶
type ListProvisionersResponse struct { // Opaque `continuationToken` to be given as query-string option to get the // next set of provisioners. // This property is only present if another request is necessary to fetch all // results. In practice the next request with a `continuationToken` may not // return additional results, but it can. Thus, you can only be sure to have // all the results if you've called with `continuationToken` until you get a // result without a `continuationToken`. // // See https://taskcluster-staging.net/schemas/queue/v1/list-provisioners-response.json#/properties/continuationToken ContinuationToken string `json:"continuationToken,omitempty"` // See https://taskcluster-staging.net/schemas/queue/v1/list-provisioners-response.json#/properties/provisioners Provisioners []ProvisionerInformation `json:"provisioners"` }
See https://taskcluster-staging.net/schemas/queue/v1/list-provisioners-response.json#
type ListTaskGroupResponse ¶
type ListTaskGroupResponse struct { // Opaque `continuationToken` to be given as query-string option to get the // next set of tasks in the task-group. // This property is only present if another request is necessary to fetch all // results. In practice the next request with a `continuationToken` may not // return additional results, but it can. Thus, you can only be sure to have // all the results if you've called `listTaskGroup` with `continuationToken` // until you get a result without a `continuationToken`. // // See https://taskcluster-staging.net/schemas/queue/v1/list-task-group-response.json#/properties/continuationToken ContinuationToken string `json:"continuationToken,omitempty"` // Identifier for the task-group being listed. // // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ // // See https://taskcluster-staging.net/schemas/queue/v1/list-task-group-response.json#/properties/taskGroupId TaskGroupID string `json:"taskGroupId"` // List of tasks in this task-group. // // See https://taskcluster-staging.net/schemas/queue/v1/list-task-group-response.json#/properties/tasks Tasks []TaskDefinitionAndStatus `json:"tasks"` }
Response from a `listTaskGroup` request.
See https://taskcluster-staging.net/schemas/queue/v1/list-task-group-response.json#
type ListWorkerTypesResponse ¶
type ListWorkerTypesResponse struct { // Opaque `continuationToken` to be given as query-string option to get the // next set of worker-types in the provisioner. // This property is only present if another request is necessary to fetch all // results. In practice the next request with a `continuationToken` may not // return additional results, but it can. Thus, you can only be sure to have // all the results if you've called `listWorkerTypes` with `continuationToken` // until you get a result without a `continuationToken`. // // See https://taskcluster-staging.net/schemas/queue/v1/list-workertypes-response.json#/properties/continuationToken ContinuationToken string `json:"continuationToken,omitempty"` // List of worker-types in this provisioner. // // See https://taskcluster-staging.net/schemas/queue/v1/list-workertypes-response.json#/properties/workerTypes WorkerTypes []WorkerType `json:"workerTypes"` }
Response from a `listWorkerTypes` request.
See https://taskcluster-staging.net/schemas/queue/v1/list-workertypes-response.json#
type ListWorkersResponse ¶
type ListWorkersResponse struct { // Opaque `continuationToken` to be given as query-string option to get the // next set of workers in the worker-type. // This property is only present if another request is necessary to fetch all // results. In practice the next request with a `continuationToken` may not // return additional results, but it can. Thus, you can only be sure to have // all the results if you've called `listWorkerTypes` with `continuationToken` // until you get a result without a `continuationToken`. // // See https://taskcluster-staging.net/schemas/queue/v1/list-workers-response.json#/properties/continuationToken ContinuationToken string `json:"continuationToken,omitempty"` // List of workers in this worker-type. // // See https://taskcluster-staging.net/schemas/queue/v1/list-workers-response.json#/properties/workers Workers []Worker `json:"workers"` }
Response from a `listWorkers` request.
See https://taskcluster-staging.net/schemas/queue/v1/list-workers-response.json#
type MultipartPart ¶
type MultipartPart struct { // The sha256 hash of the part. // // Syntax: ^[a-fA-F0-9]{64}$ // Min length: 64 // Max length: 64 // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[0]/properties/parts/items/properties/sha256 Sha256 string `json:"sha256"` // The number of bytes in this part. Keep in mind for S3 that // all but the last part must be minimum 5MB and the maximum for // a single part is 5GB. The overall size may not exceed 5TB // // Minimum: 0 // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[0]/properties/parts/items/properties/size Size int64 `json:"size"` }
type PostArtifactRequest ¶
type PostArtifactRequest json.RawMessage
Request authorization to upload an artifact, or to post a URL as an artifact. Note that the `storageType` property is referenced in the response as well.
One of:
- BlobArtifactRequest
- S3ArtifactRequest
- AzureArtifactRequest
- RedirectArtifactRequest
- ErrorArtifactRequest
See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#
func (*PostArtifactRequest) MarshalJSON ¶
func (this *PostArtifactRequest) MarshalJSON() ([]byte, error)
MarshalJSON calls json.RawMessage method of the same name. Required since PostArtifactRequest is of type json.RawMessage...
func (*PostArtifactRequest) UnmarshalJSON ¶
func (this *PostArtifactRequest) UnmarshalJSON(data []byte) error
UnmarshalJSON is a copy of the json.RawMessage implementation.
type PostArtifactResponse ¶
type PostArtifactResponse json.RawMessage
Response to a request for posting an artifact. Note that the `storageType` property is referenced in the request as well.
One of:
- BlobArtifactResponse
- S3ArtifactResponse
- AzureArtifactResponse
- RedirectArtifactResponse
- ErrorArtifactResponse
See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#
func (*PostArtifactResponse) MarshalJSON ¶
func (this *PostArtifactResponse) MarshalJSON() ([]byte, error)
MarshalJSON calls json.RawMessage method of the same name. Required since PostArtifactResponse is of type json.RawMessage...
func (*PostArtifactResponse) UnmarshalJSON ¶
func (this *PostArtifactResponse) UnmarshalJSON(data []byte) error
UnmarshalJSON is a copy of the json.RawMessage implementation.
type ProvisionerInformation ¶
type ProvisionerInformation struct { // See taskcluster [actions](/docs/reference/platform/taskcluster-queue/docs/actions) documentation. // // See https://taskcluster-staging.net/schemas/queue/v1/actions.json# Actions []Action `json:"actions"` // Description of the provisioner. // // See https://taskcluster-staging.net/schemas/queue/v1/list-provisioners-response.json#/properties/provisioners/items/properties/description Description string `json:"description"` // Date and time after which the provisioner created will be automatically // deleted by the queue. // // See https://taskcluster-staging.net/schemas/queue/v1/list-provisioners-response.json#/properties/provisioners/items/properties/expires Expires tcclient.Time `json:"expires"` // Date and time where the provisioner was last seen active // // See https://taskcluster-staging.net/schemas/queue/v1/list-provisioners-response.json#/properties/provisioners/items/properties/lastDateActive LastDateActive tcclient.Time `json:"lastDateActive"` // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/list-provisioners-response.json#/properties/provisioners/items/properties/provisionerId ProvisionerID string `json:"provisionerId"` // This is the stability of the provisioner. Accepted values: // * `experimental` // * `stable` // * `deprecated` // // Possible values: // * "experimental" // * "stable" // * "deprecated" // // See https://taskcluster-staging.net/schemas/queue/v1/list-provisioners-response.json#/properties/provisioners/items/properties/stability Stability string `json:"stability"` }
type ProvisionerRequest ¶
type ProvisionerRequest struct { // See taskcluster [actions](/docs/reference/platform/taskcluster-queue/docs/actions) documentation. // // See https://taskcluster-staging.net/schemas/queue/v1/actions.json# Actions []Action `json:"actions,omitempty"` // Description of the provisioner. // // See https://taskcluster-staging.net/schemas/queue/v1/update-provisioner-request.json#/properties/description Description string `json:"description,omitempty"` // Date and time after which the provisioner will be automatically // deleted by the queue. // // See https://taskcluster-staging.net/schemas/queue/v1/update-provisioner-request.json#/properties/expires Expires tcclient.Time `json:"expires,omitempty"` // This is the stability of the provisioner. Accepted values: // * `experimental` // * `stable` // * `deprecated` // // Possible values: // * "experimental" // * "stable" // * "deprecated" // // See https://taskcluster-staging.net/schemas/queue/v1/update-provisioner-request.json#/properties/stability Stability string `json:"stability,omitempty"` }
Request to update a provisioner.
See https://taskcluster-staging.net/schemas/queue/v1/update-provisioner-request.json#
type ProvisionerResponse ¶
type ProvisionerResponse struct { // See taskcluster [actions](/docs/reference/platform/taskcluster-queue/docs/actions) documentation. // // See https://taskcluster-staging.net/schemas/queue/v1/actions.json# Actions []Action `json:"actions"` // Description of the provisioner. // // See https://taskcluster-staging.net/schemas/queue/v1/provisioner-response.json#/properties/description Description string `json:"description"` // Date and time after which the provisioner will be automatically // deleted by the queue. // // See https://taskcluster-staging.net/schemas/queue/v1/provisioner-response.json#/properties/expires Expires tcclient.Time `json:"expires"` // Date of the last time this provisioner was seen active. `lastDateActive` is updated every 6 hours // but may be off by up-to 6 hours. Nonetheless, `lastDateActive` is a good indicator // of when the provisioner was last seen active. // // See https://taskcluster-staging.net/schemas/queue/v1/provisioner-response.json#/properties/lastDateActive LastDateActive tcclient.Time `json:"lastDateActive"` // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/provisioner-response.json#/properties/provisionerId ProvisionerID string `json:"provisionerId"` // This is the stability of the provisioner. Accepted values: // * `experimental` // * `stable` // * `deprecated` // // Possible values: // * "experimental" // * "stable" // * "deprecated" // // See https://taskcluster-staging.net/schemas/queue/v1/provisioner-response.json#/properties/stability Stability string `json:"stability"` }
Response containing information about a provisioner.
See https://taskcluster-staging.net/schemas/queue/v1/provisioner-response.json#
type QuarantineWorkerRequest ¶
type QuarantineWorkerRequest struct { // Quarantining a worker allows the machine to remain alive but not accept jobs. // Once the quarantineUntil time has elapsed, the worker resumes accepting jobs. // Note that a quarantine can be lifted by setting `quarantineUntil` to the present time (or // somewhere in the past). // // See https://taskcluster-staging.net/schemas/queue/v1/quarantine-worker-request.json#/properties/quarantineUntil QuarantineUntil tcclient.Time `json:"quarantineUntil"` }
Request to update a worker's quarantineUntil property.
See https://taskcluster-staging.net/schemas/queue/v1/quarantine-worker-request.json#
type Queue ¶
func New ¶
func New(credentials *tcclient.Credentials, rootURL string) *Queue
New returns a Queue client, configured to run against production. Pass in nil credentials to create a client without authentication. The returned client is mutable, so returned settings can be altered.
queue := tcqueue.New( nil, // client without authentication "http://localhost:1234/my/taskcluster", // taskcluster hosted at this root URL on local machine ) err := queue.Ping(.....) // for example, call the Ping(.....) API endpoint (described further down)... if err != nil { // handle errors... }
func NewFromEnv ¶
func NewFromEnv() *Queue
NewFromEnv returns a *Queue configured from environment variables.
The root URL is taken from TASKCLUSTER_PROXY_URL if set to a non-empty string, otherwise from TASKCLUSTER_ROOT_URL if set, otherwise the empty string.
The credentials are taken from environment variables:
TASKCLUSTER_CLIENT_ID TASKCLUSTER_ACCESS_TOKEN TASKCLUSTER_CERTIFICATE
If TASKCLUSTER_CLIENT_ID is empty/unset, authentication will be disabled.
func (*Queue) CancelTask ¶
func (queue *Queue) CancelTask(taskId string) (*TaskStatusResponse, error)
This method will cancel a task that is either `unscheduled`, `pending` or `running`. It will resolve the current run as `exception` with `reasonResolved` set to `canceled`. If the task isn't scheduled yet, i.e. it doesn't have any runs, an initial run will be added and resolved as described above. Hence, after canceling a task, it cannot be scheduled with `queue.scheduleTask`, but a new run can be created with `queue.rerun`. These semantics are equivalent to calling `queue.scheduleTask` immediately followed by `queue.cancelTask`.
**Remark** this operation is idempotent, if you try to cancel a task that isn't `unscheduled`, `pending` or `running`, this operation will just return the current task status.
Required scopes:
Any of: - queue:cancel-task:<schedulerId>/<taskGroupId>/<taskId> - All of: * queue:cancel-task * assume:scheduler-id:<schedulerId>/<taskGroupId>
See #cancelTask
func (*Queue) ClaimTask ¶
func (queue *Queue) ClaimTask(taskId, runId string, payload *TaskClaimRequest) (*TaskClaimResponse, error)
Stability: *** DEPRECATED ***
claim a task - never documented
Required scopes:
Any of: - All of: * queue:claim-task:<provisionerId>/<workerType> * queue:worker-id:<workerGroup>/<workerId> - All of: * queue:claim-task * assume:worker-type:<provisionerId>/<workerType> * assume:worker-id:<workerGroup>/<workerId>
See #claimTask
func (*Queue) ClaimWork ¶
func (queue *Queue) ClaimWork(provisionerId, workerType string, payload *ClaimWorkRequest) (*ClaimWorkResponse, error)
Claim pending task(s) for the given `provisionerId`/`workerType` queue.
If any work is available (even if fewer than the requested number of tasks), this will return immediately. Otherwise, it will block for tens of seconds waiting for work. If no work appears, it will return an empty list of tasks. Callers should sleep a short while (to avoid denial of service in an error condition) and call the endpoint again. This is a simple implementation of "long polling".
Required scopes:
All of: * queue:claim-work:<provisionerId>/<workerType> * queue:worker-id:<workerGroup>/<workerId>
See #claimWork
func (*Queue) CompleteArtifact ¶
func (queue *Queue) CompleteArtifact(taskId, runId, name string, payload *CompleteArtifactRequest) error
Stability: *** EXPERIMENTAL ***
This endpoint finalises an upload done through the blob `storageType`. The queue will ensure that the task/run is still allowing artifacts to be uploaded. For single-part S3 blob artifacts, this endpoint will simply ensure the artifact is present in S3. For multipart S3 artifacts, the endpoint will perform the commit step of the multipart upload flow. As the final step for both multi and single part artifacts, the `present` entity field will be set to `true` to reflect that the artifact is now present and a message published to pulse. NOTE: This endpoint *must* be called for all artifacts of storageType 'blob'
Required scopes:
Any of: - queue:create-artifact:<taskId>/<runId> - All of: * queue:create-artifact:<name> * assume:worker-id:<workerGroup>/<workerId>
See #completeArtifact
func (*Queue) CreateArtifact ¶
func (queue *Queue) CreateArtifact(taskId, runId, name string, payload *PostArtifactRequest) (*PostArtifactResponse, error)
This API end-point creates an artifact for a specific run of a task. This should **only** be used by a worker currently operating on this task, or from a process running within the task (ie. on the worker).
All artifacts must specify when they `expires`; the queue will automatically take care of deleting artifacts past their expiration point. This feature makes it feasible to upload large intermediate artifacts from data processing applications, as the artifacts can be set to expire a few days later.
We currently support 3 different `storageType`s; each storage type has slightly different features and in some cases different semantics. We also have 2 deprecated `storageType`s which are only maintained for backwards compatibility and should not be used in new implementations.
**Blob artifacts**, are useful for storing large files. Currently, these are all stored in S3 but there are facilities for adding support for other backends in future. A call for this type of artifact must provide information about the file which will be uploaded. This includes sha256 sums and sizes. This method will return a list of general form HTTP requests which are signed by AWS S3 credentials managed by the Queue. Once these requests are completed the list of `ETag` values returned by the requests must be passed to the queue `completeArtifact` method.
**S3 artifacts**, DEPRECATED is useful for static files which will be stored on S3. When creating an S3 artifact the queue will return a pre-signed URL to which you can do a `PUT` request to upload your artifact. Note that `PUT` request **must** specify the `content-length` header and **must** give the `content-type` header the same value as in the request to `createArtifact`.
**Azure artifacts**, DEPRECATED are stored in _Azure Blob Storage_ service which given the consistency guarantees and API interface offered by Azure is more suitable for artifacts that will be modified during the execution of the task. For example docker-worker has a feature that persists the task log to Azure Blob Storage every few seconds creating a somewhat live log. A request to create an Azure artifact will return a URL featuring a [Shared-Access-Signature](http://msdn.microsoft.com/en-us/library/azure/dn140256.aspx), refer to MSDN for further information on how to use these. **Warning: azure artifact is currently an experimental feature subject to changes and data-drops.**
**Reference artifacts**, only consist of meta-data which the queue will store for you. These artifacts really only have a `url` property and when the artifact is requested the client will be redirected to the URL provided with a `303` (See Other) redirect. Please note that we cannot delete artifacts you upload to another service; we can only delete the reference to the artifact, when it expires.
**Error artifacts**, only consist of meta-data which the queue will store for you. These artifacts are only meant to indicate that the worker or the task failed to generate a specific artifact that would otherwise have been uploaded. For example docker-worker will upload an error artifact, if the file it was supposed to upload doesn't exist or turns out to be a directory. Clients requesting an error artifact will get a `424` (Failed Dependency) response. This is mainly designed to ensure that dependent tasks can distinguish between artifacts that were supposed to be generated and artifacts for which the name is misspelled.
**Artifact immutability**, generally speaking you cannot overwrite an artifact when created. But if you repeat the request with the same properties the request will succeed as the operation is idempotent. This is useful if you need to refresh a signed URL while uploading. Do not abuse this to overwrite artifacts created by another entity! Such as worker-host overwriting artifact created by worker-code.
As a special case the `url` property on _reference artifacts_ can be updated. You should only use this to update the `url` property for reference artifacts your process has created.
Required scopes:
Any of: - queue:create-artifact:<taskId>/<runId> - All of: * queue:create-artifact:<name> * assume:worker-id:<workerGroup>/<workerId>
See #createArtifact
func (*Queue) CreateTask ¶
func (queue *Queue) CreateTask(taskId string, payload *TaskDefinitionRequest) (*TaskStatusResponse, error)
Create a new task, this is an **idempotent** operation, so repeat it if you get an internal server error or network connection is dropped.
**Task `deadline`**: the deadline property can be no more than 5 days into the future. This is to limit the amount of pending tasks not being taken care of. Ideally, you should use a much shorter deadline.
**Task expiration**: the `expires` property must be greater than the task `deadline`. If not provided it will default to `deadline` + one year. Notice, that artifacts created by task must expire before the task.
**Task specific routing-keys**: using the `task.routes` property you may define task specific routing-keys. If a task has a task specific routing-key: `<route>`, then when the AMQP message about the task is published, the message will be CC'ed with the routing-key: `route.<route>`. This is useful if you want another component to listen for completed tasks you have posted. The caller must have scope `queue:route:<route>` for each route.
**Dependencies**: any tasks referenced in `task.dependencies` must have already been created at the time of this call.
**Scopes**: Note that the scopes required to complete this API call depend on the content of the `scopes`, `routes`, `schedulerId`, `priority`, `provisionerId`, and `workerType` properties of the task definition.
**Legacy Scopes**: The `queue:create-task:..` scope without a priority and the `queue:define-task:..` and `queue:task-group-id:..` scopes are considered legacy and should not be used. Note that the new, non-legacy scopes require a `queue:scheduler-id:..` scope as well as scopes for the proper priority.
Required scopes:
All of: * For scope in scopes each <scope> * For route in routes each queue:route:<route> * Any of: - All of: * queue:scheduler-id:<schedulerId> * For priority in priorities each queue:create-task:<priority>:<provisionerId>/<workerType> - If legacyScopes: Any of: - queue:create-task:<provisionerId>/<workerType> - All of: * queue:define-task:<provisionerId>/<workerType> * queue:task-group-id:<schedulerId>/<taskGroupId> * queue:schedule-task:<schedulerId>/<taskGroupId>/<taskId>
See #createTask
func (*Queue) DeclareProvisioner ¶
func (queue *Queue) DeclareProvisioner(provisionerId string, payload *ProvisionerRequest) (*ProvisionerResponse, error)
Stability: *** EXPERIMENTAL ***
Declare a provisioner, supplying some details about it.
`declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are possessed. For example, a request to update the `aws-provisioner-v1` provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope `queue:declare-provisioner:aws-provisioner-v1#description`.
The term "provisioner" is taken broadly to mean anything with a provisionerId. This does not necessarily mean there is an associated service performing any provisioning activity.
Required scopes:
For property in properties each queue:declare-provisioner:<provisionerId>#<property>
See #declareProvisioner
func (*Queue) DeclareWorker ¶
func (queue *Queue) DeclareWorker(provisionerId, workerType, workerGroup, workerId string, payload *WorkerRequest) (*WorkerResponse, error)
Stability: *** EXPERIMENTAL ***
Declare a worker, supplying some details about it.
`declareWorker` allows updating one or more properties of a worker as long as the required scopes are possessed.
Required scopes:
For property in properties each queue:declare-worker:<provisionerId>/<workerType>/<workerGroup>/<workerId>#<property>
See #declareWorker
func (*Queue) DeclareWorkerType ¶
func (queue *Queue) DeclareWorkerType(provisionerId, workerType string, payload *WorkerTypeRequest) (*WorkerTypeResponse, error)
Stability: *** EXPERIMENTAL ***
Declare a workerType, supplying some details about it.
`declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are possessed. For example, a request to update the `gecko-b-1-w2008` worker-type within the `aws-provisioner-v1` provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope `queue:declare-worker-type:aws-provisioner-v1/gecko-b-1-w2008#description`.
Required scopes:
For property in properties each queue:declare-worker-type:<provisionerId>/<workerType>#<property>
See #declareWorkerType
func (*Queue) DefineTask ¶
func (queue *Queue) DefineTask(taskId string, payload *TaskDefinitionRequest) (*TaskStatusResponse, error)
Stability: *** DEPRECATED ***
**Deprecated**, this is the same as `createTask` with a **self-dependency**. This is only present for legacy.
Required scopes:
All of: * For scope in scopes each <scope> * For route in routes each queue:route:<route> * Any of: - All of: * queue:scheduler-id:<schedulerId> * For priority in priorities each queue:create-task:<priority>:<provisionerId>/<workerType> - If legacyScopes: Any of: - queue:define-task:<provisionerId>/<workerType> - queue:create-task:<provisionerId>/<workerType> - All of: * queue:define-task:<provisionerId>/<workerType> * queue:task-group-id:<schedulerId>/<taskGroupId>
See #defineTask
func (*Queue) GetArtifact ¶
Get artifact by `<name>` from a specific run.
**Public Artifacts**, in-order to get an artifact you need the scope `queue:get-artifact:<name>`, where `<name>` is the name of the artifact. But if the artifact `name` starts with `public/`, authentication and authorization is not necessary to fetch the artifact.
**API Clients**, this method will redirect you to the artifact, if it is stored externally. Either way, the response may not be JSON. So API client users might want to generate a signed URL for this end-point and use that URL with an HTTP client that can handle responses correctly.
**Downloading artifacts** There are some special considerations for those http clients which download artifacts. This api endpoint is designed to be compatible with an HTTP 1.1 compliant client, but has extra features to ensure the download is valid. It is strongly recommended that consumers use either taskcluster-lib-artifact (JS), taskcluster-lib-artifact-go (Go) or the CLI written in Go to interact with artifacts.
In order to download an artifact the following must be done:
1. Obtain queue url. Building a signed url with a taskcluster client is recommended 1. Make a GET request which does not follow redirects 1. In all cases, if specified, the x-taskcluster-location-{content,transfer}-{sha256,length} values must be validated to be equal to the Content-Length and Sha256 checksum of the final artifact downloaded. as well as any intermediate redirects 1. If this response is a 500-series error, retry using an exponential backoff. No more than 5 retries should be attempted 1. If this response is a 400-series error, treat it appropriately for your context. This might be an error in responding to this request or an Error storage type body. This request should not be retried. 1. If this response is a 200-series response, the response body is the artifact. If the x-taskcluster-location-{content,transfer}-{sha256,length} and x-taskcluster-location-content-encoding are specified, they should match this response body 1. If the response type is a 300-series redirect, the artifact will be at the location specified by the `Location` header. There are multiple artifact storage types which use a 300-series redirect. 1. For all redirects followed, the user must verify that the content-sha256, content-length, transfer-sha256, transfer-length and content-encoding match every further request. The final artifact must also be validated against the values specified in the original queue response 1. Caching of requests with an x-taskcluster-artifact-storage-type value of `reference` must not occur 1. A request which has x-taskcluster-artifact-storage-type value of `blob` and does not have x-taskcluster-location-content-sha256 or x-taskcluster-location-content-length must be treated as an error
**Headers** The following important headers are set on the response to this method:
* location: the url of the artifact if a redirect is to be performed * x-taskcluster-artifact-storage-type: the storage type. Example: blob, s3, error
The following important headers are set on responses to this method for Blob artifacts ¶
* x-taskcluster-location-content-sha256: the SHA256 of the artifact *after* any content-encoding is undone. Sha256 is hex encoded (e.g. [0-9A-Fa-f]{64}) * x-taskcluster-location-content-length: the number of bytes *after* any content-encoding is undone * x-taskcluster-location-transfer-sha256: the SHA256 of the artifact *before* any content-encoding is undone. This is the SHA256 of what is sent over the wire. Sha256 is hex encoded (e.g. [0-9A-Fa-f]{64}) * x-taskcluster-location-transfer-length: the number of bytes *before* any content-encoding is undone, i.e. the number of bytes sent over the wire * x-taskcluster-location-content-encoding: the content-encoding used. It will either be `gzip` or `identity` right now. This is hardcoded to a value set when the artifact was created and no content-negotiation occurs * x-taskcluster-location-content-type: the content-type of the artifact
**Caching**, artifacts may be cached in data centers closer to the workers in order to reduce bandwidth costs. This can lead to longer response times. Caching can be skipped by setting the header `x-taskcluster-skip-cache: true`; this should only be used for resources where request volume is known to be low, and caching not useful. (This feature may be disabled in the future, use it sparingly!)
Required scopes:
If private: queue:get-artifact:<name>
See #getArtifact
func (*Queue) GetArtifact_SignedURL ¶
func (queue *Queue) GetArtifact_SignedURL(taskId, runId, name string, duration time.Duration) (*url.URL, error)
Returns a signed URL for GetArtifact, valid for the specified duration.
Required scopes:
If private: queue:get-artifact:<name>
See GetArtifact for more details.
func (*Queue) GetLatestArtifact ¶
Get artifact by `<name>` from the last run of a task.
**Public Artifacts**, in-order to get an artifact you need the scope `queue:get-artifact:<name>`, where `<name>` is the name of the artifact. But if the artifact `name` starts with `public/`, authentication and authorization is not necessary to fetch the artifact.
**API Clients**, this method will redirect you to the artifact, if it is stored externally. Either way, the response may not be JSON. So API client users might want to generate a signed URL for this end-point and use that URL with a normal HTTP client.
**Remark**, this end-point is slightly slower than `queue.getArtifact`, so consider that if you already know the `runId` of the latest run. Otherwise, just use the most convenient API end-point.
Required scopes:
If private: queue:get-artifact:<name>
See #getLatestArtifact
func (*Queue) GetLatestArtifact_SignedURL ¶
func (queue *Queue) GetLatestArtifact_SignedURL(taskId, name string, duration time.Duration) (*url.URL, error)
Returns a signed URL for GetLatestArtifact, valid for the specified duration.
Required scopes:
If private: queue:get-artifact:<name>
See GetLatestArtifact for more details.
func (*Queue) GetProvisioner ¶
func (queue *Queue) GetProvisioner(provisionerId string) (*ProvisionerResponse, error)
Stability: *** EXPERIMENTAL ***
Get an active provisioner.
The term "provisioner" is taken broadly to mean anything with a provisionerId. This does not necessarily mean there is an associated service performing any provisioning activity.
See #getProvisioner
func (*Queue) GetWorker ¶
func (queue *Queue) GetWorker(provisionerId, workerType, workerGroup, workerId string) (*WorkerResponse, error)
Stability: *** EXPERIMENTAL ***
Get a worker from a worker-type.
See #getWorker
func (*Queue) GetWorkerType ¶
func (queue *Queue) GetWorkerType(provisionerId, workerType string) (*WorkerTypeResponse, error)
Stability: *** EXPERIMENTAL ***
Get a worker-type from a provisioner.
See #getWorkerType
func (*Queue) ListArtifacts ¶
func (queue *Queue) ListArtifacts(taskId, runId, continuationToken, limit string) (*ListArtifactsResponse, error)
Stability: *** EXPERIMENTAL ***
Returns a list of artifacts and associated meta-data for a given run.
As a task may have many artifacts paging may be necessary. If this end-point returns a `continuationToken`, you should call the end-point again with the `continuationToken` as the query-string option: `continuationToken`.
By default this end-point will list up-to 1000 artifacts in a single page you may limit this with the query-string parameter `limit`.
See #listArtifacts
func (*Queue) ListDependentTasks ¶
func (queue *Queue) ListDependentTasks(taskId, continuationToken, limit string) (*ListDependentTasksResponse, error)
List tasks that depend on the given `taskId`.
As many tasks from different task-groups may depend on a single task, this end-point may return a `continuationToken`. To continue listing tasks you must call `listDependentTasks` again with the `continuationToken` as the query-string option `continuationToken`.
By default this end-point will try to return up to 1000 tasks in one request. But it **may return less**, even if more tasks are available. It may also return a `continuationToken` even though there are no more results. However, you can only be sure to have seen all results if you keep calling `listDependentTasks` with the last `continuationToken` until you get a result without a `continuationToken`.
If you are not interested in listing all the tasks at once, you may use the query-string option `limit` to return fewer.
See #listDependentTasks
func (*Queue) ListLatestArtifacts ¶
func (queue *Queue) ListLatestArtifacts(taskId, continuationToken, limit string) (*ListArtifactsResponse, error)
Stability: *** EXPERIMENTAL ***
Returns a list of artifacts and associated meta-data for the latest run from the given task.
As a task may have many artifacts paging may be necessary. If this end-point returns a `continuationToken`, you should call the end-point again with the `continuationToken` as the query-string option: `continuationToken`.
By default this end-point will list up-to 1000 artifacts in a single page you may limit this with the query-string parameter `limit`.
See #listLatestArtifacts
func (*Queue) ListProvisioners ¶
func (queue *Queue) ListProvisioners(continuationToken, limit string) (*ListProvisionersResponse, error)
Stability: *** EXPERIMENTAL ***
Get all active provisioners.
The term "provisioner" is taken broadly to mean anything with a provisionerId. This does not necessarily mean there is an associated service performing any provisioning activity.
The response is paged. If this end-point returns a `continuationToken`, you should call the end-point again with the `continuationToken` as a query-string option. By default this end-point will list up to 1000 provisioners in a single page. You may limit this with the query-string parameter `limit`.
See #listProvisioners
func (*Queue) ListTaskGroup ¶
func (queue *Queue) ListTaskGroup(taskGroupId, continuationToken, limit string) (*ListTaskGroupResponse, error)
List tasks sharing the same `taskGroupId`.
As a task-group may contain an unbounded number of tasks, this end-point may return a `continuationToken`. To continue listing tasks you must call the `listTaskGroup` again with the `continuationToken` as the query-string option `continuationToken`.
By default this end-point will try to return up to 1000 members in one request. But it **may return less**, even if more tasks are available. It may also return a `continuationToken` even though there are no more results. However, you can only be sure to have seen all results if you keep calling `listTaskGroup` with the last `continuationToken` until you get a result without a `continuationToken`.
If you are not interested in listing all the members at once, you may use the query-string option `limit` to return fewer.
See #listTaskGroup
func (*Queue) ListWorkerTypes ¶
func (queue *Queue) ListWorkerTypes(provisionerId, continuationToken, limit string) (*ListWorkerTypesResponse, error)
Stability: *** EXPERIMENTAL ***
Get all active worker-types for the given provisioner.
The response is paged. If this end-point returns a `continuationToken`, you should call the end-point again with the `continuationToken` as a query-string option. By default this end-point will list up to 1000 worker-types in a single page. You may limit this with the query-string parameter `limit`.
See #listWorkerTypes
func (*Queue) ListWorkers ¶
func (queue *Queue) ListWorkers(provisionerId, workerType, continuationToken, limit, quarantined string) (*ListWorkersResponse, error)
Stability: *** EXPERIMENTAL ***
Get a list of all active workers of a workerType.
`listWorkers` allows a response to be filtered by quarantined and non-quarantined workers. To filter the query, you should call the end-point with `quarantined` as a query-string option with a true or false value.
The response is paged. If this end-point returns a `continuationToken`, you should call the end-point again with the `continuationToken` as a query-string option. By default this end-point will list up to 1000 workers in a single page. You may limit this with the query-string parameter `limit`.
See #listWorkers
func (*Queue) PendingTasks ¶
func (queue *Queue) PendingTasks(provisionerId, workerType string) (*CountPendingTasksResponse, error)
Get an approximate number of pending tasks for the given `provisionerId` and `workerType`.
The underlying Azure Storage Queues only promises to give us an estimate. Furthermore, we cache the result in memory for 20 seconds. So consumers should by no means expect this to be an accurate number. It is, however, a solid estimate of the number of pending tasks.
See #pendingTasks
func (*Queue) Ping ¶
Respond without doing anything. This endpoint is used to check that the service is up.
See #ping
func (*Queue) QuarantineWorker ¶
func (queue *Queue) QuarantineWorker(provisionerId, workerType, workerGroup, workerId string, payload *QuarantineWorkerRequest) (*WorkerResponse, error)
Stability: *** EXPERIMENTAL ***
Quarantine a worker ¶
Required scopes:
queue:quarantine-worker:<provisionerId>/<workerType>/<workerGroup>/<workerId>
See #quarantineWorker
func (*Queue) ReclaimTask ¶
func (queue *Queue) ReclaimTask(taskId, runId string) (*TaskReclaimResponse, error)
Refresh the claim for a specific `runId` for given `taskId`. This updates the `takenUntil` property and returns a new set of temporary credentials for performing requests on behalf of the task. These credentials should be used in-place of the credentials returned by `claimWork`.
The `reclaimTask` requests serves to:
- Postpone `takenUntil` preventing the queue from resolving `claim-expired`,
- Refresh temporary credentials used for processing the task, and
- Abort execution if the task/run have been resolved.
If the `takenUntil` timestamp is exceeded the queue will resolve the run as _exception_ with reason `claim-expired`, and proceed to retry the task. This ensures that tasks are retried, even if workers disappear without warning.
If the task is resolved, this end-point will return `409` reporting `RequestConflict`. This typically happens if the task has been canceled or the `task.deadline` has been exceeded. If reclaiming fails, workers should abort the task and forget about the given `runId`. There is no need to resolve the run or upload artifacts.
Required scopes:
Any of: - queue:reclaim-task:<taskId>/<runId> - All of: * queue:claim-task * assume:worker-id:<workerGroup>/<workerId>
See #reclaimTask
func (*Queue) ReportCompleted ¶
func (queue *Queue) ReportCompleted(taskId, runId string) (*TaskStatusResponse, error)
Report a task completed, resolving the run as `completed`.
Required scopes:
Any of: - queue:resolve-task:<taskId>/<runId> - All of: * queue:resolve-task * assume:worker-id:<workerGroup>/<workerId>
See #reportCompleted
func (*Queue) ReportException ¶
func (queue *Queue) ReportException(taskId, runId string, payload *TaskExceptionRequest) (*TaskStatusResponse, error)
Resolve a run as _exception_. Generally, you will want to report tasks as failed instead of exception. You should `reportException` if,
- The `task.payload` is invalid,
- Non-existent resources are referenced,
- Declared actions cannot be executed due to unavailable resources,
- The worker had to shutdown prematurely,
- The worker experienced an unknown error, or,
- The task explicitly requested a retry.
Do not use this to signal that some user-specified code crashed for any reason specific to this code. If user-specific code hits a resource that is temporarily unavailable, the worker should report the task _failed_.
Required scopes:
Any of: - queue:resolve-task:<taskId>/<runId> - All of: * queue:resolve-task * assume:worker-id:<workerGroup>/<workerId>
See #reportException
func (*Queue) ReportFailed ¶
func (queue *Queue) ReportFailed(taskId, runId string) (*TaskStatusResponse, error)
Report a run failed, resolving the run as `failed`. Use this to resolve a run that failed because the task specific code behaved unexpectedly. For example the task exited non-zero, or didn't produce expected output.
Do not use this if the task couldn't be run because of malformed payload, or other unexpected condition. In these cases we have a task exception, which should be reported with `reportException`.
Required scopes:
Any of: - queue:resolve-task:<taskId>/<runId> - All of: * queue:resolve-task * assume:worker-id:<workerGroup>/<workerId>
See #reportFailed
func (*Queue) RerunTask ¶
func (queue *Queue) RerunTask(taskId string) (*TaskStatusResponse, error)
Stability: *** DEPRECATED ***
This method _reruns_ a previously resolved task, even if it was _completed_. This is useful if your task completes unsuccessfully, and you just want to run it from scratch again. This will also reset the number of `retries` allowed.
This method is deprecated in favour of creating a new task with the same task definition (but with a new taskId).
Remember that `retries` in the task status counts the number of runs that the queue has started because the worker stopped responding, for example because a spot node died.
**Remark** this operation is idempotent, if you try to rerun a task that is not either `failed` or `completed`, this operation will just return the current task status.
Required scopes:
Any of: - queue:rerun-task:<schedulerId>/<taskGroupId>/<taskId> - All of: * queue:rerun-task * assume:scheduler-id:<schedulerId>/<taskGroupId>
See #rerunTask
func (*Queue) ScheduleTask ¶
func (queue *Queue) ScheduleTask(taskId string) (*TaskStatusResponse, error)
scheduleTask will schedule a task to be executed, even if it has unresolved dependencies. A task would otherwise only be scheduled if its dependencies were resolved.
This is useful if you have defined a task that depends on itself or on some other task that has not been resolved, but you wish the task to be scheduled immediately.
This will announce the task as pending and workers will be allowed to claim it and resolve the task.
**Note** this operation is **idempotent** and will not fail or complain if called with a `taskId` that is already scheduled, or even resolved. To reschedule a task previously resolved, use `rerunTask`.
Required scopes:
Any of: - queue:schedule-task:<schedulerId>/<taskGroupId>/<taskId> - All of: * queue:schedule-task * assume:scheduler-id:<schedulerId>/<taskGroupId>
See #scheduleTask
type RedirectArtifactRequest ¶
type RedirectArtifactRequest struct { // Artifact mime-type for the resource to which the queue should // redirect. Please use the same `Content-Type`, consistently using // the correct mime-type make tooling a lot easier, specifically, // always using `application/json` for JSON artifacts. // // Max length: 255 // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[3]/properties/contentType ContentType string `json:"contentType"` // Date-time after which the queue should no longer redirect to this URL. // Note, that the queue will and cannot delete the resource your URL // references, you are responsible for doing that yourself. // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[3]/properties/expires Expires tcclient.Time `json:"expires"` // Artifact storage type, in this case `reference` // // Possible values: // * "reference" // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[3]/properties/storageType StorageType string `json:"storageType"` // URL to which the queue should redirect using a `303` (See other) // redirect. // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[3]/properties/url URL string `json:"url"` }
Request the queue to redirect to a URL for a given artifact. This allows you to reference artifacts that aren't managed by the queue. The queue will still authenticate the request, so depending on the level of secrecy required, secret URLs **might** work. Note, this is mainly useful for public artifacts, for example temporary files directly stored on the worker host and only available there for a specific amount of time.
See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[3]
type RedirectArtifactResponse ¶
type RedirectArtifactResponse struct { // Artifact storage type, in this case `reference` // // Possible values: // * "reference" // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[3]/properties/storageType StorageType string `json:"storageType"` }
Response to a request for the queue to redirect to a URL for a given artifact.
See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[3]
type RunInformation ¶
type RunInformation struct { // Reason for the creation of this run, // **more reasons may be added in the future**. // // Possible values: // * "scheduled" // * "retry" // * "task-retry" // * "rerun" // * "exception" // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/runs/items/properties/reasonCreated ReasonCreated string `json:"reasonCreated"` // Reason that run was resolved, this is mainly // useful for runs resolved as `exception`. // Note, **more reasons may be added in the future**, also this // property is only available after the run is resolved. Some of these // reasons, notably `intermittent-task`, `worker-shutdown`, and // `claim-expired`, will trigger an automatic retry of the task. // // Possible values: // * "completed" // * "failed" // * "deadline-exceeded" // * "canceled" // * "superseded" // * "claim-expired" // * "worker-shutdown" // * "malformed-payload" // * "resource-unavailable" // * "internal-error" // * "intermittent-task" // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/runs/items/properties/reasonResolved ReasonResolved string `json:"reasonResolved,omitempty"` // Date-time at which this run was resolved, ie. when the run changed // state from `running` to either `completed`, `failed` or `exception`. // This property is only present after the run as been resolved. // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/runs/items/properties/resolved Resolved tcclient.Time `json:"resolved,omitempty"` // Id of this task run, `run-id`s always starts from `0` // // Mininum: 0 // Maximum: 1000 // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/runs/items/properties/runId RunID int64 `json:"runId"` // Date-time at which this run was scheduled, ie. when the run was // created in state `pending`. 
// // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/runs/items/properties/scheduled Scheduled tcclient.Time `json:"scheduled"` // Date-time at which this run was claimed, ie. when the run changed // state from `pending` to `running`. This property is only present // after the run has been claimed. // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/runs/items/properties/started Started tcclient.Time `json:"started,omitempty"` // State of this run // // Possible values: // * "pending" // * "running" // * "completed" // * "failed" // * "exception" // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/runs/items/properties/state State string `json:"state"` // Time at which the run expires and is resolved as `failed`, if the // run isn't reclaimed. Note, only present after the run has been // claimed. // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/runs/items/properties/takenUntil TakenUntil tcclient.Time `json:"takenUntil,omitempty"` // Identifier for group that worker who executes this run is a part of, // this identifier is mainly used for efficient routing. // Note, this property is only present after the run is claimed. // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/runs/items/properties/workerGroup WorkerGroup string `json:"workerGroup,omitempty"` // Identifier for worker evaluating this run within given // `workerGroup`. Note, this property is only available after the run // has been claimed. // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/runs/items/properties/workerId WorkerID string `json:"workerId,omitempty"` }
JSON object with information about a run
See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/runs/items
type S3ArtifactRequest ¶
type S3ArtifactRequest struct { // Artifact mime-type, when uploading artifact to the signed // `PUT` URL returned from this request this must given with the // `ContentType` header. Please, provide correct mime-type, // this make tooling a lot easier, specifically, // always using `application/json` for JSON artifacts. // // Max length: 255 // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[1]/properties/contentType ContentType string `json:"contentType"` // Date-time after which the artifact should be deleted. Note, that // these will be collected over time, and artifacts may remain // available after expiration. S3 based artifacts are identified in // azure table storage and explicitly deleted on S3 after expiration. // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[1]/properties/expires Expires tcclient.Time `json:"expires"` // Artifact storage type, in this case `'s3'` // // Possible values: // * "s3" // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[1]/properties/storageType StorageType string `json:"storageType"` }
Request for a signed PUT URL that will allow you to upload an artifact to an S3 bucket managed by the queue.
See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-request.json#/oneOf[1]
type S3ArtifactResponse ¶
type S3ArtifactResponse struct { // Artifact mime-type, must be specified as header when uploading with // the signed `putUrl`. // // Max length: 255 // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[1]/properties/contentType ContentType string `json:"contentType"` // Date-time after which the signed `putUrl` no longer works // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[1]/properties/expires Expires tcclient.Time `json:"expires"` // URL to which a `PUT` request can be made to upload the artifact // requested. Note, the `Content-Length` must be specified correctly, // and the `ContentType` header must be set the value specified below. // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[1]/properties/putUrl PutURL string `json:"putUrl"` // Artifact storage type, in this case `'s3'` // // Possible values: // * "s3" // // See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[1]/properties/storageType StorageType string `json:"storageType"` }
Response to a request for a signed PUT URL that will allow you to upload an artifact to an S3 bucket managed by the queue.
See https://taskcluster-staging.net/schemas/queue/v1/post-artifact-response.json#/oneOf[1]
type TaskClaim ¶
type TaskClaim struct { // Temporary credentials granting `task.scopes` and the scope: // `queue:claim-task:<taskId>/<runId>` which allows the worker to reclaim // the task, upload artifacts and report task resolution. // // The temporary credentials are set to expire after `takenUntil`. They // won't expire exactly at `takenUntil` but shortly after, hence, requests // coming close `takenUntil` won't have problems even if there is a little // clock drift. // // Workers should use these credentials when making requests on behalf of // a task. This includes requests to create artifacts, reclaiming the task // reporting the task `completed`, `failed` or `exception`. // // Note, a new set of temporary credentials is issued when the worker // reclaims the task. // // See https://taskcluster-staging.net/schemas/queue/v1/task-credentials.json# Credentials TaskCredentials `json:"credentials"` // `run-id` assigned to this run of the task // // Mininum: 0 // Maximum: 1000 // // See https://taskcluster-staging.net/schemas/queue/v1/claim-work-response.json#/properties/tasks/items/properties/runId RunID int64 `json:"runId"` // A representation of **task status** as known by the queue // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json# Status TaskStatusStructure `json:"status"` // Time at which the run expires and is resolved as `exception`, // with reason `claim-expired` if the run haven't been reclaimed. // // See https://taskcluster-staging.net/schemas/queue/v1/claim-work-response.json#/properties/tasks/items/properties/takenUntil TakenUntil tcclient.Time `json:"takenUntil"` // Definition of a task that can be scheduled // // See https://taskcluster-staging.net/schemas/queue/v1/task.json# Task TaskDefinitionResponse `json:"task"` // Identifier for the worker-group within which this run started. 
// // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/claim-work-response.json#/properties/tasks/items/properties/workerGroup WorkerGroup string `json:"workerGroup"` // Identifier for the worker executing this run. // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/claim-work-response.json#/properties/tasks/items/properties/workerId WorkerID string `json:"workerId"` }
See https://taskcluster-staging.net/schemas/queue/v1/claim-work-response.json#/properties/tasks/items
type TaskClaimRequest ¶
type TaskClaimRequest struct { // Identifier for group that worker claiming the task is a part of. // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task-claim-request.json#/properties/workerGroup WorkerGroup string `json:"workerGroup"` // Identifier for worker within the given workerGroup // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task-claim-request.json#/properties/workerId WorkerID string `json:"workerId"` }
Request to claim (or reclaim) a task
See https://taskcluster-staging.net/schemas/queue/v1/task-claim-request.json#
type TaskClaimResponse ¶
type TaskClaimResponse struct { // Temporary credentials granting `task.scopes` and the scope: // `queue:claim-task:<taskId>/<runId>` which allows the worker to reclaim // the task, upload artifacts and report task resolution. // // The temporary credentials are set to expire after `takenUntil`. They // won't expire exactly at `takenUntil` but shortly after, hence, requests // coming close `takenUntil` won't have problems even if there is a little // clock drift. // // Workers should use these credentials when making requests on behalf of // a task. This includes requests to create artifacts, reclaiming the task // reporting the task `completed`, `failed` or `exception`. // // Note, a new set of temporary credentials is issued when the worker // reclaims the task. // // See https://taskcluster-staging.net/schemas/queue/v1/task-credentials.json# Credentials TaskCredentials `json:"credentials"` // `run-id` assigned to this run of the task // // Mininum: 0 // Maximum: 1000 // // See https://taskcluster-staging.net/schemas/queue/v1/task-claim-response.json#/properties/runId RunID int64 `json:"runId"` // A representation of **task status** as known by the queue // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json# Status TaskStatusStructure `json:"status"` // Time at which the run expires and is resolved as `exception`, // with reason `claim-expired` if the run haven't been reclaimed. // // See https://taskcluster-staging.net/schemas/queue/v1/task-claim-response.json#/properties/takenUntil TakenUntil tcclient.Time `json:"takenUntil"` // Definition of a task that can be scheduled // // See https://taskcluster-staging.net/schemas/queue/v1/task.json# Task TaskDefinitionResponse `json:"task"` // Identifier for the worker-group within which this run started. 
// // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task-claim-response.json#/properties/workerGroup WorkerGroup string `json:"workerGroup"` // Identifier for the worker executing this run. // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task-claim-response.json#/properties/workerId WorkerID string `json:"workerId"` }
Response to a successful task claim
See https://taskcluster-staging.net/schemas/queue/v1/task-claim-response.json#
type TaskCredentials ¶
type TaskCredentials struct { // The `accessToken` for the temporary credentials. // // Min length: 1 // // See https://taskcluster-staging.net/schemas/queue/v1/task-credentials.json#/properties/accessToken AccessToken string `json:"accessToken"` // The `certificate` for the temporary credentials, these are required // for the temporary credentials to work. // // Min length: 1 // // See https://taskcluster-staging.net/schemas/queue/v1/task-credentials.json#/properties/certificate Certificate string `json:"certificate"` // The `clientId` for the temporary credentials. // // Min length: 1 // // See https://taskcluster-staging.net/schemas/queue/v1/task-credentials.json#/properties/clientId ClientID string `json:"clientId"` }
Temporary credentials granting `task.scopes` and the scope: `queue:claim-task:<taskId>/<runId>` which allows the worker to reclaim the task, upload artifacts and report task resolution.
The temporary credentials are set to expire after `takenUntil`. They won't expire exactly at `takenUntil` but shortly after, hence, requests coming close to `takenUntil` won't have problems even if there is a little clock drift.
Workers should use these credentials when making requests on behalf of a task. This includes requests to create artifacts, reclaiming the task, and reporting the task `completed`, `failed` or `exception`.
Note, a new set of temporary credentials is issued when the worker reclaims the task.
See https://taskcluster-staging.net/schemas/queue/v1/task-credentials.json#
type TaskDefinitionAndStatus ¶
type TaskDefinitionAndStatus struct { // A representation of **task status** as known by the queue // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json# Status TaskStatusStructure `json:"status"` // Definition of a task that can be scheduled // // See https://taskcluster-staging.net/schemas/queue/v1/task.json# Task TaskDefinitionResponse `json:"task"` }
Task Definition and task status structure.
See https://taskcluster-staging.net/schemas/queue/v1/task-definition-and-status.json#
type TaskDefinitionRequest ¶
type TaskDefinitionRequest struct { // Creation time of task // // See https://taskcluster-staging.net/schemas/queue/v1/create-task-request.json#/properties/created Created tcclient.Time `json:"created"` // Deadline of the task, `pending` and `running` runs are // resolved as **exception** if not resolved by other means // before the deadline. Note, deadline cannot be more than // 5 days into the future // // See https://taskcluster-staging.net/schemas/queue/v1/create-task-request.json#/properties/deadline Deadline tcclient.Time `json:"deadline"` // List of dependent tasks. These must either be _completed_ or _resolved_ // before this task is scheduled. See `requires` for semantics. // // Default: [] // // Array items: // The `taskId` of a task that must be resolved before this task is // scheduled. // // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/dependencies/items // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/dependencies Dependencies []string `json:"dependencies,omitempty"` // Task expiration, time at which task definition and status is deleted. // Notice that all artifacts for the task must have an expiration that is no // later than this. If this property isn't it will be set to `deadline` // plus one year (this default may change). // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/expires Expires tcclient.Time `json:"expires,omitempty"` // Object with properties that can hold any kind of extra data that should be // associated with the task. This can be data for the task which doesn't // fit into `payload`, or it can supplementary data for use in services // listening for events from this task. For example this could be details to // display on _treeherder_, or information for indexing the task. 
Please, try // to put all related information under one property, so `extra` data keys // for treeherder reporting and task indexing don't conflict, hence, we have // reusable services. **Warning**, do not stuff large data-sets in here -- // task definitions should not take-up multiple MiBs. // // Default: {} // // Additional properties allowed // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/extra Extra json.RawMessage `json:"extra,omitempty"` // Required task metadata // // See https://taskcluster-staging.net/schemas/queue/v1/task-metadata.json# Metadata TaskMetadata `json:"metadata"` // Task-specific payload following worker-specific format. // Refer to the documentation for the worker implementing // `<provisionerId>/<workerType>` for details. // // Additional properties allowed // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/payload Payload json.RawMessage `json:"payload"` // Priority of task. This defaults to `lowest` and the scope // `queue:create-task:<priority>/<provisionerId>/<workerType>` is required // to define a task with `<priority>`. The `normal` priority is treated as // `lowest`. // // Possible values: // * "highest" // * "very-high" // * "high" // * "medium" // * "low" // * "very-low" // * "lowest" // * "normal" // // Default: "lowest" // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/priority Priority string `json:"priority,omitempty"` // Unique identifier for a provisioner, that can supply specified // `workerType` // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/provisionerId ProvisionerID string `json:"provisionerId"` // The tasks relation to its dependencies. This property specifies the // semantics of the `task.dependencies` property. 
// If `all-completed` is given the task will be scheduled when all // dependencies are resolved _completed_ (successful resolution). // If `all-resolved` is given the task will be scheduled when all dependencies // have been resolved, regardless of what their resolution is. // // Possible values: // * "all-completed" // * "all-resolved" // // Default: "all-completed" // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/requires Requires string `json:"requires,omitempty"` // Number of times to retry the task in case of infrastructure issues. // An _infrastructure issue_ is a worker node that crashes or is shutdown, // these events are to be expected. // // Default: 5 // Mininum: 0 // Maximum: 49 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/retries Retries int64 `json:"retries,omitempty"` // List of task-specific routes. Pulse messages about the task will be CC'ed to // `route.<value>` for each `<value>` in this array. // // Default: [] // // Array items: // A task specific route. // // Min length: 1 // Max length: 249 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/routes/items // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/routes Routes []string `json:"routes,omitempty"` // All tasks in a task group must have the same `schedulerId`. This is used for several purposes: // // * it can represent the entity that created the task; // * it can limit addition of new tasks to a task group: the caller of // `createTask` must have a scope related to the `schedulerId` of the task // group; // * it controls who can manipulate tasks, again by requiring // `schedulerId`-related scopes; and // * it appears in the routing key for Pulse messages about the task. 
// // Default: "-" // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/schedulerId SchedulerID string `json:"schedulerId,omitempty"` // List of scopes that the task is authorized to use during its execution. // // Array items: // A single scope. A scope must be composed of // printable ASCII characters and spaces. Scopes ending in more than // one `*` character are forbidden. // // Syntax: ^[ -~]*$ // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/scopes/items // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/scopes Scopes []string `json:"scopes,omitempty"` // Arbitrary key-value tags (only strings limited to 4k). These can be used // to attach informal metadata to a task. Use this for informal tags that // tasks can be classified by. You can also think of strings here as // candidates for formal metadata. Something like // `purpose: 'build' || 'test'` is a good example. // // Default: {} // // Map entries: // Max length: 4096 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/tags/additionalProperties // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/tags Tags map[string]string `json:"tags,omitempty"` // Identifier for a group of tasks scheduled together with this task. // Generally, all tasks related to a single event such as a version-control // push or a nightly build have the same `taskGroupId`. This property // defaults to `taskId` if it isn't specified. Tasks with `taskId` equal to // the `taskGroupId` are, [by convention](/docs/manual/using/task-graph), // decision tasks. 
// // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/taskGroupId TaskGroupID string `json:"taskGroupId,omitempty"` // Unique identifier for a worker-type within a specific provisioner // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/workerType WorkerType string `json:"workerType"` }
Definition of a task that can be scheduled
See https://taskcluster-staging.net/schemas/queue/v1/create-task-request.json#
type TaskDefinitionResponse ¶
type TaskDefinitionResponse struct { // Creation time of task // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/created Created tcclient.Time `json:"created"` // Deadline of the task, `pending` and `running` runs are // resolved as **exception** if not resolved by other means // before the deadline. Note, deadline cannot be more than // 5 days into the future // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/deadline Deadline tcclient.Time `json:"deadline"` // List of dependent tasks. These must either be _completed_ or _resolved_ // before this task is scheduled. See `requires` for semantics. // // Default: [] // // Array items: // The `taskId` of a task that must be resolved before this task is // scheduled. // // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/dependencies/items // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/dependencies Dependencies []string `json:"dependencies"` // Task expiration, time at which task definition and status is deleted. // Notice that all artifacts for the task must have an expiration that is no // later than this. If this property isn't it will be set to `deadline` // plus one year (this default may change). // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/expires Expires tcclient.Time `json:"expires,omitempty"` // Object with properties that can hold any kind of extra data that should be // associated with the task. This can be data for the task which doesn't // fit into `payload`, or it can supplementary data for use in services // listening for events from this task. For example this could be details to // display on _treeherder_, or information for indexing the task. 
Please, try // to put all related information under one property, so `extra` data keys // for treeherder reporting and task indexing don't conflict, hence, we have // reusable services. **Warning**, do not stuff large data-sets in here -- // task definitions should not take-up multiple MiBs. // // Default: {} // // Additional properties allowed // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/extra Extra json.RawMessage `json:"extra"` // Required task metadata // // See https://taskcluster-staging.net/schemas/queue/v1/task-metadata.json# Metadata TaskMetadata `json:"metadata"` // Task-specific payload following worker-specific format. // Refer to the documentation for the worker implementing // `<provisionerId>/<workerType>` for details. // // Additional properties allowed // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/payload Payload json.RawMessage `json:"payload"` // Priority of task. This defaults to `lowest` and the scope // `queue:create-task:<priority>/<provisionerId>/<workerType>` is required // to define a task with `<priority>`. The `normal` priority is treated as // `lowest`. // // Possible values: // * "highest" // * "very-high" // * "high" // * "medium" // * "low" // * "very-low" // * "lowest" // * "normal" // // Default: "lowest" // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/priority Priority string `json:"priority"` // Unique identifier for a provisioner, that can supply specified // `workerType` // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/provisionerId ProvisionerID string `json:"provisionerId"` // The tasks relation to its dependencies. This property specifies the // semantics of the `task.dependencies` property. // If `all-completed` is given the task will be scheduled when all // dependencies are resolved _completed_ (successful resolution). 
// If `all-resolved` is given the task will be scheduled when all dependencies // have been resolved, regardless of what their resolution is. // // Possible values: // * "all-completed" // * "all-resolved" // // Default: "all-completed" // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/requires Requires string `json:"requires"` // Number of times to retry the task in case of infrastructure issues. // An _infrastructure issue_ is a worker node that crashes or is shutdown, // these events are to be expected. // // Default: 5 // Mininum: 0 // Maximum: 49 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/retries Retries int64 `json:"retries"` // List of task-specific routes. Pulse messages about the task will be CC'ed to // `route.<value>` for each `<value>` in this array. // // Default: [] // // Array items: // A task specific route. // // Min length: 1 // Max length: 249 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/routes/items // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/routes Routes []string `json:"routes"` // All tasks in a task group must have the same `schedulerId`. This is used for several purposes: // // * it can represent the entity that created the task; // * it can limit addition of new tasks to a task group: the caller of // `createTask` must have a scope related to the `schedulerId` of the task // group; // * it controls who can manipulate tasks, again by requiring // `schedulerId`-related scopes; and // * it appears in the routing key for Pulse messages about the task. // // Default: "-" // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/schedulerId SchedulerID string `json:"schedulerId"` // List of scopes that the task is authorized to use during its execution. // // Array items: // A single scope. 
A scope must be composed of // printable ASCII characters and spaces. Scopes ending in more than // one `*` character are forbidden. // // Syntax: ^[ -~]*$ // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/scopes/items // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/scopes Scopes []string `json:"scopes"` // Arbitrary key-value tags (only strings limited to 4k). These can be used // to attach informal metadata to a task. Use this for informal tags that // tasks can be classified by. You can also think of strings here as // candidates for formal metadata. Something like // `purpose: 'build' || 'test'` is a good example. // // Default: {} // // Map entries: // Max length: 4096 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/tags/additionalProperties // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/tags Tags map[string]string `json:"tags"` // Identifier for a group of tasks scheduled together with this task. // Generally, all tasks related to a single event such as a version-control // push or a nightly build have the same `taskGroupId`. This property // defaults to `taskId` if it isn't specified. Tasks with `taskId` equal to // the `taskGroupId` are, [by convention](/docs/manual/using/task-graph), // decision tasks. // // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/taskGroupId TaskGroupID string `json:"taskGroupId"` // Unique identifier for a worker-type within a specific provisioner // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/workerType WorkerType string `json:"workerType"` }
Definition of a task that can be scheduled
See https://taskcluster-staging.net/schemas/queue/v1/task.json#
type TaskExceptionRequest ¶
type TaskExceptionRequest struct { // Reason that the task is resolved with an exception. This is a subset // of the values for `resolvedReason` given in the task status structure. // **Report `worker-shutdown`** if the run failed because the worker // had to shutdown (spot node disappearing). In case of `worker-shutdown` // the queue will immediately **retry** the task, by making a new run. // This is much faster than ignoring the issue and letting the task _retry_ // by claim expiration. For any other _reason_ reported the queue will not // retry the task. // **Report `malformed-payload`** if the `task.payload` doesn't match the // schema for the worker payload, or a referenced resource doesn't exist. // In either case, you should still log the error to a log file for the // specific run. // **Report `resource-unavailable`** if a resource/service needed or // referenced in `task.payload` is _temporarily_ unavailable. Do not use this // unless you know the resource exists, if the resource doesn't exist you // should report `malformed-payload`. Example use-case: if you contact the // index (a service) on behalf of the task, because of a declaration in // `task.payload`, and the service (index) is temporarily down. Don't use // this if a URL returns 404, but if it returns 503 or hits a timeout when // you retry the request, then this _may_ be a valid exception. The queue // assumes that workers have applied retries as needed, and will not retry // the task. // **Report `internal-error`** if the worker experienced an unhandled internal // error from which it couldn't recover. The queue will not retry runs // resolved with this reason, but you are clearly signaling that this is a // bug in the worker code. // **Report `superseded`** if the task was determined to have been // superseded by another task, and its results are no longer needed. It is // convention in this case to create an artifact entitled // `public/superseded-by` containing the taskId of the task that superseded // this one. // **Report `intermittent-task`** if the task explicitly requested a retry // because the task is intermittent. Workers can choose whether or not to // support this, but workers shouldn't blindly report this for every task // that fails. // // Possible values: // * "worker-shutdown" // * "malformed-payload" // * "resource-unavailable" // * "internal-error" // * "superseded" // * "intermittent-task" // // See https://taskcluster-staging.net/schemas/queue/v1/task-exception-request.json#/properties/reason Reason string `json:"reason"` }
Request for a run of a task to be resolved with an exception
See https://taskcluster-staging.net/schemas/queue/v1/task-exception-request.json#
type TaskMetadata ¶
type TaskMetadata struct { // Human readable description of the task, please **explain** what the // task does. A few lines of documentation is not going to hurt you. // // Max length: 32768 // // See https://taskcluster-staging.net/schemas/queue/v1/task-metadata.json#/properties/description Description string `json:"description"` // Human readable name of task, used to very briefly give an idea about // what the task does. // // Max length: 255 // // See https://taskcluster-staging.net/schemas/queue/v1/task-metadata.json#/properties/name Name string `json:"name"` // E-mail of person who caused this task, e.g. the person who did // `hg push`. The person we should contact to ask why this task is here. // // Max length: 255 // // See https://taskcluster-staging.net/schemas/queue/v1/task-metadata.json#/properties/owner Owner string `json:"owner"` // Link to source of this task, should specify a file, revision and // repository. This should be a place someone can go and do a git/hg blame // to find out who came up with the recipe for this task. // // Syntax: ^https?:// // Max length: 4096 // // See https://taskcluster-staging.net/schemas/queue/v1/task-metadata.json#/properties/source Source string `json:"source"` }
Required task metadata
See https://taskcluster-staging.net/schemas/queue/v1/task-metadata.json#
type TaskReclaimResponse ¶
type TaskReclaimResponse struct { // Temporary credentials granting `task.scopes` and the scope: // `queue:claim-task:<taskId>/<runId>` which allows the worker to reclaim // the task, upload artifacts and report task resolution. // // The temporary credentials are set to expire after `takenUntil`. They // won't expire exactly at `takenUntil` but shortly after, hence, requests // coming close `takenUntil` won't have problems even if there is a little // clock drift. // // Workers should use these credentials when making requests on behalf of // a task. This includes requests to create artifacts, reclaiming the task // reporting the task `completed`, `failed` or `exception`. // // Note, a new set of temporary credentials is issued when the worker // reclaims the task. // // See https://taskcluster-staging.net/schemas/queue/v1/task-credentials.json# Credentials TaskCredentials `json:"credentials"` // `run-id` assigned to this run of the task // // Mininum: 0 // Maximum: 1000 // // See https://taskcluster-staging.net/schemas/queue/v1/task-reclaim-response.json#/properties/runId RunID int64 `json:"runId"` // A representation of **task status** as known by the queue // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json# Status TaskStatusStructure `json:"status"` // Time at which the run expires and is resolved as `exception`, // with reason `claim-expired` if the run haven't been reclaimed. // // See https://taskcluster-staging.net/schemas/queue/v1/task-reclaim-response.json#/properties/takenUntil TakenUntil tcclient.Time `json:"takenUntil"` // Identifier for the worker-group within which this run started. // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task-reclaim-response.json#/properties/workerGroup WorkerGroup string `json:"workerGroup"` // Identifier for the worker executing this run. 
// // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task-reclaim-response.json#/properties/workerId WorkerID string `json:"workerId"` }
Response to a successful task claim
See https://taskcluster-staging.net/schemas/queue/v1/task-reclaim-response.json#
type TaskRun ¶
type TaskRun struct { // Id of this task run, `run-id`s always start from `0` // // Minimum: 0 // Maximum: 1000 // // See https://taskcluster-staging.net/schemas/queue/v1/task-run.json#/properties/runId RunID int64 `json:"runId"` // Unique task identifier, this is UUID encoded as // [URL-safe base64](http://tools.ietf.org/html/rfc4648#section-5) and // stripped of `=` padding. // // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ // // See https://taskcluster-staging.net/schemas/queue/v1/task-run.json#/properties/taskId TaskID string `json:"taskId"` }
A run of a task.
See https://taskcluster-staging.net/schemas/queue/v1/task-run.json#
type TaskStatusResponse ¶
type TaskStatusResponse struct { // A representation of **task status** as known by the queue // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json# Status TaskStatusStructure `json:"status"` }
Response to a task status request
See https://taskcluster-staging.net/schemas/queue/v1/task-status-response.json#
type TaskStatusStructure ¶
type TaskStatusStructure struct { // Deadline of the task, `pending` and `running` runs are // resolved as **exception** if not resolved by other means // before the deadline. Note, deadline cannot be more than // 5 days into the future // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/deadline Deadline tcclient.Time `json:"deadline"` // Task expiration, time at which task definition and // status is deleted. Notice that all artifacts for the task // must have an expiration that is no later than this. // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/expires Expires tcclient.Time `json:"expires"` // Unique identifier for a provisioner, that can supply specified // `workerType` // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/provisionerId ProvisionerID string `json:"provisionerId"` // Number of retries left for the task in case of infrastructure issues // // Mininum: 0 // Maximum: 999 // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/retriesLeft RetriesLeft int64 `json:"retriesLeft"` // List of runs, ordered so that index `i` has `runId == i` // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/runs Runs []RunInformation `json:"runs"` // All tasks in a task group must have the same `schedulerId`. This is used for several purposes: // // * it can represent the entity that created the task; // * it can limit addition of new tasks to a task group: the caller of // `createTask` must have a scope related to the `schedulerId` of the task // group; // * it controls who can manipulate tasks, again by requiring // `schedulerId`-related scopes; and // * it appears in the routing key for Pulse messages about the task. 
// // Default: "-" // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/schedulerId SchedulerID string `json:"schedulerId"` // State of this task. This is just an auxiliary property derived from state // of latests run, or `unscheduled` if none. // // Possible values: // * "unscheduled" // * "pending" // * "running" // * "completed" // * "failed" // * "exception" // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/state State string `json:"state"` // Identifier for a group of tasks scheduled together with this task. // Generally, all tasks related to a single event such as a version-control // push or a nightly build have the same `taskGroupId`. This property // defaults to `taskId` if it isn't specified. Tasks with `taskId` equal to // the `taskGroupId` are, [by convention](/docs/manual/using/task-graph), // decision tasks. // // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/taskGroupId TaskGroupID string `json:"taskGroupId"` // Unique task identifier, this is UUID encoded as // [URL-safe base64](http://tools.ietf.org/html/rfc4648#section-5) and // stripped of `=` padding. // // Syntax: ^[A-Za-z0-9_-]{8}[Q-T][A-Za-z0-9_-][CGKOSWaeimquy26-][A-Za-z0-9_-]{10}[AQgw]$ // // See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#/properties/taskId TaskID string `json:"taskId"` // Unique identifier for a worker-type within a specific provisioner // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/task.json#/properties/workerType WorkerType string `json:"workerType"` }
A representation of **task status** as known by the queue
See https://taskcluster-staging.net/schemas/queue/v1/task-status.json#
type Worker ¶
type Worker struct { // Date of the first time this worker claimed a task. // // See https://taskcluster-staging.net/schemas/queue/v1/list-workers-response.json#/properties/workers/items/properties/firstClaim FirstClaim tcclient.Time `json:"firstClaim"` // A run of a task. // // See https://taskcluster-staging.net/schemas/queue/v1/task-run.json# LatestTask TaskRun `json:"latestTask,omitempty"` // Quarantining a worker allows the machine to remain alive but not accept jobs. // Once the quarantineUntil time has elapsed, the worker resumes accepting jobs. // Note that a quarantine can be lifted by setting `quarantineUntil` to the present time (or // somewhere in the past). // // See https://taskcluster-staging.net/schemas/queue/v1/list-workers-response.json#/properties/workers/items/properties/quarantineUntil QuarantineUntil tcclient.Time `json:"quarantineUntil,omitempty"` // Identifier for the worker group containing this worker. // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/list-workers-response.json#/properties/workers/items/properties/workerGroup WorkerGroup string `json:"workerGroup"` // Identifier for this worker (unique within this worker group). // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/list-workers-response.json#/properties/workers/items/properties/workerId WorkerID string `json:"workerId"` }
type WorkerAction ¶
type WorkerAction struct { // Only actions with the context `worker` are included. // // Possible values: // * "worker" // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/actions/items/properties/context Context string `json:"context"` // Description of the provisioner. // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/actions/items/properties/description Description string `json:"description"` // Method to indicate the desired action to be performed for a given resource. // // Possible values: // * "POST" // * "PUT" // * "DELETE" // * "PATCH" // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/actions/items/properties/method Method string `json:"method"` // Short names for things like logging/error messages. // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/actions/items/properties/name Name string `json:"name"` // Appropriate title for any sort of Modal prompt. // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/actions/items/properties/title Title json.RawMessage `json:"title"` // When an action is triggered, a request is made using the `url` and `method`. // Depending on the `context`, the following parameters will be substituted in the url: // // | `context` | Path parameters | // |-------------|----------------------------------------------------------| // | provisioner | <provisionerId> | // | worker-type | <provisionerId>, <workerType> | // | worker | <provisionerId>, <workerType>, <workerGroup>, <workerId> | // // _Note: The request needs to be signed with the user's Taskcluster credentials._ // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/actions/items/properties/url URL string `json:"url"` }
Actions provide a generic mechanism to expose additional features of a provisioner, worker type, or worker to Taskcluster clients.
An action is comprised of metadata describing the feature it exposes, together with a webhook for triggering it.
The Taskcluster tools site, for example, retrieves actions when displaying provisioners, worker types and workers. It presents the provisioner/worker type/worker specific actions to the user. When the user triggers an action, the web client takes the registered webhook, substitutes parameters into the URL (see `url`), signs the request with the Taskcluster credentials of the user operating the web interface, and issues the HTTP request.
The level to which the action relates (provisioner, worker type, worker) is called the action context. All actions, regardless of the action contexts, are registered against the provisioner when calling `queue.declareProvisioner`.
The action context is used by the web client to determine where in the web interface to present the action to the user as follows:
| `context`   | Tool where action is displayed |
|-------------|--------------------------------|
| provisioner | Provisioner Explorer           |
| worker-type | Workers Explorer               |
| worker      | Worker Explorer                |
See [actions docs](/docs/reference/platform/taskcluster-queue/docs/actions) for more information.
See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/actions/items
type WorkerRequest ¶
type WorkerRequest struct { // Date and time after which the worker will be automatically // deleted by the queue. // // See https://taskcluster-staging.net/schemas/queue/v1/update-worker-request.json#/properties/expires Expires tcclient.Time `json:"expires,omitempty"` }
Request to update a worker.
See https://taskcluster-staging.net/schemas/queue/v1/update-worker-request.json#
type WorkerResponse ¶
type WorkerResponse struct { // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/actions Actions []WorkerAction `json:"actions"` // Date and time after which the worker will be automatically // deleted by the queue. // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/expires Expires tcclient.Time `json:"expires"` // Date of the first time this worker claimed a task. // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/firstClaim FirstClaim tcclient.Time `json:"firstClaim"` // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/provisionerId ProvisionerID string `json:"provisionerId"` // Quarantining a worker allows the machine to remain alive but not accept jobs. // Once the quarantineUntil time has elapsed, the worker resumes accepting jobs. // Note that a quarantine can be lifted by setting `quarantineUntil` to the present time (or // somewhere in the past). // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/quarantineUntil QuarantineUntil tcclient.Time `json:"quarantineUntil,omitempty"` // List of 20 most recent tasks claimed by the worker. // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/recentTasks RecentTasks []TaskRun `json:"recentTasks"` // Identifier for group that worker who executes this run is a part of, // this identifier is mainly used for efficient routing. // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/workerGroup WorkerGroup string `json:"workerGroup"` // Identifier for worker evaluating this run within given // `workerGroup`. 
// // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/workerId WorkerID string `json:"workerId"` // WorkerType name. // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#/properties/workerType WorkerType string `json:"workerType"` }
Response containing information about a worker.
See https://taskcluster-staging.net/schemas/queue/v1/worker-response.json#
type WorkerType ¶
type WorkerType struct { // Description of the worker-type. // // See https://taskcluster-staging.net/schemas/queue/v1/list-workertypes-response.json#/properties/workerTypes/items/properties/description Description string `json:"description"` // Date and time after which the worker-type will be automatically // deleted by the queue. // // See https://taskcluster-staging.net/schemas/queue/v1/list-workertypes-response.json#/properties/workerTypes/items/properties/expires Expires tcclient.Time `json:"expires"` // Date and time where the worker-type was last seen active // // See https://taskcluster-staging.net/schemas/queue/v1/list-workertypes-response.json#/properties/workerTypes/items/properties/lastDateActive LastDateActive tcclient.Time `json:"lastDateActive"` // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/list-workertypes-response.json#/properties/workerTypes/items/properties/provisionerId ProvisionerID string `json:"provisionerId"` // This is the stability of the worker-type. Accepted values: // * `experimental` // * `stable` // * `deprecated` // // Possible values: // * "experimental" // * "stable" // * "deprecated" // // See https://taskcluster-staging.net/schemas/queue/v1/list-workertypes-response.json#/properties/workerTypes/items/properties/stability Stability string `json:"stability"` // WorkerType name. // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/list-workertypes-response.json#/properties/workerTypes/items/properties/workerType WorkerType string `json:"workerType"` }
type WorkerTypeAction ¶
type WorkerTypeAction struct { // Only actions with the context `worker-type` are included. // // Possible values: // * "worker-type" // // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/actions/items/properties/context Context string `json:"context"` // Description of the provisioner. // // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/actions/items/properties/description Description string `json:"description"` // Method to indicate the desired action to be performed for a given resource. // // Possible values: // * "POST" // * "PUT" // * "DELETE" // * "PATCH" // // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/actions/items/properties/method Method string `json:"method"` // Short names for things like logging/error messages. // // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/actions/items/properties/name Name string `json:"name"` // Appropriate title for any sort of Modal prompt. // // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/actions/items/properties/title Title json.RawMessage `json:"title"` // When an action is triggered, a request is made using the `url` and `method`. // Depending on the `context`, the following parameters will be substituted in the url: // // | `context` | Path parameters | // |-------------|----------------------------------------------------------| // | provisioner | <provisionerId> | // | worker-type | <provisionerId>, <workerType> | // | worker | <provisionerId>, <workerType>, <workerGroup>, <workerId> | // // _Note: The request needs to be signed with the user's Taskcluster credentials._ // // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/actions/items/properties/url URL string `json:"url"` }
Actions provide a generic mechanism to expose additional features of a provisioner, worker type, or worker to Taskcluster clients.
An action is comprised of metadata describing the feature it exposes, together with a webhook for triggering it.
The Taskcluster tools site, for example, retrieves actions when displaying provisioners, worker types and workers. It presents the provisioner/worker type/worker specific actions to the user. When the user triggers an action, the web client takes the registered webhook, substitutes parameters into the URL (see `url`), signs the request with the Taskcluster credentials of the user operating the web interface, and issues the HTTP request.
The level to which the action relates (provisioner, worker type, worker) is called the action context. All actions, regardless of the action contexts, are registered against the provisioner when calling `queue.declareProvisioner`.
The action context is used by the web client to determine where in the web interface to present the action to the user as follows:
| `context`   | Tool where action is displayed |
|-------------|--------------------------------|
| provisioner | Provisioner Explorer           |
| worker-type | Workers Explorer               |
| worker      | Worker Explorer                |
See [actions docs](/docs/reference/platform/taskcluster-queue/docs/actions) for more information.
See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/actions/items
type WorkerTypeRequest ¶
type WorkerTypeRequest struct { // Description of the provisioner. // // See https://taskcluster-staging.net/schemas/queue/v1/update-workertype-request.json#/properties/description Description string `json:"description,omitempty"` // Date and time after which the worker-type will be automatically // deleted by the queue. // // See https://taskcluster-staging.net/schemas/queue/v1/update-workertype-request.json#/properties/expires Expires tcclient.Time `json:"expires,omitempty"` // This is the stability of the provisioner. Accepted values: // * `experimental` // * `stable` // * `deprecated` // // Possible values: // * "experimental" // * "stable" // * "deprecated" // // See https://taskcluster-staging.net/schemas/queue/v1/update-workertype-request.json#/properties/stability Stability string `json:"stability,omitempty"` }
Request to update a worker-type.
See https://taskcluster-staging.net/schemas/queue/v1/update-workertype-request.json#
type WorkerTypeResponse ¶
type WorkerTypeResponse struct { // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/actions Actions []WorkerTypeAction `json:"actions"` // Description of the worker-type. // // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/description Description string `json:"description"` // Date and time after which the worker-type will be automatically // deleted by the queue. // // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/expires Expires tcclient.Time `json:"expires"` // Date of the last time this worker-type was seen active. `lastDateActive` is updated every 6 hours // but may be off by up-to 6 hours. Nonetheless, `lastDateActive` is a good indicator // of when the worker-type was last seen active. // // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/lastDateActive LastDateActive tcclient.Time `json:"lastDateActive"` // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/provisionerId ProvisionerID string `json:"provisionerId"` // This is the stability of the worker-type. Accepted values: // * `experimental` // * `stable` // * `deprecated` // // Possible values: // * "experimental" // * "stable" // * "deprecated" // // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/stability Stability string `json:"stability"` // WorkerType name. // // Syntax: ^([a-zA-Z0-9-_]*)$ // Min length: 1 // Max length: 38 // // See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#/properties/workerType WorkerType string `json:"workerType"` }
Response to a worker-type request from a provisioner.
See https://taskcluster-staging.net/schemas/queue/v1/workertype-response.json#