Documentation ¶
Index ¶
- func CopyProtocolUpsert(ctx context.Context, tx pgx.Tx, tableName string, schema string, ...) error
- func JobErrorsSchema() string
- func JobRunAssignmentSchema() string
- func JobRunErrorsSchema() string
- func JobsSchema() string
- func LeaderelectionSchema() string
- func NamesFromRecord(x interface{}) []string
- func NamesValuesFromRecord(x interface{}) ([]string, []interface{})
- func NodeInfoSchema() string
- func NodeTypesMatchingPod(nodeTypes map[string]*schedulerobjects.NodeType, ...) ([]*schedulerobjects.NodeType, map[string]int, error)
- func PriorityFromJob(job *api.Job, priorityByPriorityClassName map[string]int32) (priority int32, ok bool)
- func PulsarSchema() string
- func ResourceListAsWeightedApproximateFloat64(resourceScarcity map[string]float64, rl schedulerobjects.ResourceList) float64
- func RunsSchema() string
- func Upsert(ctx context.Context, db *pgxpool.Pool, tableName string, schema string, ...) error
- func ValuesFromRecord(x interface{}) []interface{}
- func WeightsFromAggregatedUsageByQueue(resourceScarcity map[string]float64, priorityFactorByQueue map[string]float64, ...) map[string]float64
- func WriteDbOp(ctx context.Context, db *pgxpool.Pool, op DbOperation) error
- type DBTX
- type DbOperation
- type DbOperationsBatcher
- type DbOperationsWithMessageIds
- type DbOpsWriter
- type ErrLostLeadership
- type ExecutorApi
- func (srv *ExecutorApi) RenewLease(ctx context.Context, req *api.RenewLeaseRequest) (*api.IdList, error)
- func (srv *ExecutorApi) ReportDone(ctx context.Context, req *api.IdList) (*api.IdList, error)
- func (srv *ExecutorApi) ReportUsage(ctx context.Context, req *api.ClusterUsageReport) (*types.Empty, error)
- func (srv *ExecutorApi) ReturnLease(ctx context.Context, req *api.ReturnLeaseRequest) (*types.Empty, error)
- func (srv *ExecutorApi) StreamingLeaseJobs(stream api.AggregatedQueue_StreamingLeaseJobsServer) error
- type Ingester
- type InsertJobErrors
- type InsertJobRunErrors
- type InsertJobs
- type InsertRunAssignments
- type InsertRuns
- type Job
- type JobError
- type JobRunAssignment
- type JobRunError
- type JobRuns
- type JobSchedulingReport
- type JobSetOperation
- type JobsIterator
- type LeaderElection
- type Leaderelection
- type LegacyScheduler
- type MarkJobSetsCancelled
- type MarkJobsCancelled
- type MarkJobsFailed
- type MarkJobsSucceeded
- type MarkRunsAsSentByExecutorAndJobIdParams
- type MarkRunsFailed
- type MarkRunsRunning
- type MarkRunsSucceeded
- type NodeDb
- func (nodeDb *NodeDb) BindNodeToPod(jobId uuid.UUID, req *schedulerobjects.PodRequirements, ...) error
- func (nodeDb *NodeDb) MarkJobRunning(jobId uuid.UUID)
- func (nodeDb *NodeDb) NodeTypesMatchingPod(req *schedulerobjects.PodRequirements) ([]*schedulerobjects.NodeType, map[string]int, error)
- func (nodeDb *NodeDb) SelectAndBindNodeToPod(jobId uuid.UUID, req *schedulerobjects.PodRequirements) (*PodSchedulingReport, error)
- func (nodeDb *NodeDb) SelectNodeForPod(jobId uuid.UUID, req *schedulerobjects.PodRequirements) (*PodSchedulingReport, error)
- func (nodeDb *NodeDb) String() string
- func (nodeDb *NodeDb) Upsert(nodes []*schedulerobjects.Node) error
- type NodeItemAvailableResourceIndex
- type NodeTypeResourceIterator
- type NodeTypesResourceIterator
- type NodeTypesResourceIteratorItem
- type NodeTypesResourceIteratorPQ
- type Nodeinfo
- type PodSchedulingReport
- type Pulsar
- type Queries
- func (q *Queries) GetTopicMessageIds(ctx context.Context, topic string) ([]Pulsar, error)
- func (q *Queries) MarkJobCancelledById(ctx context.Context, jobID uuid.UUID) error
- func (q *Queries) MarkJobFailedById(ctx context.Context, jobID uuid.UUID) error
- func (q *Queries) MarkJobRunCancelledByJobId(ctx context.Context, jobID uuid.UUID) error
- func (q *Queries) MarkJobRunFailedById(ctx context.Context, runID uuid.UUID) error
- func (q *Queries) MarkJobRunRunningById(ctx context.Context, runID uuid.UUID) error
- func (q *Queries) MarkJobRunSucceededById(ctx context.Context, runID uuid.UUID) error
- func (q *Queries) MarkJobRunsCancelledByJobId(ctx context.Context, jobIds []uuid.UUID) error
- func (q *Queries) MarkJobRunsCancelledBySet(ctx context.Context, jobSet string) error
- func (q *Queries) MarkJobRunsCancelledBySets(ctx context.Context, jobSets []string) error
- func (q *Queries) MarkJobRunsFailedById(ctx context.Context, runIds []uuid.UUID) error
- func (q *Queries) MarkJobRunsRunningById(ctx context.Context, runIds []uuid.UUID) error
- func (q *Queries) MarkJobRunsSucceededById(ctx context.Context, runIds []uuid.UUID) error
- func (q *Queries) MarkJobSucceededById(ctx context.Context, jobID uuid.UUID) error
- func (q *Queries) MarkJobsCancelledById(ctx context.Context, jobIds []uuid.UUID) error
- func (q *Queries) MarkJobsCancelledBySet(ctx context.Context, jobSet string) error
- func (q *Queries) MarkJobsCancelledBySets(ctx context.Context, jobSets []string) error
- func (q *Queries) MarkJobsFailedById(ctx context.Context, jobIds []uuid.UUID) error
- func (q *Queries) MarkJobsFailedBySet(ctx context.Context, jobSet string) error
- func (q *Queries) MarkJobsFailedBySets(ctx context.Context, jobSets []string) error
- func (q *Queries) MarkJobsSucceededById(ctx context.Context, jobIds []uuid.UUID) error
- func (q *Queries) MarkJobsSucceededBySet(ctx context.Context, jobSet string) error
- func (q *Queries) MarkJobsSucceededBySets(ctx context.Context, jobSets []string) error
- func (q *Queries) MarkRunsAsSent(ctx context.Context, runIds []uuid.UUID) error
- func (q *Queries) MarkRunsAsSentByExecutorAndJobId(ctx context.Context, arg MarkRunsAsSentByExecutorAndJobIdParams) error
- func (q *Queries) SelectJobErrorsById(ctx context.Context, jobID uuid.UUID) ([]JobError, error)
- func (q *Queries) SelectJobsFromIds(ctx context.Context, jobIds []uuid.UUID) ([]Job, error)
- func (q *Queries) SelectLeader(ctx context.Context) (Leaderelection, error)
- func (q *Queries) SelectNewActiveJobs(ctx context.Context, serial int64) ([]Job, error)
- func (q *Queries) SelectNewJobErrors(ctx context.Context, serial int64) ([]JobError, error)
- func (q *Queries) SelectNewJobs(ctx context.Context, serial int64) ([]Job, error)
- func (q *Queries) SelectNewNodeInfo(ctx context.Context, serial int64) ([]Nodeinfo, error)
- func (q *Queries) SelectNewRunAssignments(ctx context.Context, serial int64) ([]JobRunAssignment, error)
- func (q *Queries) SelectNewRunErrors(ctx context.Context, serial int64) ([]JobRunError, error)
- func (q *Queries) SelectNewRunsForExecutorWithLimit(ctx context.Context, arg SelectNewRunsForExecutorWithLimitParams) ([]Run, error)
- func (q *Queries) SelectNewRunsForJobs(ctx context.Context, arg SelectNewRunsForJobsParams) ([]Run, error)
- func (q *Queries) SelectQueueJobSetFromId(ctx context.Context, jobID uuid.UUID) (SelectQueueJobSetFromIdRow, error)
- func (q *Queries) SelectQueueJobSetFromIds(ctx context.Context, jobIds []uuid.UUID) ([]SelectQueueJobSetFromIdsRow, error)
- func (q *Queries) SelectReplicaById(ctx context.Context, id uuid.UUID) (Leaderelection, error)
- func (q *Queries) SelectRunErrorsById(ctx context.Context, runID uuid.UUID) ([]JobRunError, error)
- func (q *Queries) SelectRunsFromExecutorAndJobs(ctx context.Context, arg SelectRunsFromExecutorAndJobsParams) ([]Run, error)
- func (q *Queries) SelectUnsentRunsForExecutor(ctx context.Context, executor string) ([]Run, error)
- func (q *Queries) UpdateJobPriorityById(ctx context.Context, arg UpdateJobPriorityByIdParams) error
- func (q *Queries) UpdateJobPriorityByJobSet(ctx context.Context, arg UpdateJobPriorityByJobSetParams) error
- func (q *Queries) UpsertMessageId(ctx context.Context, arg UpsertMessageIdParams) error
- func (q *Queries) WithTx(tx pgx.Tx) *Queries
- type Queue
- type QueueCandidateJobsIterator
- type QueueSchedulingReport
- type QueuedJobsIterator
- type Run
- type Scheduler
- type SchedulerJobRepository
- type SchedulingReportsRepository
- func (repo *SchedulingReportsRepository) Add(queueName string, report *JobSchedulingReport)
- func (repo *SchedulingReportsRepository) GetJobReport(ctx context.Context, jobId *schedulerobjects.JobId) (*schedulerobjects.JobReport, error)
- func (repo *SchedulingReportsRepository) GetJobSchedulingReport(jobId uuid.UUID) (*JobSchedulingReport, bool)
- func (repo *SchedulingReportsRepository) GetQueueReport(ctx context.Context, queue *schedulerobjects.Queue) (*schedulerobjects.QueueReport, error)
- func (repo *SchedulingReportsRepository) GetQueueSchedulingReport(queueName string) (*QueueSchedulingReport, bool)
- type SchedulingRoundReport
- func (schedulingRoundReport *SchedulingRoundReport) AddJobSchedulingReport(jobSchedulingReport *JobSchedulingReport)
- func (schedulingRoundReport *SchedulingRoundReport) AddScheduledResources(queue string, req *schedulerobjects.PodRequirements)
- func (schedulingRoundReport *SchedulingRoundReport) ClearJobSpecs()
- func (report *SchedulingRoundReport) String() string
- type SelectNewRunsForExecutorWithLimitParams
- type SelectNewRunsForJobsParams
- type SelectQueueJobSetFromIdRow
- type SelectQueueJobSetFromIdsRow
- type SelectRunsFromExecutorAndJobsParams
- type UpdateJobPriorities
- type UpdateJobPriorityByIdParams
- type UpdateJobPriorityByJobSetParams
- type UpdateJobSetPriorities
- type UpsertMessageIdParams
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func CopyProtocolUpsert ¶
func JobErrorsSchema ¶
func JobErrorsSchema() string
func JobRunAssignmentSchema ¶
func JobRunAssignmentSchema() string
func JobRunErrorsSchema ¶
func JobRunErrorsSchema() string
func JobsSchema ¶
func JobsSchema() string
func LeaderelectionSchema ¶
func LeaderelectionSchema() string
func NamesFromRecord ¶
func NamesFromRecord(x interface{}) []string
NamesFromRecord returns a slice composed of the field names in a struct marked with "db" tags.
For example, if x is an instance of a struct with definition
type Rectangle struct {
	Width  int `db:"width"`
	Height int `db:"height"`
}
it returns ["width", "height"].
func NamesValuesFromRecord ¶
func NamesValuesFromRecord(x interface{}) ([]string, []interface{})
NamesValuesFromRecord returns a slice composed of the field names and another composed of the corresponding values for fields of a struct marked with "db" tags.
For example, if x is an instance of a struct with definition
type Rectangle struct {
	Width  int `db:"width"`
	Height int `db:"height"`
}
where Width = 10 and Height = 5, it returns ["width", "height"], [10, 5].
This function does not handle pointers to structs, i.e., x must be Rectangle{} and not &Rectangle{}.
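The following sketch (not part of the package) shows how NamesFromRecord, ValuesFromRecord, and NamesValuesFromRecord relate, using the illustrative Rectangle struct from the doc comments above:
	type Rectangle struct {
		Width  int `db:"width"`
		Height int `db:"height"`
	}
	r := Rectangle{Width: 10, Height: 5}
	names := NamesFromRecord(r)                 // ["width", "height"]
	values := ValuesFromRecord(r)               // [10, 5]
	names2, values2 := NamesValuesFromRecord(r) // ["width", "height"], [10, 5]
	// Note: pass the struct by value (Rectangle{}), not a pointer (&Rectangle{}).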
func NodeInfoSchema ¶
func NodeInfoSchema() string
func NodeTypesMatchingPod ¶ added in v0.3.35
func NodeTypesMatchingPod(nodeTypes map[string]*schedulerobjects.NodeType, req *schedulerobjects.PodRequirements) ([]*schedulerobjects.NodeType, map[string]int, error)
NodeTypesMatchingPod returns a slice composed of all node types a given pod could be scheduled on, i.e., all node types with matching node selectors and no untolerated taints.
TODO: Update docstring.
func PriorityFromJob ¶ added in v0.3.35
func PulsarSchema ¶
func PulsarSchema() string
func ResourceListAsWeightedApproximateFloat64 ¶ added in v0.3.35
func ResourceListAsWeightedApproximateFloat64(resourceScarcity map[string]float64, rl schedulerobjects.ResourceList) float64
func RunsSchema ¶
func RunsSchema() string
func Upsert ¶
func Upsert(ctx context.Context, db *pgxpool.Pool, tableName string, schema string, records []interface{}) error
Upsert is an optimised SQL call for bulk upserts.
For efficiency, this function:
1. Creates an empty temporary SQL table.
2. Inserts all records into the temporary table using the postgres-specific COPY wire protocol.
3. Upserts all records from the temporary table into the target table (as specified by tableName).
The COPY protocol can be faster than repeated inserts for as few as 5 rows; see https://www.postgresql.org/docs/current/populate.html and https://pkg.go.dev/github.com/jackc/pgx/v4#hdr-Copy_Protocol
The records to write should be structs with fields marked with "db" tags. Field names and values are extracted using the NamesValuesFromRecord function; see its definition for details. The first field is used as the primary key in SQL.
The temporary table is created with the provided schema, which should be of the form
(
	id UUID PRIMARY KEY,
	width int NOT NULL,
	height int NOT NULL
)
i.e., it should omit everything before the "(" and after the ")".
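A usage sketch, assuming a context ctx and a *pgxpool.Pool named db; the table name, schema, and record type below are illustrative, not part of the package:
	// Illustrative record type; the first db-tagged field (id) becomes the primary key.
	type rectangleRow struct {
		Id     uuid.UUID `db:"id"`
		Width  int       `db:"width"`
		Height int       `db:"height"`
	}
	records := []interface{}{
		rectangleRow{Id: uuid.New(), Width: 10, Height: 5},
		rectangleRow{Id: uuid.New(), Width: 3, Height: 4},
	}
	// Schema in the form described above: column definitions including the enclosing parentheses.
	schema := "(id UUID PRIMARY KEY, width int NOT NULL, height int NOT NULL)"
	if err := Upsert(ctx, db, "rectangles", schema, records); err != nil {
		// handle error
	}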
func ValuesFromRecord ¶
func ValuesFromRecord(x interface{}) []interface{}
ValuesFromRecord returns a slice composed of the values of the fields in a struct marked with "db" tags.
For example, if x is an instance of a struct with definition
type Rectangle struct {
	Name   string
	Width  int `db:"width"`
	Height int `db:"height"`
}
where Width = 5 and Height = 10, it returns [5, 10].
func WeightsFromAggregatedUsageByQueue ¶ added in v0.3.35
func WeightsFromAggregatedUsageByQueue(resourceScarcity map[string]float64, priorityFactorByQueue map[string]float64, aggregateResourceUsageByQueue map[string]schedulerobjects.ResourceList) map[string]float64
Types ¶
type DbOperation ¶
type DbOperation interface {
	// a.Merge(b) attempts to merge b into a, creating a single combined op.
	// Returns true if merging was successful.
	// If successful, modifies a in-place.
	// If not successful, neither op is mutated.
	Merge(DbOperation) bool
	// a.CanBeAppliedBefore(b) returns true if a can be placed before b
	// without changing the end result of the overall set of operations.
	CanBeAppliedBefore(DbOperation) bool
}
DbOperation captures a generic batch database operation.
There are 5 types of operations:
- Insert jobs (i.e., add new jobs to the db).
- Insert runs (i.e., add new runs to the db).
- Job set operations (i.e., modify all jobs and runs in the db that are part of a given job set).
- Job operations (i.e., modify particular jobs).
- Job run operations (i.e., modify particular runs).
To improve performance, several ops can be merged into a single op if they are of the same type. To increase the number of ops that can be merged, ops can sometimes be reordered.
Specifically, an op can be applied before another if:
- Insert jobs: the prior op doesn't affect the job set.
- Insert runs: the prior op doesn't affect the job set or defines the corresponding job.
- Job set operations: it doesn't affect a job defined in the prior op.
- Job operations: it doesn't affect a job defined in a prior op.
- Job run operations: it doesn't affect a run defined in a prior op.
In addition, UpdateJobPriorities can never be applied before UpdateJobSetPriorities and vice versa, since one may overwrite values set by the other.
func AppendDbOperation ¶
func AppendDbOperation(ops []DbOperation, op DbOperation) []DbOperation
AppendDbOperation appends a sql operation, possibly merging it with a previous operation if that can be done in such a way that the end result of applying the entire sequence of operations is unchanged.
func DbOpsFromEventInSequence ¶
func DbOpsFromEventInSequence(sequence *armadaevents.EventSequence, i int) ([]DbOperation, error)
DbOpsFromEventInSequence returns a DbOperation produced from the i-th event in sequence, or nil if the i-th event doesn't correspond to any DbOperation.
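A sketch of combining DbOpsFromEventInSequence and AppendDbOperation to turn an event sequence into a compacted list of operations; sequence is assumed to be an *armadaevents.EventSequence read from Pulsar:
	ops := make([]DbOperation, 0)
	for i := range sequence.Events {
		opsForEvent, err := DbOpsFromEventInSequence(sequence, i)
		if err != nil {
			// handle error
			continue
		}
		if opsForEvent == nil {
			continue // the i-th event doesn't correspond to any DbOperation
		}
		for _, op := range opsForEvent {
			// AppendDbOperation merges op into an existing op where possible.
			ops = AppendDbOperation(ops, op)
		}
	}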
type DbOperationsBatcher ¶
type DbOperationsBatcher struct {
	In  chan *eventutil.EventSequenceWithMessageIds
	Out chan *DbOperationsWithMessageIds
	// Max number of Pulsar messages to include with each batch.
	MaxMessages int
	// Max time interval between batches.
	MaxInterval time.Duration
}
DbOperationsBatcher is a service that consumes event sequences and produces optimised sequences of database operations.
TODO: Move into dbopsfromevents.go
func NewDbOperationsBatcher ¶
func NewDbOperationsBatcher(in chan *eventutil.EventSequenceWithMessageIds) *DbOperationsBatcher
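A rough wiring sketch. The method that starts the batcher's processing loop is not listed on this page, so the Run call below is an assumption and is left commented out:
	in := make(chan *eventutil.EventSequenceWithMessageIds)
	batcher := NewDbOperationsBatcher(in)
	batcher.MaxMessages = 1000
	batcher.MaxInterval = time.Second
	// Assumed: some method (e.g., Run) drives the batching loop; it is not documented here.
	// go batcher.Run(ctx)
	go func() {
		for batch := range batcher.Out {
			// write batch.Ops to postgres, then ack batch.MessageIds
			_ = batch
		}
	}()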
type DbOperationsWithMessageIds ¶
type DbOperationsWithMessageIds struct { Ops []DbOperation MessageIds []pulsar.MessageID }
DbOperationsWithMessageIds bundles a sequence of db ops with the ids of all Pulsar messages that were consumed to produce it.
type DbOpsWriter ¶
type DbOpsWriter struct {
	In chan *DbOperationsWithMessageIds
	// Connection to the postgres database.
	Db *pgxpool.Pool
	// Pulsar consumer used to ack messages.
	Consumer pulsar.Consumer
	// Optional logger.
	// If not provided, the default logrus logger is used.
	Logger *logrus.Entry
}
Service that writes DbOperations into postgres.
type ErrLostLeadership ¶
func (*ErrLostLeadership) Error ¶
func (err *ErrLostLeadership) Error() string
type ExecutorApi ¶
type ExecutorApi struct {
	api.UnimplementedAggregatedQueueServer
	// Embed the Redis-backed event server.
	// Provides methods for dual-publishing events etc.
	//
	// TODO: Can't import due to import cycle.
	// *server.EventServer
	Producer       pulsar.Producer
	Db             *pgxpool.Pool
	MaxJobsPerCall int32
}
func (*ExecutorApi) RenewLease ¶
func (srv *ExecutorApi) RenewLease(ctx context.Context, req *api.RenewLeaseRequest) (*api.IdList, error)
func (*ExecutorApi) ReportDone ¶
func (*ExecutorApi) ReportUsage ¶
func (srv *ExecutorApi) ReportUsage(ctx context.Context, req *api.ClusterUsageReport) (*types.Empty, error)
TODO: Does nothing for now.
func (*ExecutorApi) ReturnLease ¶
func (srv *ExecutorApi) ReturnLease(ctx context.Context, req *api.ReturnLeaseRequest) (*types.Empty, error)
func (*ExecutorApi) StreamingLeaseJobs ¶
func (srv *ExecutorApi) StreamingLeaseJobs(stream api.AggregatedQueue_StreamingLeaseJobsServer) error
type Ingester ¶
type Ingester struct {
	// Used to setup a Pulsar consumer.
	PulsarClient    pulsar.Client
	ConsumerOptions pulsar.ConsumerOptions
	// Connection to the postgres database.
	Db *pgxpool.Pool
	// Write to postgres at least this often (assuming there are records to write).
	MaxWriteInterval time.Duration
	// Max number of DbOperation to batch.
	MaxDbOps int
	// Optional logger.
	// If not provided, the default logrus logger is used.
	Logger *logrus.Entry
	// contains filtered or unexported fields
}
Service that updates the scheduler database.
At a high level, the ingester:
1. Reads messages from Pulsar, which are used to create DbOperations.
2. Db ops are collected for up to some amount of time.
3. Db ops are applied to postgres in batch.
4. The Pulsar messages read to produce the ops are acked.
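A configuration sketch, assuming a pulsar.Client named pulsarClient and a *pgxpool.Pool named db; the topic and subscription names are illustrative, and the method that starts the pipeline is not listed on this page:
	ingester := &Ingester{
		PulsarClient: pulsarClient,
		ConsumerOptions: pulsar.ConsumerOptions{
			Topic:            "events",
			SubscriptionName: "scheduler-ingester",
			Type:             pulsar.KeyShared,
		},
		Db:               db,
		MaxWriteInterval: time.Second,
		MaxDbOps:         1000,
	}
	// Assumed: a Run/Start method drives steps 1-4 above; it is unexported or not shown here.
	_ = ingester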
type InsertJobErrors ¶
func (InsertJobErrors) CanBeAppliedBefore ¶
func (a InsertJobErrors) CanBeAppliedBefore(b DbOperation) bool
func (InsertJobErrors) Merge ¶
func (a InsertJobErrors) Merge(b DbOperation) bool
type InsertJobRunErrors ¶
type InsertJobRunErrors map[int32]*JobRunError
func (InsertJobRunErrors) CanBeAppliedBefore ¶
func (a InsertJobRunErrors) CanBeAppliedBefore(b DbOperation) bool
func (InsertJobRunErrors) Merge ¶
func (a InsertJobRunErrors) Merge(b DbOperation) bool
type InsertJobs ¶
Db operations (implements DbOperation).
func (InsertJobs) CanBeAppliedBefore ¶
func (a InsertJobs) CanBeAppliedBefore(b DbOperation) bool
func (InsertJobs) Merge ¶
func (a InsertJobs) Merge(b DbOperation) bool
type InsertRunAssignments ¶
type InsertRunAssignments map[uuid.UUID]*JobRunAssignment
func (InsertRunAssignments) CanBeAppliedBefore ¶
func (a InsertRunAssignments) CanBeAppliedBefore(b DbOperation) bool
func (InsertRunAssignments) Merge ¶
func (a InsertRunAssignments) Merge(b DbOperation) bool
type InsertRuns ¶
func (InsertRuns) CanBeAppliedBefore ¶
func (a InsertRuns) CanBeAppliedBefore(b DbOperation) bool
func (InsertRuns) Merge ¶
func (a InsertRuns) Merge(b DbOperation) bool
type Job ¶
type Job struct {
	JobID          uuid.UUID `db:"job_id"`
	JobSet         string    `db:"job_set"`
	Queue          string    `db:"queue"`
	UserID         string    `db:"user_id"`
	Groups         []string  `db:"groups"`
	Priority       int64     `db:"priority"`
	Cancelled      bool      `db:"cancelled"`
	Succeeded      bool      `db:"succeeded"`
	Failed         bool      `db:"failed"`
	SubmitMessage  []byte    `db:"submit_message"`
	SchedulingInfo []byte    `db:"scheduling_info"`
	Serial         int64     `db:"serial"`
	LastModified   time.Time `db:"last_modified"`
}
type JobRunAssignment ¶
type JobRunError ¶
type JobRuns ¶
type JobRuns struct {
	// Any runs associated with this job that have not terminated.
	// Map from run id to run.
	ActiveRuns map[uuid.UUID]*Run
	// Any runs associated with this job for which the ingester has received
	// a terminal event (i.e., succeeded, failed, or cancelled).
	// Map from run id to run.
	InactiveRuns map[uuid.UUID]*Run
}
JobRuns is a collection of all runs associated with a particular job.
func NewJobRuns ¶
func NewJobRuns() *JobRuns
type JobSchedulingReport ¶ added in v0.3.35
type JobSchedulingReport struct {
	// Time at which this report was created.
	Timestamp time.Time
	// Id of the job this pod corresponds to.
	JobId uuid.UUID
	// Job spec.
	Job *api.Job
	// Scheduling requirements of this job.
	// We currently require that each job contains exactly one pod spec.
	Req *schedulerobjects.PodRequirements
	// Executor this job was assigned to.
	// If empty, the job could not be scheduled,
	// and the UnschedulableReason is populated.
	ExecutorId string
	// Resources assigned to this queue during this invocation of the scheduler,
	// if the job were to be scheduled.
	//
	// TODO: Only store in the scheduler round report.
	RoundQueueResources schedulerobjects.ResourceList
	// Total resources assigned to this queue,
	// if the job were to be scheduled.
	//
	// TODO: Only store in the scheduler round report.
	TotalQueueResources schedulerobjects.ResourceList
	// Total resources assigned to this queue by priority,
	// if the job were to be scheduled.
	//
	// TODO: Only store in the scheduler round report.
	TotalQueueResourcesByPriority schedulerobjects.QuantityByPriorityAndResourceType
	// Reason for why the job could not be scheduled.
	// Empty if the job was scheduled successfully.
	UnschedulableReason string
	// Scheduling reports for the individual pods that make up the job.
	PodSchedulingReports []*PodSchedulingReport
}
JobSchedulingReport is created by the scheduler and contains information about the decision made by the scheduler for this job.
func (*JobSchedulingReport) String ¶ added in v0.3.35
func (report *JobSchedulingReport) String() string
type JobSetOperation ¶
type JobsIterator ¶ added in v0.3.35
type LeaderElection ¶
type LeaderElection struct {
	Db *pgxpool.Pool
	// Id uniquely identifying this leader in this epoch.
	Id uuid.UUID
	// Interval between database operations.
	// Used both while leader and follower.
	Interval time.Duration
	// Time after which this instance may try to become leader
	// if there has been no activity from the current leader.
	Timeout time.Duration
	// Optional logger.
	// If not provided, the default logrus logger is used.
	Logger *logrus.Entry
}
LeaderElection is used by the scheduler to ensure only one instance is creating leases at a time.
The leader election process is based on timeouts. The leader continually writes its id into postgres. Non-leader instances monitor the last_modified value set by postgres for that row. If last_modified exceeds some threshold, a non-leader tries to become leader. It does so by, in a transaction, marking its own id as the leader.
func NewLeaderElection ¶
func NewLeaderElection(db *pgxpool.Pool) *LeaderElection
func (*LeaderElection) BecomeLeader ¶
func (srv *LeaderElection) BecomeLeader(ctx context.Context) error
BecomeLeader returns once this instance has become the leader.
Checks once per interval if the current leader has gone missing and, if so, tries to become leader.
func (*LeaderElection) StayLeader ¶
func (srv *LeaderElection) StayLeader(ctx context.Context) error
StayLeader writes into postgres every interval to ensure no other replica attempts to become leader.
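A usage sketch combining BecomeLeader and StayLeader; the interval and timeout values are illustrative, and the assumption that StayLeader returns an *ErrLostLeadership when another replica takes over is based only on the exported error type:
	elector := NewLeaderElection(db)
	elector.Interval = 10 * time.Second
	elector.Timeout = time.Minute
	// Block until this instance becomes leader.
	if err := elector.BecomeLeader(ctx); err != nil {
		// handle error
	}
	// Keep refreshing leadership; StayLeader returns once leadership is lost.
	if err := elector.StayLeader(ctx); err != nil {
		var lost *ErrLostLeadership
		if errors.As(err, &lost) {
			// assumed: another replica took over; stop scheduling
		}
	}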
type Leaderelection ¶
type LegacyScheduler ¶ added in v0.3.35
type LegacyScheduler struct {
	SchedulingConfig configuration.SchedulingConfig
	// Executor for which we're currently scheduling jobs.
	ExecutorId string
	// Total resources across all clusters.
	// Used when computing resource limits.
	TotalResources schedulerobjects.ResourceList
	// Contains all nodes to be considered for scheduling.
	// Used for matching pods with nodes.
	NodeDb *NodeDb
	// Used to request jobs from Redis and to mark jobs as leased.
	JobRepository SchedulerJobRepository
	// Minimum quantity allowed for jobs leased to this cluster.
	MinimumJobSize map[string]resource.Quantity
	// These factors influence the fraction of resources assigned to each queue.
	PriorityFactorByQueue map[string]float64
	// Random number generator, used to select queues.
	Rand *rand.Rand
	// Report on the results of the most recent invocation of the scheduler.
	SchedulingRoundReport *SchedulingRoundReport
}
func NewLegacyScheduler ¶ added in v0.3.35
func NewLegacyScheduler(schedulingConfig configuration.SchedulingConfig, executorId string, totalResources schedulerobjects.ResourceList, nodes []*schedulerobjects.Node, jobRepository SchedulerJobRepository, priorityFactorByQueue map[string]float64) (*LegacyScheduler, error)
func (*LegacyScheduler) Schedule ¶ added in v0.3.35
func (c *LegacyScheduler) Schedule(ctx context.Context, initialUsageByQueue map[string]schedulerobjects.QuantityByPriorityAndResourceType) ([]*api.Job, error)
Schedule is similar to distributeRemainder, but is built on NodeDb.
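A sketch of constructing and invoking the scheduler; the executor id, priority factors, and the variables schedulingConfig, totalResources, nodes, jobRepository, and initialUsageByQueue are assumed to be provided elsewhere:
	sched, err := NewLegacyScheduler(
		schedulingConfig,
		"cluster-1",
		totalResources,
		nodes,
		jobRepository,
		map[string]float64{"queue-a": 1.0, "queue-b": 2.0},
	)
	if err != nil {
		// handle error
	}
	leasedJobs, err := sched.Schedule(ctx, initialUsageByQueue)
	if err != nil {
		// handle error
	}
	// Hand leasedJobs to the executor and inspect the round report, e.g., for debugging.
	fmt.Println(len(leasedJobs), sched.SchedulingRoundReport)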
func (*LegacyScheduler) String ¶ added in v0.3.35
func (sched *LegacyScheduler) String() string
type MarkJobSetsCancelled ¶
func (MarkJobSetsCancelled) AffectsJobSet ¶
func (a MarkJobSetsCancelled) AffectsJobSet(jobSet string) bool
func (MarkJobSetsCancelled) CanBeAppliedBefore ¶
func (a MarkJobSetsCancelled) CanBeAppliedBefore(b DbOperation) bool
func (MarkJobSetsCancelled) Merge ¶
func (a MarkJobSetsCancelled) Merge(b DbOperation) bool
type MarkJobsCancelled ¶
func (MarkJobsCancelled) CanBeAppliedBefore ¶
func (a MarkJobsCancelled) CanBeAppliedBefore(b DbOperation) bool
func (MarkJobsCancelled) Merge ¶
func (a MarkJobsCancelled) Merge(b DbOperation) bool
type MarkJobsFailed ¶
func (MarkJobsFailed) CanBeAppliedBefore ¶
func (a MarkJobsFailed) CanBeAppliedBefore(b DbOperation) bool
func (MarkJobsFailed) Merge ¶
func (a MarkJobsFailed) Merge(b DbOperation) bool
type MarkJobsSucceeded ¶
func (MarkJobsSucceeded) CanBeAppliedBefore ¶
func (a MarkJobsSucceeded) CanBeAppliedBefore(b DbOperation) bool
func (MarkJobsSucceeded) Merge ¶
func (a MarkJobsSucceeded) Merge(b DbOperation) bool
type MarkRunsFailed ¶
func (MarkRunsFailed) CanBeAppliedBefore ¶
func (a MarkRunsFailed) CanBeAppliedBefore(b DbOperation) bool
func (MarkRunsFailed) Merge ¶
func (a MarkRunsFailed) Merge(b DbOperation) bool
type MarkRunsRunning ¶
func (MarkRunsRunning) CanBeAppliedBefore ¶
func (a MarkRunsRunning) CanBeAppliedBefore(b DbOperation) bool
func (MarkRunsRunning) Merge ¶
func (a MarkRunsRunning) Merge(b DbOperation) bool
type MarkRunsSucceeded ¶
func (MarkRunsSucceeded) CanBeAppliedBefore ¶
func (a MarkRunsSucceeded) CanBeAppliedBefore(b DbOperation) bool
func (MarkRunsSucceeded) Merge ¶
func (a MarkRunsSucceeded) Merge(b DbOperation) bool
type NodeDb ¶ added in v0.3.35
type NodeDb struct {
	// In-memory database. Stores *SchedulerNode.
	// Used to efficiently iterate over nodes in sorted order.
	Db *memdb.MemDB
	// Set of node types for which there exists at least 1 node in the db.
	NodeTypes map[string]*schedulerobjects.NodeType
	// Resources allocated by the scheduler to in-flight jobs,
	// i.e., jobs for which resource usage is not yet reported by the executor.
	AssignedByNode map[string]schedulerobjects.AssignedByPriorityAndResourceType
	// Map from job id to the set of nodes on which that job has been assigned resources.
	// Used to clear AssignedByNode once jobs start running.
	NodesByJob map[uuid.UUID]map[string]interface{}
	// Map from node id to the set of jobs that have resources assigned to them on that node.
	// Used to clear AssignedByNode once jobs start running.
	JobsByNode map[string]map[uuid.UUID]interface{}
	// contains filtered or unexported fields
}
NodeDb is the scheduler-internal system for storing node information. It's used to efficiently find nodes on which a pod can be scheduled.
func (*NodeDb) BindNodeToPod ¶ added in v0.3.35
func (nodeDb *NodeDb) BindNodeToPod(jobId uuid.UUID, req *schedulerobjects.PodRequirements, node *schedulerobjects.Node) error
func (*NodeDb) MarkJobRunning ¶ added in v0.3.35
MarkJobRunning notifies the node db that this job is now running. When the nodes were bound to the job, resources on those nodes were marked as assigned in the node db. When the job is running, those resources are accounted for by the executor, and should no longer be marked as assigned in the node db.
TODO: This only clears AssignedByNode once there are no in-flight jobs for that node. We could improve it to clear AssignedByNode on a per-job basis.
func (*NodeDb) NodeTypesMatchingPod ¶ added in v0.3.35
func (nodeDb *NodeDb) NodeTypesMatchingPod(req *schedulerobjects.PodRequirements) ([]*schedulerobjects.NodeType, map[string]int, error)
NodeTypesMatchingPod returns a slice composed of all node types a given pod could be scheduled on, i.e., all node types with matching node selectors and no untolerated taints.
TODO: Update docstring.
func (*NodeDb) SelectAndBindNodeToPod ¶ added in v0.3.35
func (nodeDb *NodeDb) SelectAndBindNodeToPod(jobId uuid.UUID, req *schedulerobjects.PodRequirements) (*PodSchedulingReport, error)
SelectAndBindNodeToPod selects a node on which the pod can be scheduled, and updates the internal state of the db to indicate that this pod is bound to that node.
func (*NodeDb) SelectNodeForPod ¶ added in v0.3.35
func (nodeDb *NodeDb) SelectNodeForPod(jobId uuid.UUID, req *schedulerobjects.PodRequirements) (*PodSchedulingReport, error)
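A sketch of the intended flow: load nodes, select and bind a node for a pod, then mark the job running once the executor reports usage. How a NodeDb is constructed is not shown on this page, so nodeDb, nodes, and req are assumed to exist:
	if err := nodeDb.Upsert(nodes); err != nil {
		// handle error
	}
	jobId := uuid.New()
	report, err := nodeDb.SelectAndBindNodeToPod(jobId, req)
	if err != nil {
		// handle error
	}
	if report.Node == nil {
		// the pod could not be assigned to any node; the report explains why
	}
	// Later, once the executor reports resource usage for the job's pods:
	nodeDb.MarkJobRunning(jobId)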
type NodeItemAvailableResourceIndex ¶ added in v0.3.35
type NodeItemAvailableResourceIndex struct {
	// Resource name, e.g., "cpu", "gpu", or "memory".
	Resource string
	// Job priority.
	Priority int32
}
func (*NodeItemAvailableResourceIndex) FromArgs ¶ added in v0.3.35
func (s *NodeItemAvailableResourceIndex) FromArgs(args ...interface{}) ([]byte, error)
FromArgs computes the index key from a set of arguments. Takes a single argument resourceAmount of type uint64.
func (*NodeItemAvailableResourceIndex) FromObject ¶ added in v0.3.35
func (s *NodeItemAvailableResourceIndex) FromObject(raw interface{}) (bool, []byte, error)
FromObject extracts the index key from a *NodeItem object.
type NodeTypeResourceIterator ¶ added in v0.3.35
type NodeTypeResourceIterator struct {
// contains filtered or unexported fields
}
NodeTypeResourceIterator is an iterator over all nodes of a given nodeType, for which there's at least some specified amount of a given resource available. For example, all nodes of type "foo" for which there's at least 1Gi of memory available.
Available resources is the sum of unused resources and resources assigned to lower-priority jobs. Nodes are returned in sorted order, going from least to most of the specified resource available.
func NewNodeTypeResourceIterator ¶ added in v0.3.35
func NewNodeTypeResourceIterator(txn *memdb.Txn, resource string, priority int32, nodeType *schedulerobjects.NodeType, resourceAmount resource.Quantity) (*NodeTypeResourceIterator, error)
func (*NodeTypeResourceIterator) Next ¶ added in v0.3.35
func (it *NodeTypeResourceIterator) Next() interface{}
func (*NodeTypeResourceIterator) NextNodeItem ¶ added in v0.3.35
func (it *NodeTypeResourceIterator) NextNodeItem() *schedulerobjects.Node
func (*NodeTypeResourceIterator) WatchCh ¶ added in v0.3.35
func (it *NodeTypeResourceIterator) WatchCh() <-chan struct{}
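A sketch of iterating over nodes of one node type with at least 1Gi of memory available at priority 0; nodeDb and nodeType are assumed to exist, resource.MustParse is from k8s.io/apimachinery, and the assumption is that NextNodeItem returns nil when the iterator is exhausted:
	txn := nodeDb.Db.Txn(false) // read-only memdb transaction
	it, err := NewNodeTypeResourceIterator(txn, "memory", 0, nodeType, resource.MustParse("1Gi"))
	if err != nil {
		// handle error
	}
	for node := it.NextNodeItem(); node != nil; node = it.NextNodeItem() {
		// nodes are returned from least to most memory available
		fmt.Println(node)
	}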
type NodeTypesResourceIterator ¶ added in v0.3.35
type NodeTypesResourceIterator struct {
// contains filtered or unexported fields
}
NodeTypesResourceIterator extends NodeTypeResourceIterator to iterate over nodes of several node types. Nodes are returned in sorted order, going from least to most of the specified resource available.
func NewNodeTypesResourceIterator ¶ added in v0.3.35
func NewNodeTypesResourceIterator(txn *memdb.Txn, resource string, priority int32, nodeTypes []*schedulerobjects.NodeType, resourceQuantity resource.Quantity) (*NodeTypesResourceIterator, error)
func (*NodeTypesResourceIterator) Next ¶ added in v0.3.35
func (it *NodeTypesResourceIterator) Next() interface{}
func (*NodeTypesResourceIterator) NextNodeItem ¶ added in v0.3.35
func (it *NodeTypesResourceIterator) NextNodeItem() *schedulerobjects.Node
func (*NodeTypesResourceIterator) WatchCh ¶ added in v0.3.35
func (it *NodeTypesResourceIterator) WatchCh() <-chan struct{}
type NodeTypesResourceIteratorItem ¶ added in v0.3.35
type NodeTypesResourceIteratorItem struct {
// contains filtered or unexported fields
}
type NodeTypesResourceIteratorPQ ¶ added in v0.3.35
type NodeTypesResourceIteratorPQ []*NodeTypesResourceIteratorItem
A priority queue used by NodeTypesResourceIterator to return results from across several sub-iterators in order.
func (NodeTypesResourceIteratorPQ) Len ¶ added in v0.3.35
func (pq NodeTypesResourceIteratorPQ) Len() int
func (NodeTypesResourceIteratorPQ) Less ¶ added in v0.3.35
func (pq NodeTypesResourceIteratorPQ) Less(i, j int) bool
func (*NodeTypesResourceIteratorPQ) Pop ¶ added in v0.3.35
func (pq *NodeTypesResourceIteratorPQ) Pop() any
func (*NodeTypesResourceIteratorPQ) Push ¶ added in v0.3.35
func (pq *NodeTypesResourceIteratorPQ) Push(x any)
func (NodeTypesResourceIteratorPQ) Swap ¶ added in v0.3.35
func (pq NodeTypesResourceIteratorPQ) Swap(i, j int)
type PodSchedulingReport ¶ added in v0.3.35
type PodSchedulingReport struct {
	// Time at which this report was created.
	Timestamp time.Time
	// Id of the job this pod corresponds to.
	JobId uuid.UUID
	// Pod scheduling requirements.
	Req *schedulerobjects.PodRequirements
	// Resource type determined by the scheduler to be the hardest to satisfy
	// the scheduling requirements for.
	DominantResourceType string
	// Node the pod was assigned to.
	// If nil, the pod could not be assigned to any node.
	Node *schedulerobjects.Node
	// Score indicates how well the pod fits on the selected node.
	Score int
	// Number of node types that match this pod.
	NumMatchedNodeTypes int
	// Number of node types excluded by reason.
	NumExcludedNodeTypesByReason map[string]int
	// Number of nodes excluded by reason.
	NumExcludedNodesByReason map[string]int
}
PodSchedulingReport is returned by SelectAndBindNodeToPod and contains detailed information on the scheduling decision made for this pod.
func (*PodSchedulingReport) String ¶ added in v0.3.35
func (report *PodSchedulingReport) String() string
type Queries ¶
type Queries struct {
// contains filtered or unexported fields
}
func (*Queries) GetTopicMessageIds ¶
func (*Queries) MarkJobCancelledById ¶
Job cancellation
func (*Queries) MarkJobFailedById ¶
Job failed
func (*Queries) MarkJobRunCancelledByJobId ¶
Job run cancelled
func (*Queries) MarkJobRunFailedById ¶
Job run failed
func (*Queries) MarkJobRunRunningById ¶
Job run running
func (*Queries) MarkJobRunSucceededById ¶
Job run succeeded
func (*Queries) MarkJobRunsCancelledByJobId ¶
func (*Queries) MarkJobRunsCancelledBySet ¶
func (*Queries) MarkJobRunsCancelledBySets ¶
func (*Queries) MarkJobRunsFailedById ¶
func (*Queries) MarkJobRunsRunningById ¶
func (*Queries) MarkJobRunsSucceededById ¶
func (*Queries) MarkJobSucceededById ¶
Job succeeded
func (*Queries) MarkJobsCancelledById ¶
func (*Queries) MarkJobsCancelledBySet ¶
func (*Queries) MarkJobsCancelledBySets ¶
func (*Queries) MarkJobsFailedById ¶
func (*Queries) MarkJobsFailedBySet ¶
func (*Queries) MarkJobsFailedBySets ¶
func (*Queries) MarkJobsSucceededById ¶
func (*Queries) MarkJobsSucceededBySet ¶
func (*Queries) MarkJobsSucceededBySets ¶
func (*Queries) MarkRunsAsSent ¶
func (*Queries) MarkRunsAsSentByExecutorAndJobId ¶
func (q *Queries) MarkRunsAsSentByExecutorAndJobId(ctx context.Context, arg MarkRunsAsSentByExecutorAndJobIdParams) error
func (*Queries) SelectJobErrorsById ¶
Job errors
func (*Queries) SelectJobsFromIds ¶
Jobs
func (*Queries) SelectLeader ¶
func (q *Queries) SelectLeader(ctx context.Context) (Leaderelection, error)
Leader election. Return the row associated with the current leader. If due to a bug several rows are marked as leader, return the most recently modified one.
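For example, given a *Queries value q (how it is constructed is not shown on this page), the current leader row can be read as follows:
	leader, err := q.SelectLeader(ctx)
	if err != nil {
		// handle error; e.g., there may be no leader row yet
	}
	fmt.Println(leader)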
func (*Queries) SelectNewActiveJobs ¶
func (*Queries) SelectNewJobErrors ¶
func (*Queries) SelectNewJobs ¶
func (*Queries) SelectNewNodeInfo ¶
NodeInfo
func (*Queries) SelectNewRunAssignments ¶
func (q *Queries) SelectNewRunAssignments(ctx context.Context, serial int64) ([]JobRunAssignment, error)
Job run assignments
func (*Queries) SelectNewRunErrors ¶
func (*Queries) SelectNewRunsForExecutorWithLimit ¶
func (*Queries) SelectNewRunsForJobs ¶
func (*Queries) SelectQueueJobSetFromId ¶
func (*Queries) SelectQueueJobSetFromIds ¶
func (*Queries) SelectReplicaById ¶
func (*Queries) SelectRunErrorsById ¶
Run errors
func (*Queries) SelectRunsFromExecutorAndJobs ¶
func (*Queries) SelectUnsentRunsForExecutor ¶
Runs
func (*Queries) UpdateJobPriorityById ¶
func (q *Queries) UpdateJobPriorityById(ctx context.Context, arg UpdateJobPriorityByIdParams) error
Job priority
func (*Queries) UpdateJobPriorityByJobSet ¶
func (q *Queries) UpdateJobPriorityByJobSet(ctx context.Context, arg UpdateJobPriorityByJobSetParams) error
func (*Queries) UpsertMessageId ¶
func (q *Queries) UpsertMessageId(ctx context.Context, arg UpsertMessageIdParams) error
type QueueCandidateJobsIterator ¶ added in v0.3.35
type QueueCandidateJobsIterator struct {
	LegacyScheduler
	// contains filtered or unexported fields
}
QueueCandidateJobsIterator is an iterator over all jobs in a queue that could potentially be scheduled. Specifically, all jobs that:
- would not exceed per-round resource limits,
- would not exceed total per-queue resource limits, and
- for which, at the time of checking, there existed a node the job could be scheduled on.
Because other jobs may have been scheduled between this iterator finding a node for a job and the main scheduler thread considering that job, the main scheduling thread must verify that the job can still be assigned to this node.
func NewQueueCandidateJobsIterator ¶ added in v0.3.35
func NewQueueCandidateJobsIterator(ctx context.Context, queue string, initialTotalQueueResources schedulerobjects.ResourceList, initialTotalQueueResourcesByPriority schedulerobjects.QuantityByPriorityAndResourceType, scheduler LegacyScheduler) (*QueueCandidateJobsIterator, error)
func (*QueueCandidateJobsIterator) Lease ¶ added in v0.3.35
func (it *QueueCandidateJobsIterator) Lease(jobSchedulingReport *JobSchedulingReport)
Update the internal state of the iterator to reflect that a job was leased.
func (*QueueCandidateJobsIterator) Next ¶ added in v0.3.35
func (it *QueueCandidateJobsIterator) Next() (*JobSchedulingReport, error)
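A sketch of driving the iterator from a scheduling loop; the queue name and the initialResources, initialResourcesByPriority, and scheduler variables are assumed to come from elsewhere, and the assumption is that Next returns nil once the queue is exhausted:
	it, err := NewQueueCandidateJobsIterator(ctx, "my-queue", initialResources, initialResourcesByPriority, scheduler)
	if err != nil {
		// handle error
	}
	for {
		report, err := it.Next()
		if err != nil {
			// handle error
			break
		}
		if report == nil {
			break // no more candidate jobs
		}
		// Re-verify that the job still fits on the chosen node, then record the lease.
		it.Lease(report)
	}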
type QueueSchedulingReport ¶ added in v0.3.35
type QueueSchedulingReport struct {
	// Queue name.
	Name                                      string
	MostRecentSuccessfulJobSchedulingReport   *JobSchedulingReport
	MostRecentUnsuccessfulJobSchedulingReport *JobSchedulingReport
}
QueueSchedulingReport contains job scheduling reports for the most recent successful and failed scheduling attempts for this queue.
func (*QueueSchedulingReport) String ¶ added in v0.3.35
func (report *QueueSchedulingReport) String() string
type QueuedJobsIterator ¶ added in v0.3.35
type QueuedJobsIterator struct {
// contains filtered or unexported fields
}
QueuedJobsIterator is an iterator over all jobs in a queue. It loads jobs in batches from Redis asynchronously.
func NewQueuedJobsIterator ¶ added in v0.3.35
func NewQueuedJobsIterator(ctx context.Context, queue string, repo SchedulerJobRepository) (*QueuedJobsIterator, error)
type Run ¶
type Run struct {
	RunID          uuid.UUID `db:"run_id"`
	JobID          uuid.UUID `db:"job_id"`
	JobSet         string    `db:"job_set"`
	Executor       string    `db:"executor"`
	SentToExecutor bool      `db:"sent_to_executor"`
	Cancelled      bool      `db:"cancelled"`
	Running        bool      `db:"running"`
	Succeeded      bool      `db:"succeeded"`
	Failed         bool      `db:"failed"`
	Serial         int64     `db:"serial"`
	LastModified   time.Time `db:"last_modified"`
}
type Scheduler ¶
type Scheduler struct {
	Producer pulsar.Producer
	Db       *pgxpool.Pool
	// Map from job id to job struct.
	// Contains all jobs the scheduler is aware of that have not terminated,
	// i.e., succeeded, failed, or been cancelled.
	// Jobs are added when read from postgres.
	// Jobs are removed when they terminate.
	ActiveJobs map[uuid.UUID]*Job
	// Map from job id to a collection of all runs associated with that job.
	RunsByJobId map[uuid.UUID]*JobRuns
	// Ids of active jobs that don't have at least one active run associated with them,
	// i.e., jobs that have not yet been scheduled.
	QueuedJobIds []uuid.UUID
	// List of worker nodes available across all clusters.
	Nodes map[string]*Nodeinfo
	// Map from executor name to the last time we heard from that executor.
	Executors map[string]time.Time
	// Amount of time after which an executor is assumed to be unavailable
	// if no updates have been received.
	ExecutorAliveDuration time.Duration
	// Each write into postgres is marked with an increasing serial number.
	// For each table, we store the largest number seen so far.
	// When reading from postgres, we select all rows with a serial number larger than what we've seen so far.
	//
	// The first record written to postgres for each table will have serial 1,
	// so these should be initialised to 0.
	JobsSerial               int64
	RunsSerial               int64
	JobRunsAssignmentsSerial int64
	JobErrorsSerial          int64
	JobRunErrorsSerial       int64
	NodesSerial              int64
	// Optional logger.
	// If not provided, the default logrus logger is used.
	Logger *logrus.Entry
}
Scheduler implements a trivial scheduling algorithm. It's here just to test that the scheduling subsystem as a whole is working.
type SchedulerJobRepository ¶ added in v0.3.35
type SchedulerJobRepository interface {
	// GetQueueJobIds returns the ids of all queued jobs for some queue.
	GetQueueJobIds(queue string) ([]string, error)
	// GetExistingJobsByIds returns any jobs with an id in the provided list.
	GetExistingJobsByIds(jobIds []string) ([]*api.Job, error)
	// TryLeaseJobs tries to create job leases and returns the jobs that were successfully leased.
	// Leasing may fail, e.g., if the job was concurrently leased to another executor.
	TryLeaseJobs(clusterId string, queue string, jobs []*api.Job) ([]*api.Job, error)
}
type SchedulingReportsRepository ¶ added in v0.3.35
type SchedulingReportsRepository struct {
	// Scheduling reports for the jobs that were most recently attempted to be scheduled.
	MostRecentJobSchedulingReports *lru.Cache
	// Scheduling reports for the most recently seen queues.
	MostRecentQueueSchedulingReports *lru.Cache
}
SchedulingReportsRepository stores reports on the most recent scheduling attempts.
func NewSchedulingReportsRepository ¶ added in v0.3.35
func NewSchedulingReportsRepository(maxQueueSchedulingReports, maxJobSchedulingReports int) *SchedulingReportsRepository
func (*SchedulingReportsRepository) Add ¶ added in v0.3.35
func (repo *SchedulingReportsRepository) Add(queueName string, report *JobSchedulingReport)
func (*SchedulingReportsRepository) GetJobReport ¶ added in v0.3.35
func (repo *SchedulingReportsRepository) GetJobReport(ctx context.Context, jobId *schedulerobjects.JobId) (*schedulerobjects.JobReport, error)
func (*SchedulingReportsRepository) GetJobSchedulingReport ¶ added in v0.3.35
func (repo *SchedulingReportsRepository) GetJobSchedulingReport(jobId uuid.UUID) (*JobSchedulingReport, bool)
func (*SchedulingReportsRepository) GetQueueReport ¶ added in v0.3.35
func (repo *SchedulingReportsRepository) GetQueueReport(ctx context.Context, queue *schedulerobjects.Queue) (*schedulerobjects.QueueReport, error)
func (*SchedulingReportsRepository) GetQueueSchedulingReport ¶ added in v0.3.35
func (repo *SchedulingReportsRepository) GetQueueSchedulingReport(queueName string) (*QueueSchedulingReport, bool)
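A small sketch of recording and retrieving reports; the cache sizes, queue name, and the jobSchedulingReport variable are illustrative:
	repo := NewSchedulingReportsRepository(100, 1000) // max queue reports, max job reports
	repo.Add("my-queue", jobSchedulingReport)
	if queueReport, ok := repo.GetQueueSchedulingReport("my-queue"); ok {
		fmt.Println(queueReport)
	}
	if jobReport, ok := repo.GetJobSchedulingReport(jobSchedulingReport.JobId); ok {
		fmt.Println(jobReport)
	}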
type SchedulingRoundReport ¶ added in v0.3.35
type SchedulingRoundReport struct {
	// Time at which the scheduling cycle started.
	Started time.Time
	// Time at which the scheduling cycle finished.
	Finished time.Time
	// Executor for which the scheduler was invoked.
	Executor string
	// These factors influence the fraction of resources assigned to each queue.
	PriorityFactorByQueue map[string]float64
	// Resources assigned to each queue at the start of the scheduling cycle.
	InitialResourcesByQueueAndPriority map[string]schedulerobjects.QuantityByPriorityAndResourceType
	// Resources assigned to each queue during this scheduling cycle.
	ScheduledResourcesByQueueAndPriority map[string]schedulerobjects.QuantityByPriorityAndResourceType
	// Reports for all successful job scheduling attempts.
	SuccessfulJobSchedulingReportsByQueue map[string]map[uuid.UUID]*JobSchedulingReport
	// Reports for all unsuccessful job scheduling attempts.
	UnsuccessfulJobSchedulingReportsByQueue map[string]map[uuid.UUID]*JobSchedulingReport
	// Total resources across all clusters available at the start of the scheduling cycle.
	TotalResources schedulerobjects.ResourceList
	// Reason for why the scheduling round finished.
	TerminationReason string
	// contains filtered or unexported fields
}
func NewSchedulingRoundReport ¶ added in v0.3.35
func NewSchedulingRoundReport(totalResources schedulerobjects.ResourceList, priorityFactorByQueue map[string]float64) *SchedulingRoundReport
func (*SchedulingRoundReport) AddJobSchedulingReport ¶ added in v0.3.35
func (schedulingRoundReport *SchedulingRoundReport) AddJobSchedulingReport(jobSchedulingReport *JobSchedulingReport)
Add a job scheduling report to the report for this invocation of the scheduler. Automatically updates scheduled resources by calling AddScheduledResources. Is thread-safe.
func (*SchedulingRoundReport) AddScheduledResources ¶ added in v0.3.35
func (schedulingRoundReport *SchedulingRoundReport) AddScheduledResources(queue string, req *schedulerobjects.PodRequirements)
Add scheduled resources for a queue. Called by AddJobSchedulingReport.
func (*SchedulingRoundReport) ClearJobSpecs ¶ added in v0.3.35
func (schedulingRoundReport *SchedulingRoundReport) ClearJobSpecs()
Zero out any job specs stored in job scheduling reports referenced from this round report, to reduce memory usage.
func (*SchedulingRoundReport) String ¶ added in v0.3.35
func (report *SchedulingRoundReport) String() string
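A sketch of how the scheduler might populate a round report; totalResources, priorityFactorByQueue, and jobSchedulingReport are assumed to exist, and in practice the report is managed by LegacyScheduler itself:
	roundReport := NewSchedulingRoundReport(totalResources, priorityFactorByQueue)
	// For each job scheduling attempt:
	roundReport.AddJobSchedulingReport(jobSchedulingReport) // also updates scheduled resources
	// Once the round is over, drop job specs to reduce memory usage before retaining the report.
	roundReport.ClearJobSpecs()
	fmt.Println(roundReport)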
type UpdateJobPriorities ¶
func (UpdateJobPriorities) CanBeAppliedBefore ¶
func (a UpdateJobPriorities) CanBeAppliedBefore(b DbOperation) bool
func (UpdateJobPriorities) Merge ¶
func (a UpdateJobPriorities) Merge(b DbOperation) bool
type UpdateJobSetPriorities ¶
func (UpdateJobSetPriorities) AffectsJobSet ¶
func (a UpdateJobSetPriorities) AffectsJobSet(jobSet string) bool
func (UpdateJobSetPriorities) CanBeAppliedBefore ¶
func (a UpdateJobSetPriorities) CanBeAppliedBefore(b DbOperation) bool
func (UpdateJobSetPriorities) Merge ¶
func (a UpdateJobSetPriorities) Merge(b DbOperation) bool