Documentation ¶
Index ¶
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Job ¶ added in v0.0.13
type Job struct {
// contains filtered or unexported fields
}
func (*Job) MarshalLogObject ¶ added in v0.0.13
func (j *Job) MarshalLogObject(enc zapcore.ObjectEncoder) error
type JobsPlanner ¶ added in v0.0.14
type JobsPlanner struct {
	sync.Mutex
	AvailableJobs chan *Job
	// contains filtered or unexported fields
}
func NewJobsPlanner ¶ added in v0.0.14
func (*JobsPlanner) JobCount ¶ added in v0.0.14
func (p *JobsPlanner) JobCount() int
func (*JobsPlanner) MarshalLogObject ¶ added in v0.0.14
func (p *JobsPlanner) MarshalLogObject(enc zapcore.ObjectEncoder) error
func (*JobsPlanner) SignalCompletionUpUntil ¶ added in v0.0.14
func (p *JobsPlanner) SignalCompletionUpUntil(storeName string, blockNum uint64)
type Scheduler ¶
type Scheduler struct {
// contains filtered or unexported fields
}
func NewScheduler ¶
func NewScheduler(ctx context.Context, availableJobs chan *Job, squasher *Squasher, workerPool *WorkerPool, respFunc substreams.ResponseFunc) (*Scheduler, error)
type Snapshots ¶ added in v0.0.14
type Snapshots struct {
	Completes block.Ranges // Shortest completes first, largest last.
	Partials  block.Ranges // First partials first, last last
}
func (*Snapshots) ContainsPartial ¶ added in v0.0.14
func (*Snapshots) LastCompleteSnapshotBefore ¶ added in v0.0.14
func (*Snapshots) LastCompletedBlock ¶ added in v0.0.14
type Squasher ¶
Squasher produces _complete_ stores by merging backing partial stores.
func NewSquasher ¶
func NewSquasher(ctx context.Context, workPlan WorkPlan, stores map[string]*state.Store, reqStartBlock uint64, jobsPlanner *JobsPlanner) (*Squasher, error)
NewSquasher receives stores, initializes them and fetches them from the existing storage. It prepares itself to receive Squash() requests that should correspond to what is missing for those stores to reach `targetExclusiveEndBlock`. This is managed externally by the Scheduler/Strategy. Eventually, ideally, all components are synchronized around the actual data: the state of storages present, the requests needed to fill in those stores up to the target block, etc.
type StorageState ¶
func FetchStorageState ¶
func NewStorageState ¶
func NewStorageState() *StorageState
func (*StorageState) String ¶ added in v0.0.14
func (s *StorageState) String() string
type StoreSquasher ¶ added in v0.0.14
func NewStoreSquasher ¶ added in v0.0.14
func NewStoreSquasher(initialStore *state.Store, targetExclusiveBlock, nextExpectedStartBlock uint64, jobsPlanner *JobsPlanner) *StoreSquasher
func (*StoreSquasher) IsEmpty ¶ added in v0.0.14
func (s *StoreSquasher) IsEmpty() bool
func (*StoreSquasher) String ¶ added in v0.0.14
func (s *StoreSquasher) String() string
type WorkPlan ¶ added in v0.0.14
func (WorkPlan) ProgressMessages ¶ added in v0.0.14
func (p WorkPlan) ProgressMessages(hostname string) (out []*pbsubstreams.ModuleProgress)
func (WorkPlan) SquashPartialsPresent ¶ added in v0.0.14
type WorkerPool ¶ added in v0.0.13
type WorkerPool struct {
// contains filtered or unexported fields
}
func NewWorkerPool ¶ added in v0.0.13
func NewWorkerPool(workerCount int, originalRequestModules *pbsubstreams.Modules, grpcClientFactory substreams.GrpcClientFactory) *WorkerPool
func (*WorkerPool) Borrow ¶ added in v0.0.13
func (p *WorkerPool) Borrow() *Worker
func (*WorkerPool) ReturnWorker ¶ added in v0.0.13
func (p *WorkerPool) ReturnWorker(worker *Worker)
Click to show internal directories.
Click to hide internal directories.