Documentation ¶
Index ¶
- Constants
- func GetSplitFields(table *model.TableInfo, splitFields []string) ([]*model.ColumnInfo, error)
- type BucketIterator
- type ChunkIterator
- type LimitIterator
- type RandomIterator
- type RangeInfo
- func (r *RangeInfo) Copy() *RangeInfo
- func (r *RangeInfo) GetBucketIndexLeft() int
- func (r *RangeInfo) GetBucketIndexRight() int
- func (r *RangeInfo) GetChunk() *chunk.Range
- func (r *RangeInfo) GetChunkIndex() int
- func (r *RangeInfo) GetTableIndex() int
- func (r *RangeInfo) ToNode() *checkpoints.Node
- func (r *RangeInfo) Update(column, lower, upper string, updateLower, updateUpper bool, ...)
Constants ¶
const DefaultChannelBuffer = 1024
DefaultChannelBuffer is the default size of the channel buffer.
const (
// SplitThreshold is the threshold for splitting a range into smaller chunks
SplitThreshold = 1000
)
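Example (sketch): one plausible use of DefaultChannelBuffer is sizing the buffered channel that carries split ranges to downstream workers. The function name, channel element type, and import path below are assumptions, not part of this package's API.

package example

import (
    "github.com/pingcap/tiflow/sync_diff_inspector/splitter" // import path assumed
)

// newRangeChannel sizes a buffered channel with DefaultChannelBuffer so the
// splitter can stay ahead of the comparison workers (illustrative only).
func newRangeChannel() chan *splitter.RangeInfo {
    return make(chan *splitter.RangeInfo, splitter.DefaultChannelBuffer)
}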
Variables ¶
This section is empty.
Functions ¶
func GetSplitFields ¶
func GetSplitFields(table *model.TableInfo, splitFields []string) ([]*model.ColumnInfo, error)
GetSplitFields returns the fields used to split chunks, ordered by primary key, unique key, index, then the remaining columns.
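Example (sketch): a minimal call to GetSplitFields. The splitter and parser model import paths are assumptions, as is the behavior of passing nil for splitFields.

package example

import (
    "fmt"

    "github.com/pingcap/tidb/parser/model"                   // import path assumed
    "github.com/pingcap/tiflow/sync_diff_inspector/splitter" // import path assumed
)

// printSplitColumns asks the splitter which columns the table would be split
// by. Passing nil is assumed to fall back to the pk, uk, index, columns order
// described above; a non-empty slice is assumed to restrict the candidates.
func printSplitColumns(tableInfo *model.TableInfo) error {
    cols, err := splitter.GetSplitFields(tableInfo, nil)
    if err != nil {
        return err
    }
    for _, col := range cols {
        fmt.Println("split column:", col.Name.O)
    }
    return nil
}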
Types ¶
type BucketIterator ¶
type BucketIterator struct {
// contains filtered or unexported fields
}
BucketIterator is the iterator that splits a table into chunks based on its index bucket statistics.
func NewBucketIterator ¶
func NewBucketIterator(ctx context.Context, progressID string, table *common.TableDiff, dbConn *sql.DB) (*BucketIterator, error)
NewBucketIterator returns a new BucketIterator.
func NewBucketIteratorWithCheckpoint ¶
func NewBucketIteratorWithCheckpoint(
    ctx context.Context,
    progressID string,
    table *common.TableDiff,
    dbConn *sql.DB,
    startRange *RangeInfo,
    bucketSpliterPool *utils.WorkerPool,
) (*BucketIterator, error)
NewBucketIteratorWithCheckpoint returns a new BucketIterator that starts from the given checkpoint range.
func (*BucketIterator) GetIndexID ¶
func (s *BucketIterator) GetIndexID() int64
GetIndexID returns the index ID.
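Example (sketch): driving a BucketIterator end to end. The import paths are assumptions, and the sketch assumes *BucketIterator satisfies the ChunkIterator interface (Next and Close), which is not stated explicitly on this page.

package example

import (
    "context"
    "database/sql"
    "fmt"

    "github.com/pingcap/tiflow/sync_diff_inspector/source/common" // import path assumed
    "github.com/pingcap/tiflow/sync_diff_inspector/splitter"      // import path assumed
)

// walkBuckets splits the table along its index buckets and consumes every
// chunk. Next and Close are assumed to be available via ChunkIterator.
func walkBuckets(ctx context.Context, table *common.TableDiff, db *sql.DB) error {
    iter, err := splitter.NewBucketIterator(ctx, "progress-1", table, db)
    if err != nil {
        return err
    }
    defer iter.Close()

    fmt.Println("splitting on index id:", iter.GetIndexID())
    for {
        rng, err := iter.Next()
        if err != nil {
            return err
        }
        if rng == nil {
            // a nil range means the iterator reached the end of the table
            return nil
        }
        // hand rng (*chunk.Range) to the comparison workers here
    }
}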
type ChunkIterator ¶
type ChunkIterator interface {
    // Next returns the next chunk, or nil once the iterator reaches the end.
    Next() (*chunk.Range, error)
    // Close closes the current iterator.
    Close()
}
ChunkIterator lazily generates the next chunk for a single table.
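Example (sketch): because the concrete iterators are expected to satisfy this interface, downstream code can be written against ChunkIterator alone. The import paths and helper name below are assumptions.

package example

import (
    "github.com/pingcap/tiflow/sync_diff_inspector/chunk"    // import path assumed
    "github.com/pingcap/tiflow/sync_diff_inspector/splitter" // import path assumed
)

// collectChunks drains any ChunkIterator into a slice, relying only on the
// documented contract: Next returns nil once the iterator is exhausted.
func collectChunks(it splitter.ChunkIterator) ([]*chunk.Range, error) {
    defer it.Close()

    var ranges []*chunk.Range
    for {
        r, err := it.Next()
        if err != nil {
            return nil, err
        }
        if r == nil {
            return ranges, nil
        }
        ranges = append(ranges, r)
    }
}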
type LimitIterator ¶
type LimitIterator struct {
// contains filtered or unexported fields
}
LimitIterator is the iterator that splits a table into chunks using LIMIT-based queries.
func NewLimitIterator ¶
func NewLimitIterator(ctx context.Context, progressID string, table *common.TableDiff, dbConn *sql.DB) (*LimitIterator, error)
NewLimitIterator returns a new LimitIterator.
func NewLimitIteratorWithCheckpoint ¶
func NewLimitIteratorWithCheckpoint(
    ctx context.Context,
    progressID string,
    table *common.TableDiff,
    dbConn *sql.DB,
    startRange *RangeInfo,
) (*LimitIterator, error)
NewLimitIteratorWithCheckpoint returns a new LimitIterator that starts from the given checkpoint range.
func (*LimitIterator) GetIndexID ¶
func (lmt *LimitIterator) GetIndexID() int64
GetIndexID returns the current index ID.
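Example (sketch): resuming LIMIT-based splitting from a previously saved range, falling back to a fresh iterator when no checkpoint exists. The import paths, function name, and progress ID are assumptions.

package example

import (
    "context"
    "database/sql"

    "github.com/pingcap/tiflow/sync_diff_inspector/source/common" // import path assumed
    "github.com/pingcap/tiflow/sync_diff_inspector/splitter"      // import path assumed
)

// resumeLimitSplit restarts splitting from a checkpointed range when one is
// available, and from the beginning of the table otherwise.
func resumeLimitSplit(
    ctx context.Context,
    table *common.TableDiff,
    db *sql.DB,
    startRange *splitter.RangeInfo,
) (*splitter.LimitIterator, error) {
    if startRange == nil {
        // no checkpoint yet: start from the beginning of the table
        return splitter.NewLimitIterator(ctx, "progress-1", table, db)
    }
    return splitter.NewLimitIteratorWithCheckpoint(ctx, "progress-1", table, db, startRange)
}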
type RandomIterator ¶
type RandomIterator struct {
// contains filtered or unexported fields
}
RandomIterator is used to iterate a table by splitting it into chunks at randomly sampled points.
func NewRandomIterator ¶
func NewRandomIterator(ctx context.Context, progressID string, table *common.TableDiff, dbConn *sql.DB) (*RandomIterator, error)
NewRandomIterator returns a new RandomIterator.
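Example (sketch): exposing a RandomIterator through the ChunkIterator interface so callers can reuse the same consumption loop shown earlier. It assumes the import paths shown and that *RandomIterator also satisfies ChunkIterator.

package example

import (
    "context"
    "database/sql"

    "github.com/pingcap/tiflow/sync_diff_inspector/source/common" // import path assumed
    "github.com/pingcap/tiflow/sync_diff_inspector/splitter"      // import path assumed
)

// newRandomSplitter builds a RandomIterator and returns it as a ChunkIterator
// so callers do not depend on the concrete type (illustrative only).
func newRandomSplitter(ctx context.Context, table *common.TableDiff, db *sql.DB) (splitter.ChunkIterator, error) {
    return splitter.NewRandomIterator(ctx, "progress-1", table, db)
}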
type RangeInfo ¶
type RangeInfo struct {
    ChunkRange *chunk.Range `json:"chunk-range"`
    // for bucket checkpoint
    IndexID int64 `json:"index-id"`

    ProgressID string `json:"progress-id"`
}
RangeInfo represents the unit of processing for a chunk. It is the only entry point for checkpointing.
func FromNode ¶
func FromNode(n *checkpoints.Node) *RangeInfo
FromNode converts a checkpoints.Node into a RangeInfo.
func (*RangeInfo) GetBucketIndexLeft ¶
func (r *RangeInfo) GetBucketIndexLeft() int
GetBucketIndexLeft returns the BucketIndexLeft.
func (*RangeInfo) GetBucketIndexRight ¶
func (r *RangeInfo) GetBucketIndexRight() int
GetBucketIndexRight returns the BucketIndexRight.
func (*RangeInfo) GetChunkIndex ¶
func (r *RangeInfo) GetChunkIndex() int
GetChunkIndex returns the ChunkIndex.
func (*RangeInfo) GetTableIndex ¶
func (r *RangeInfo) GetTableIndex() int
GetTableIndex returns the index of the table in the table diffs. IMPORTANT: the order of the tables must stay stable across checkpoints, so the config info has to be saved to the checkpoint file as well.
func (*RangeInfo) ToNode ¶
func (r *RangeInfo) ToNode() *checkpoints.Node
ToNode converts the RangeInfo into a checkpoints.Node.
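Example (sketch): the checkpoint round trip. A RangeInfo is converted to a checkpoints.Node for persistence and later turned back into a RangeInfo to resume. The saveNode callback and both helper names are placeholders, not part of this package; the import paths are assumptions.

package example

import (
    "github.com/pingcap/tiflow/sync_diff_inspector/checkpoints" // import path assumed
    "github.com/pingcap/tiflow/sync_diff_inspector/splitter"    // import path assumed
)

// checkpointRange persists the progress of one chunk through ToNode; saveNode
// stands in for whatever checkpoint writer the caller uses.
func checkpointRange(r *splitter.RangeInfo, saveNode func(*checkpoints.Node) error) error {
    return saveNode(r.ToNode())
}

// resumeRange rebuilds the RangeInfo from a loaded node and reports which
// table and chunk the diff should continue from.
func resumeRange(n *checkpoints.Node) (tableIndex, chunkIndex int) {
    r := splitter.FromNode(n)
    return r.GetTableIndex(), r.GetChunkIndex()
}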