Documentation ¶
Index ¶
- Constants
- Variables
- func ApplyStateKey(regionID uint64) []byte
- func BindRespError(resp *raft_cmdpb.RaftCmdResponse, err error)
- func BindRespTerm(resp *raft_cmdpb.RaftCmdResponse, term uint64)
- func BootstrapStore(engines *Engines, clusterID, storeID uint64) error
- func CheckKeyInRegion(key []byte, region *metapb.Region) error
- func CheckKeyInRegionExclusive(key []byte, region *metapb.Region) error
- func CheckKeyInRegionInclusive(key []byte, region *metapb.Region) error
- func CheckRegionEpoch(req *raft_cmdpb.RaftCmdRequest, region *metapb.Region, includeRegion bool) error
- func ClearMeta(engines *Engines, kvWB, raftWB *WriteBatch, regionID uint64, lastIndex uint64) error
- func ClearPrepareBootstrap(engines *Engines, regionID uint64) error
- func ClearPrepareBootstrapState(engines *Engines) error
- func CloneMsg(origin, cloned proto.Message) error
- func CompactRaftLog(tag string, state *applyState, compactIndex, compactTerm uint64) error
- func CreateRaftLogCompactionFilter(targetLevel int, startKey, endKey []byte) badger.CompactionFilter
- func ErrResp(err error) *raft_cmdpb.RaftCmdResponse
- func ErrRespRegionNotFound(regionID uint64) *raft_cmdpb.RaftCmdResponse
- func ErrRespStaleCommand(term uint64) *raft_cmdpb.RaftCmdResponse
- func ErrRespWithTerm(err error, term uint64) *raft_cmdpb.RaftCmdResponse
- func GetChangePeerCmd(msg *raft_cmdpb.RaftCmdRequest) *raft_cmdpb.ChangePeerRequest
- func IsEpochStale(epoch *metapb.RegionEpoch, checkEpoch *metapb.RegionEpoch) bool
- func IsUrgentRequest(rlog raftlog.RaftLog) bool
- func NewCustomWriteBatch(startTS, commitTS uint64, ctx *kvrpcpb.Context) mvcc.WriteBatch
- func NewDBWriter(conf *config.Config, router *RaftstoreRouter) mvcc.DBWriter
- func NewTestRaftWriter(dbBundle *mvcc.DBBundle, engine *Engines) mvcc.DBWriter
- func NotifyReqRegionRemoved(regionId uint64, cb *Callback)
- func NotifyStaleReq(term uint64, cb *Callback)
- func PeerEqual(l, r *metapb.Peer) bool
- func PrepareBootstrap(engines *Engines, storeID, regionID, peerID uint64) (*metapb.Region, error)
- func Quorum(total int) int
- func RaftLogIndex(key []byte) (uint64, error)
- func RaftLogKey(regionID, index uint64) []byte
- func RaftStateKey(regionID uint64) []byte
- func RaftstoreErrToPbError(e error) *errorpb.Error
- func RawEndKey(region *metapb.Region) []byte
- func RawStartKey(region *metapb.Region) []byte
- func RegionEqual(l, r *metapb.Region) bool
- func RegionMetaPrefixKey(regionID uint64) []byte
- func RegionRaftPrefixKey(regionID uint64) []byte
- func RegionStateKey(regionID uint64) []byte
- func RestoreLockStore(offset uint64, bundle *mvcc.DBBundle, raftDB *badger.DB) error
- func SnapshotRaftStateKey(regionID uint64) []byte
- func TimeToU64(t time.Time) uint64
- func U64ToTime(u uint64) time.Time
- func WritePeerState(kvWB *WriteBatch, region *metapb.Region, state rspb.PeerState, ...)
- type ApplyOptions
- type ApplyResult
- type ApplySnapResult
- type CFFile
- type CFName
- type CacheQueryStats
- type Callback
- type Config
- type ConsistencyState
- type DestroyPeerJob
- type Engines
- type EntryCache
- type ErrEpochNotMatch
- type ErrKeyNotInRegion
- type ErrNotLeader
- type ErrRaftEntryTooLarge
- type ErrRegionNotFound
- type ErrServerIsBusy
- type ErrStaleCommand
- type ErrStoreNotMatch
- type GenSnapTask
- type GlobalContext
- type IOLimiter
- type InvokeContext
- type JobStatus
- type LeaderChecker
- type Lease
- type LeaseState
- type LimitWriter
- type MetaFile
- type Msg
- type MsgComputeHashResult
- type MsgGCSnap
- type MsgHalfSplitRegion
- type MsgMergeResult
- type MsgRaftCmd
- type MsgSignificant
- type MsgSignificantType
- type MsgSplitRegion
- type MsgStoreClearRegionSizeInRange
- type MsgType
- type Node
- type Peer
- func (p *Peer) Activate(applyMsgs *applyMsgs)
- func (p *Peer) AnyNewPeerCatchUp(peerId uint64) bool
- func (p *Peer) ApplyReads(kv *mvcc.DBBundle, ready *raft.Ready)
- func (p *Peer) CheckPeers()
- func (p *Peer) CheckStaleState(cfg *Config) StaleState
- func (p *Peer) CollectDownPeers(maxDuration time.Duration) []*pdpb.PeerStats
- func (p *Peer) CollectPendingPeers() []*metapb.Peer
- func (p *Peer) Destroy(engine *Engines, keepData bool) error
- func (p *Peer) GetMinProgress() uint64
- func (p *Peer) GetRaftStatus() *raft.Status
- func (p *Peer) GetRole() raft.StateType
- func (p *Peer) HandleRaftReadyAppend(trans Transport, applyMsgs *applyMsgs, kvWB, raftWB *WriteBatch, ...) *ReadyICPair
- func (p *Peer) HandleRaftReadyApply(kv *mvcc.DBBundle, applyMsgs *applyMsgs, ready *raft.Ready)
- func (p *Peer) HasPendingSnapshot() bool
- func (p *Peer) HeartbeatPd(pdScheduler chan<- task)
- func (p *Peer) IsApplyingSnapshot() bool
- func (p *Peer) IsLeader() bool
- func (p *Peer) LeaderId() uint64
- func (p *Peer) MaybeCampaign(parentIsLeader bool) bool
- func (p *Peer) MaybeDestroy() *DestroyPeerJob
- func (p *Peer) MaybeRenewLeaderLease(ts time.Time)
- func (p *Peer) OnRoleChanged(observer PeerEventObserver, ready *raft.Ready)
- func (p *Peer) PeerId() uint64
- func (p *Peer) PostApply(kv *mvcc.DBBundle, applyState applyState, appliedIndexTerm uint64, merged bool, ...) bool
- func (p *Peer) PostPropose(meta *ProposalMeta, isConfChange bool, cb *Callback)
- func (p *Peer) PostRaftReadyPersistent(trans Transport, applyMsgs *applyMsgs, ready *raft.Ready, ...) *ApplySnapResult
- func (p *Peer) PostSplit()
- func (p *Peer) PrePropose(cfg *Config, rlog raftlog.RaftLog) (*ProposalContext, error)
- func (p *Peer) Propose(kv *mvcc.DBBundle, cfg *Config, cb *Callback, rlog raftlog.RaftLog, ...) bool
- func (p *Peer) ProposeConfChange(cfg *Config, req *raft_cmdpb.RaftCmdRequest) (uint64, error)
- func (p *Peer) ProposeNormal(cfg *Config, rlog raftlog.RaftLog) (uint64, error)
- func (p *Peer) ProposeTransferLeader(cfg *Config, req *raft_cmdpb.RaftCmdRequest, cb *Callback) bool
- func (p *Peer) ReadyToHandlePendingSnap() bool
- func (p *Peer) Region() *metapb.Region
- func (p *Peer) Send(trans Transport, msgs []eraftpb.Message) error
- func (p *Peer) SetRegion(region *metapb.Region)
- func (p *Peer) Step(m *eraftpb.Message) error
- func (p *Peer) Stop()
- func (p *Peer) Store() *PeerStorage
- func (p *Peer) TakeApplyProposals() *regionProposal
- func (p *Peer) Term() uint64
- type PeerEventContext
- type PeerEventObserver
- type PeerStat
- type PeerStorage
- func (ps *PeerStorage) Append(invokeCtx *InvokeContext, entries []eraftpb.Entry, raftWB *WriteBatch) error
- func (ps *PeerStorage) AppliedIndex() uint64
- func (ps *PeerStorage) ApplySnapshot(ctx *InvokeContext, snap *eraftpb.Snapshot, kvWB *WriteBatch, ...) error
- func (p *PeerStorage) CancelApplyingSnap() bool
- func (p *PeerStorage) CheckApplyingSnap() bool
- func (ps *PeerStorage) ClearData() error
- func (ps *PeerStorage) CompactTo(idx uint64)
- func (ps *PeerStorage) Entries(low, high, maxSize uint64) ([]eraftpb.Entry, error)
- func (ps *PeerStorage) FirstIndex() (uint64, error)
- func (ps *PeerStorage) InitialState() (eraftpb.HardState, eraftpb.ConfState, error)
- func (ps *PeerStorage) IsApplyingSnapshot() bool
- func (ps *PeerStorage) LastIndex() (uint64, error)
- func (ps *PeerStorage) MaybeGCCache(replicatedIdx, appliedIdx uint64)
- func (ps *PeerStorage) PostReadyPersistent(ctx *InvokeContext) *ApplySnapResult
- func (ps *PeerStorage) Region() *metapb.Region
- func (ps *PeerStorage) SaveReadyState(kvWB, raftWB *WriteBatch, ready *raft.Ready) (*InvokeContext, error)
- func (ps *PeerStorage) ScheduleApplyingSnapshot()
- func (ps *PeerStorage) SetRegion(region *metapb.Region)
- func (ps *PeerStorage) Snapshot() (eraftpb.Snapshot, error)
- func (ps *PeerStorage) Term(idx uint64) (uint64, error)
- type PeerTick
- type ProposalContext
- type ProposalMeta
- type ProposalQueue
- type RaftClient
- type RaftContext
- type RaftError
- type RaftInnerServer
- func (ris *RaftInnerServer) BatchRaft(stream tikvpb.Tikv_BatchRaftServer) error
- func (ris *RaftInnerServer) GetRaftstoreRouter() *RaftstoreRouter
- func (ris *RaftInnerServer) GetStoreMeta() *metapb.Store
- func (ris *RaftInnerServer) Raft(stream tikvpb.Tikv_RaftServer) error
- func (ris *RaftInnerServer) SetPeerEventObserver(ob PeerEventObserver)
- func (ris *RaftInnerServer) Setup(pdClient pd.Client)
- func (ris *RaftInnerServer) Snapshot(stream tikvpb.Tikv_SnapshotServer) error
- func (ris *RaftInnerServer) Start(pdClient pd.Client) error
- func (ris *RaftInnerServer) Stop() error
- type RaftstoreRouter
- type ReadExecutor
- type ReadIndexQueue
- type ReadIndexRequest
- type ReadyICPair
- type RecentAddedPeer
- type RemoteLease
- type ReqCbPair
- type RequestInspector
- type RequestPolicy
- type ServerTransport
- func (t *ServerTransport) Flush()
- func (t *ServerTransport) ReportSnapshotStatus(msg *raft_serverpb.RaftMessage, status raft.SnapshotStatus)
- func (t *ServerTransport) ReportUnreachable(msg *raft_serverpb.RaftMessage)
- func (t *ServerTransport) Send(msg *raft_serverpb.RaftMessage) error
- func (t *ServerTransport) SendSnapshotSock(msg *raft_serverpb.RaftMessage)
- type Snap
- func NewSnap(dir string, key SnapKey, sizeTrack *int64, isSending, toBuild bool, ...) (*Snap, error)
- func NewSnapForApplying(dir string, key SnapKey, sizeTrack *int64, deleter SnapshotDeleter) (*Snap, error)
- func NewSnapForBuilding(dir string, key SnapKey, sizeTrack *int64, deleter SnapshotDeleter, ...) (*Snap, error)
- func NewSnapForReceiving(dir string, key SnapKey, snapshotMeta *rspb.SnapshotMeta, sizeTrack *int64, ...) (*Snap, error)
- func NewSnapForSending(dir string, key SnapKey, sizeTrack *int64, deleter SnapshotDeleter) (*Snap, error)
- func (s *Snap) Apply(opts ApplyOptions) (ApplyResult, error)
- func (s *Snap) Build(dbSnap *regionSnapshot, region *metapb.Region, snapData *rspb.RaftSnapshotData, ...) error
- func (s *Snap) Delete()
- func (s *Snap) Drop()
- func (s *Snap) Exists() bool
- func (s *Snap) Meta() (os.FileInfo, error)
- func (s *Snap) Path() string
- func (s *Snap) Read(b []byte) (int, error)
- func (s *Snap) Save() error
- func (s *Snap) TotalSize() (total uint64)
- func (s *Snap) Write(b []byte) (int, error)
- type SnapEntry
- type SnapKey
- type SnapKeyWithSending
- type SnapManager
- func (sm *SnapManager) DeleteSnapshot(key SnapKey, snapshot Snapshot, checkEntry bool) bool
- func (sm *SnapManager) Deregister(key SnapKey, entry SnapEntry)
- func (sm *SnapManager) GetSnapshotForApplying(snapKey SnapKey) (Snapshot, error)
- func (sm *SnapManager) GetSnapshotForBuilding(key SnapKey) (Snapshot, error)
- func (sm *SnapManager) GetSnapshotForReceiving(snapKey SnapKey, data []byte) (Snapshot, error)
- func (sm *SnapManager) GetSnapshotForSending(snapKey SnapKey) (Snapshot, error)
- func (sm *SnapManager) GetTotalSnapSize() uint64
- func (sm *SnapManager) HasRegistered(key SnapKey) bool
- func (sm *SnapManager) ListIdleSnap() ([]SnapKeyWithSending, error)
- func (sm *SnapManager) Register(key SnapKey, entry SnapEntry)
- func (sm *SnapManager) Stats() SnapStats
- type SnapManagerBuilder
- type SnapState
- type SnapStateType
- type SnapStatistics
- type SnapStats
- type Snapshot
- type SnapshotDeleter
- type StaleState
- type StoreContext
- type StoreLabel
- type StoreTick
- type TestRaftWriter
- func (w *TestRaftWriter) Close()
- func (w *TestRaftWriter) DeleteRange(start, end []byte, latchHandle mvcc.LatchHandle) error
- func (w *TestRaftWriter) NewWriteBatch(startTS, commitTS uint64, ctx *kvrpcpb.Context) mvcc.WriteBatch
- func (w *TestRaftWriter) Open()
- func (w *TestRaftWriter) Write(batch mvcc.WriteBatch) error
- type Transport
- type WaitApplyResultState
- type WriteBatch
- func (wb *WriteBatch) Delete(key y.Key)
- func (wb *WriteBatch) DeleteLock(key []byte)
- func (wb *WriteBatch) Len() int
- func (wb *WriteBatch) MustWriteToKV(db *mvcc.DBBundle)
- func (wb *WriteBatch) MustWriteToRaft(db *badger.DB)
- func (wb *WriteBatch) Reset()
- func (wb *WriteBatch) Rollback(key y.Key)
- func (wb *WriteBatch) RollbackToSafePoint()
- func (wb *WriteBatch) Set(key y.Key, val []byte)
- func (wb *WriteBatch) SetLock(key, val []byte)
- func (wb *WriteBatch) SetMsg(key y.Key, msg proto.Message) error
- func (wb *WriteBatch) SetOpLock(key y.Key, userMeta []byte)
- func (wb *WriteBatch) SetSafePoint()
- func (wb *WriteBatch) SetWithUserMeta(key y.Key, val, userMeta []byte)
- func (wb *WriteBatch) WriteToKV(bundle *mvcc.DBBundle) error
- func (wb *WriteBatch) WriteToRaft(db *badger.DB) error
Constants ¶
const (
    DefaultApplyWBSize = 4 * 1024

    WriteTypeFlagPut      = 'P'
    WriteTypeFlagDelete   = 'D'
    WriteTypeFlagLock     = 'L'
    WriteTypeFlagRollback = 'R'
)
const (
    InitEpochVer     uint64 = 1
    InitEpochConfVer uint64 = 1
    KvTS             uint64 = 1
    RaftTS           uint64 = 0
)
const (
    KB          uint64 = 1024
    MB          uint64 = 1024 * 1024
    SplitSizeMb uint64 = 96
)
const (
    LocalPrefix byte = 0x01

    // We save two types of region data in DB, one for raft and one for other meta data.
    // When the store starts, we should iterate over all region meta data to construct
    // peers; there is no need to traverse the large raft data, so we separate them
    // with different prefixes.
    RegionRaftPrefix byte = 0x02
    RegionMetaPrefix byte = 0x03
    RegionRaftLogLen      = 19 // REGION_RAFT_PREFIX_KEY + region_id + suffix + index

    // Following are the suffixes after the local prefix.
    // For region id:
    RaftLogSuffix           byte = 0x01
    RaftStateSuffix         byte = 0x02
    ApplyStateSuffix        byte = 0x03
    SnapshotRaftStateSuffix byte = 0x04

    // For region meta:
    RegionStateSuffix byte = 0x01
)
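A sketch of how these constants plausibly compose into a raft log key; the layout below adds up to the 19 bytes of RegionRaftLogLen, though the exact encoding used by RaftLogKey is an assumption here.

import "encoding/binary"

// Sketch only. Assumed layout: LocalPrefix | RegionRaftPrefix |
// region id (8 bytes, big-endian) | RaftLogSuffix | log index
// (8 bytes, big-endian) = 19 bytes = RegionRaftLogLen.
func raftLogKeySketch(regionID, index uint64) []byte {
    key := make([]byte, RegionRaftLogLen)
    key[0], key[1] = LocalPrefix, RegionRaftPrefix
    binary.BigEndian.PutUint64(key[2:10], regionID)
    key[10] = RaftLogSuffix
    binary.BigEndian.PutUint64(key[11:19], index)
    return key
}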
const (
    MaxCheckClusterBootstrappedRetryCount = 60
    CheckClusterBootstrapRetrySeconds     = 3
)
const (
    // When we create a region peer, we should initialize its log term/index > 0,
    // so that we can force the follower peer to sync the snapshot first.
    RaftInitLogTerm  = 5
    RaftInitLogIndex = 5

    MaxSnapRetryCnt = 5

    MaxCacheCapacity = 1024 - 1
)
const (
    NSEC_PER_MSEC uint64 = 1000000
    SEC_SHIFT     uint64 = 10
    MSEC_MASK     uint64 = (1 << SEC_SHIFT) - 1
)
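These constants suggest that TimeToU64 packs the Unix seconds into the high bits and the millisecond part into the low SEC_SHIFT bits. A sketch of one encoding consistent with them (an assumption, not necessarily the package's exact implementation):

import "time"

// timeToU64Sketch packs seconds and milliseconds into one uint64.
func timeToU64Sketch(t time.Time) uint64 {
    sec := uint64(t.Unix())
    msec := uint64(t.Nanosecond()) / NSEC_PER_MSEC
    return sec<<SEC_SHIFT | (msec & MSEC_MASK)
}

// u64ToTimeSketch reverses the packing above.
func u64ToTimeSketch(u uint64) time.Time {
    sec := u >> SEC_SHIFT
    msec := u & MSEC_MASK
    return time.Unix(int64(sec), int64(msec*NSEC_PER_MSEC))
}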
const InvalidID uint64 = 0
const LockstoreFileName = "lockstore.dump"
const MaxDeleteBatchSize int = 32 * 1024
In our tests, we found that if the batch size is too large, running deleteAllInRange reduces OLTP QPS by 30% to 60%. 32K proved to be a proper choice.
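A purely hypothetical sketch of chunking a large range deletion so that no single batch exceeds MaxDeleteBatchSize (keysToDelete and flushBatch are illustrative helpers, not package APIs):

var batch [][]byte
batchSize := 0
for _, key := range keysToDelete {
    batch = append(batch, key)
    batchSize += len(key)
    if batchSize >= MaxDeleteBatchSize {
        flushBatch(batch) // write and clear the current chunk
        batch, batchSize = batch[:0], 0
    }
}
if len(batch) > 0 {
    flushBatch(batch)
}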
const RaftInvalidIndex uint64 = 0
Variables ¶
var (
    MinKey = []byte{0}

    // Data keys have two prefixes, meta 'm' and table 't';
    // extra keys have prefix 'm' + 1 = 'n',
    // extra table keys have prefix 't' + 1 = 'u', so the end key would be 'v'.
    MinDataKey = []byte{'m'}
    MaxDataKey = []byte{'v'}

    RegionMetaMinKey = []byte{LocalPrefix, RegionMetaPrefix}
    RegionMetaMaxKey = []byte{LocalPrefix, RegionMetaPrefix + 1}
)
Functions ¶
func ApplyStateKey ¶
func BindRespError ¶
func BindRespError(resp *raft_cmdpb.RaftCmdResponse, err error)
func BindRespTerm ¶
func BindRespTerm(resp *raft_cmdpb.RaftCmdResponse, term uint64)
func BootstrapStore ¶
func CheckKeyInRegion ¶
Check if the key is in the region range [`start_key`, `end_key`).
func CheckKeyInRegionExclusive ¶
Check if the key is in the region range (`start_key`, `end_key`).
func CheckKeyInRegionInclusive ¶
Check if the key is in the region range [`start_key`, `end_key`].
func CheckRegionEpoch ¶
func CheckRegionEpoch(req *raft_cmdpb.RaftCmdRequest, region *metapb.Region, includeRegion bool) error
func ClearMeta ¶
func ClearMeta(engines *Engines, kvWB, raftWB *WriteBatch, regionID uint64, lastIndex uint64) error
func ClearPrepareBootstrap ¶
func CompactRaftLog ¶
CompactRaftLog discards all log entries prior to compact_index. We must guarantee that compact_index is not greater than the applied index.
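A sketch of the guard implied by that guarantee; appliedIndex, tag and state are assumed to be tracked by the caller:

if compactIndex > appliedIndex {
    return fmt.Errorf("compact index %d exceeds applied index %d", compactIndex, appliedIndex)
}
if err := CompactRaftLog(tag, state, compactIndex, compactTerm); err != nil {
    return err
}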
func CreateRaftLogCompactionFilter ¶
func CreateRaftLogCompactionFilter(targetLevel int, startKey, endKey []byte) badger.CompactionFilter
func ErrResp ¶
func ErrResp(err error) *raft_cmdpb.RaftCmdResponse
func ErrRespRegionNotFound ¶
func ErrRespRegionNotFound(regionID uint64) *raft_cmdpb.RaftCmdResponse
func ErrRespStaleCommand ¶
func ErrRespStaleCommand(term uint64) *raft_cmdpb.RaftCmdResponse
func ErrRespWithTerm ¶
func ErrRespWithTerm(err error, term uint64) *raft_cmdpb.RaftCmdResponse
func GetChangePeerCmd ¶
func GetChangePeerCmd(msg *raft_cmdpb.RaftCmdRequest) *raft_cmdpb.ChangePeerRequest
func IsEpochStale ¶
func IsEpochStale(epoch *metapb.RegionEpoch, checkEpoch *metapb.RegionEpoch) bool
Checks whether epoch is staler than check_epoch.
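The comparison plausibly considers both the data version and the configuration version, as TiKV does; a sketch of that assumption:

// Assumed semantics: an epoch is stale if either its version or its
// conf version lags behind the epoch it is checked against.
func isEpochStaleSketch(epoch, checkEpoch *metapb.RegionEpoch) bool {
    return epoch.Version < checkEpoch.Version ||
        epoch.ConfVer < checkEpoch.ConfVer
}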
func IsUrgentRequest ¶
We enable follower lazy commit for better performance, but it may not be appropriate for some requests. IsUrgentRequest checks whether the request should be committed on all followers as soon as possible.
func NewCustomWriteBatch ¶
func NewCustomWriteBatch(startTS, commitTS uint64, ctx *kvrpcpb.Context) mvcc.WriteBatch
func NewDBWriter ¶
func NewDBWriter(conf *config.Config, router *RaftstoreRouter) mvcc.DBWriter
func NewTestRaftWriter ¶
func NotifyReqRegionRemoved ¶
func NotifyStaleReq ¶
func PrepareBootstrap ¶
func RaftLogIndex ¶
RaftLogIndex gets the log index from a raft log key generated by `RaftLogKey`.
func RaftLogKey ¶
func RaftStateKey ¶
func RaftstoreErrToPbError ¶
func RawStartKey ¶
Get the `start_key` of the current region in encoded form.
func RegionEqual ¶
func RegionMetaPrefixKey ¶
func RegionRaftPrefixKey ¶
func RegionStateKey ¶
func RestoreLockStore ¶
func SnapshotRaftStateKey ¶
func WritePeerState ¶
func WritePeerState(kvWB *WriteBatch, region *metapb.Region, state rspb.PeerState, mergeState *rspb.MergeState)
Types ¶
type ApplyOptions ¶
type ApplyResult ¶
type ApplyResult struct {
    HasPut      bool
    RegionState *rspb.RegionLocalState
}
type ApplySnapResult ¶
type CacheQueryStats ¶
type CacheQueryStats struct {
// contains filtered or unexported fields
}
type Callback ¶
type Callback struct {
// contains filtered or unexported fields
}
func NewCallback ¶
func NewCallback() *Callback
func (*Callback) Done ¶
func (cb *Callback) Done(resp *raft_cmdpb.RaftCmdResponse)
type Config ¶
type Config struct {
    // true for high reliability; prevents data loss on power failure.
    SyncLog bool
    // minimizes disruption when a partitioned node rejoins the cluster by using a two-phase election.
    Prevote    bool
    RaftdbPath string
    SnapPath   string

    // store capacity. 0 means no limit.
    Capacity uint64

    // raft_base_tick_interval is a base tick interval (ms).
    RaftBaseTickInterval        time.Duration
    RaftHeartbeatTicks          int
    RaftElectionTimeoutTicks    int
    RaftMinElectionTimeoutTicks int
    RaftMaxElectionTimeoutTicks int
    RaftMaxSizePerMsg           uint64
    RaftMaxInflightMsgs         int
    // When an entry exceeds the max size, reject proposing it.
    RaftEntryMaxSize uint64

    // Interval to gc unnecessary raft log (ms).
    RaftLogGCTickInterval time.Duration
    // A threshold to gc stale raft log; must be >= 1.
    RaftLogGcThreshold uint64
    // When the entry count exceeds this value, gc will be forcibly triggered.
    RaftLogGcCountLimit uint64
    // When the approximate size of raft log entries exceeds this value,
    // gc will be forcibly triggered.
    RaftLogGcSizeLimit uint64
    // When a peer is not responding for this time, the leader will not keep an entry cache for it.
    RaftEntryCacheLifeTime time.Duration
    // When a peer is newly added, reject transferring leadership to the peer for a while.
    RaftRejectTransferLeaderDuration time.Duration

    // Interval (ms) to check whether a region needs to be split.
    SplitRegionCheckTickInterval time.Duration
    // When the size change of a region exceeds the diff since the last check, the
    // region will be checked again for whether it should be split.
    RegionSplitCheckDiff uint64
    // Interval (ms) to check whether to start compaction for a region.
    RegionCompactCheckInterval time.Duration
    // Delay time before deleting a stale peer.
    CleanStalePeerDelay time.Duration
    // Number of regions for each round of checking.
    RegionCompactCheckStep uint64
    // Minimum number of tombstones to trigger manual compaction.
    RegionCompactMinTombstones uint64
    // Minimum percentage of tombstones to trigger manual compaction.
    // Should be between 1 and 100.
    RegionCompactTombstonesPencent uint64

    PdHeartbeatTickInterval      time.Duration
    PdStoreHeartbeatTickInterval time.Duration
    SnapMgrGcTickInterval        time.Duration
    SnapGcTimeout                time.Duration
    NotifyCapacity               uint64
    MessagesPerTick              uint64

    // When a peer is not active for MaxPeerDownDuration,
    // the peer is considered to be down and is reported to PD.
    MaxPeerDownDuration time.Duration
    // If the leader of a peer is missing for longer than MaxLeaderMissingDuration,
    // the peer will ask PD to confirm whether it is valid in any region.
    // If the peer is stale and is not valid in any region, it will destroy itself.
    MaxLeaderMissingDuration time.Duration
    // Similar to MaxLeaderMissingDuration, except that it logs warnings and
    // tries to alert monitoring systems, if there are any.
    AbnormalLeaderMissingDuration time.Duration
    PeerStaleStateCheckInterval   time.Duration

    LeaderTransferMaxLogLag uint64
    SnapApplyBatchSize      uint64

    // Interval (ms) to check whether a region's data is consistent.
    ConsistencyCheckInterval time.Duration
    ReportRegionFlowInterval time.Duration
    // The lease provided by a successfully proposed and applied entry.
    RaftStoreMaxLeaderLease time.Duration
    // The right region derives the origin region id when splitting.
    RightDeriveWhenSplit bool
    AllowRemoveLeader    bool
    // Max log gap allowed to propose merge.
    MergeMaxLogGap uint64
    // Interval to re-propose merge.
    MergeCheckTickInterval time.Duration

    UseDeleteRange bool

    ApplyMaxBatchSize uint64
    ApplyPoolSize     uint64

    StoreMaxBatchSize uint64

    ConcurrentSendSnapLimit uint64
    ConcurrentRecvSnapLimit uint64

    GrpcInitialWindowSize uint64
    GrpcKeepAliveTime     time.Duration
    GrpcKeepAliveTimeout  time.Duration
    GrpcRaftConnNum       uint64
    Addr                  string
    AdvertiseAddr         string
    Labels                []StoreLabel

    SplitCheck *splitCheckConfig
}
func NewDefaultConfig ¶
func NewDefaultConfig() *Config
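Typical usage presumably starts from the defaults and overrides individual exported fields; the paths and values below are illustrative only:

cfg := NewDefaultConfig()
cfg.RaftdbPath = "/data/raft" // illustrative path
cfg.SnapPath = "/data/snap"   // illustrative path
cfg.Capacity = 0              // 0 means no capacity limit
cfg.SyncLog = true            // favor durability over latency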
type ConsistencyState ¶
type ConsistencyState struct {
    LastCheckTime time.Time
    // (computed_result_or_to_be_verified, index, hash)
    Index uint64
    Hash  []byte
}
`ConsistencyState` is used for consistency check.
type DestroyPeerJob ¶
type Engines ¶
type Engines struct {
// contains filtered or unexported fields
}
func NewEngines ¶
func (*Engines) SyncRaftWAL ¶
func (*Engines) WriteKV ¶
func (en *Engines) WriteKV(wb *WriteBatch) error
func (*Engines) WriteRaft ¶
func (en *Engines) WriteRaft(wb *WriteBatch) error
type EntryCache ¶
type EntryCache struct {
// contains filtered or unexported fields
}
type ErrEpochNotMatch ¶
func (*ErrEpochNotMatch) Error ¶
func (e *ErrEpochNotMatch) Error() string
type ErrKeyNotInRegion ¶
func (*ErrKeyNotInRegion) Error ¶
func (e *ErrKeyNotInRegion) Error() string
type ErrNotLeader ¶
func (*ErrNotLeader) Error ¶
func (e *ErrNotLeader) Error() string
type ErrRaftEntryTooLarge ¶
func (*ErrRaftEntryTooLarge) Error ¶
func (e *ErrRaftEntryTooLarge) Error() string
type ErrRegionNotFound ¶
type ErrRegionNotFound struct {
RegionId uint64
}
func (*ErrRegionNotFound) Error ¶
func (e *ErrRegionNotFound) Error() string
type ErrServerIsBusy ¶
func (*ErrServerIsBusy) Error ¶
func (e *ErrServerIsBusy) Error() string
type ErrStaleCommand ¶
type ErrStaleCommand struct{}
func (*ErrStaleCommand) Error ¶
func (e *ErrStaleCommand) Error() string
type ErrStoreNotMatch ¶
func (*ErrStoreNotMatch) Error ¶
func (e *ErrStoreNotMatch) Error() string
type GenSnapTask ¶
type GenSnapTask struct {
// contains filtered or unexported fields
}
type GlobalContext ¶
type GlobalContext struct {
// contains filtered or unexported fields
}
type InvokeContext ¶
type InvokeContext struct {
    RegionID   uint64
    RaftState  raftState
    ApplyState applyState
    SnapRegion *metapb.Region
    // contains filtered or unexported fields
}
func NewInvokeContext ¶
func NewInvokeContext(store *PeerStorage) *InvokeContext
type LeaderChecker ¶
type LeaderChecker interface {
IsLeader(ctx *kvrpcpb.Context, router *RaftstoreRouter) *errorpb.Error
}
type Lease ¶
type Lease struct {
// contains filtered or unexported fields
}
Lease records an expiry time, for examining whether the current moment is within the lease or not. It's dedicated to the Raft leader lease mechanism, and contains either of these states:

1. Suspect Timestamp: a suspicious leader lease timestamp, which marks that the leader may still hold or lose its lease until the clock time goes over this timestamp.
2. Valid Timestamp: a valid leader lease timestamp, which marks that the leader holds the lease for now. The lease is valid until the clock time goes over this timestamp.

    Time
    |---------------------------------->
            ^               ^
           Now           Suspect TS
    State: |    Suspect    |   Suspect

    |---------------------------------->
            ^               ^
           Now           Valid TS
    State: |     Valid     |   Expired

Note:

- The valid timestamp would increase when raft log entries are applied in the current term.
- The suspect timestamp would be set after the message `MsgTimeoutNow` is sent by the current peer. The message `MsgTimeoutNow` starts a leader transfer procedure, during which the current peer, as an old leader, may still hold its lease or lose it. It's possible that a new leader is elected and the current peer, as an old leader, doesn't step down due to a network partition from the new leader. In that case, the current peer loses its leader lease. Within this suspect leader lease expiry time, read requests cannot be performed locally.
- The valid leader lease should be `lease = max_lease - (commit_ts - send_ts)`, and the expiry timestamp for that leader lease is `commit_ts + lease`, which is `send_ts + max_lease` in short.
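A worked example of the lease formula in the note above:

// With max_lease = 9s and a proposal that takes 1s from send to commit,
// the remaining lease is 8s and it expires at send_ts + max_lease.
maxLease := 9 * time.Second
sendTS := time.Now()
commitTS := sendTS.Add(1 * time.Second)
lease := maxLease - commitTS.Sub(sendTS) // 9s - 1s = 8s
expireTS := commitTS.Add(lease)          // equals sendTS.Add(maxLease)
_ = expireTS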
func (*Lease) ExpireRemoteLease ¶
func (l *Lease) ExpireRemoteLease()
func (*Lease) Inspect ¶
func (l *Lease) Inspect(ts *time.Time) LeaseState
Inspect the lease state for the ts or now.
func (*Lease) MaybeNewRemoteLease ¶
func (l *Lease) MaybeNewRemoteLease(term uint64) *RemoteLease
Return a new `RemoteLease` if there is none.
type LeaseState ¶
type LeaseState int
const (
    // The lease is suspicious, may be invalid.
    LeaseState_Suspect LeaseState = 1 + iota
    // The lease is valid.
    LeaseState_Valid
    // The lease is expired.
    LeaseState_Expired
)
type LimitWriter ¶
type LimitWriter struct {
// contains filtered or unexported fields
}
type Msg ¶
func NewPeerMsg ¶
type MsgComputeHashResult ¶
type MsgGCSnap ¶
type MsgGCSnap struct {
Snaps []SnapKeyWithSending
}
type MsgHalfSplitRegion ¶
type MsgHalfSplitRegion struct {
RegionEpoch *metapb.RegionEpoch
}
type MsgMergeResult ¶
type MsgRaftCmd ¶
type MsgSignificant ¶
type MsgSignificant struct {
    Type           MsgSignificantType
    ToPeerID       uint64
    SnapshotStatus raft.SnapshotStatus
}
type MsgSignificantType ¶
type MsgSignificantType int
const (
    MsgSignificantTypeStatus      MsgSignificantType = 1
    MsgSignificantTypeUnreachable MsgSignificantType = 2
)
type MsgSplitRegion ¶
type MsgSplitRegion struct {
    RegionEpoch *metapb.RegionEpoch
    // It's an encoded key.
    // TODO: support meta key.
    SplitKeys [][]byte
    Callback  *Callback
}
type MsgType ¶
type MsgType int64
const (
    MsgTypeNull                   MsgType = 0
    MsgTypeRaftMessage            MsgType = 1
    MsgTypeRaftCmd                MsgType = 2
    MsgTypeSplitRegion            MsgType = 3
    MsgTypeComputeResult          MsgType = 4
    MsgTypeRegionApproximateSize  MsgType = 5
    MsgTypeRegionApproximateKeys  MsgType = 6
    MsgTypeCompactionDeclineBytes MsgType = 7
    MsgTypeHalfSplitRegion        MsgType = 8
    MsgTypeMergeResult            MsgType = 9
    MsgTypeGcSnap                 MsgType = 10
    MsgTypeClearRegionSize        MsgType = 11
    MsgTypeTick                   MsgType = 12
    MsgTypeSignificantMsg         MsgType = 13
    MsgTypeStart                  MsgType = 14
    MsgTypeApplyRes               MsgType = 15
    MsgTypeNoop                   MsgType = 16

    MsgTypeStoreRaftMessage   MsgType = 101
    MsgTypeStoreSnapshotStats MsgType = 102
    // Clear region size and keys for all regions in the range, so we can force them
    // to re-calculate their size later.
    MsgTypeStoreClearRegionSizeInRange MsgType = 104
    MsgTypeStoreCompactedEvent         MsgType = 105
    MsgTypeStoreTick                   MsgType = 106
    MsgTypeStoreStart                  MsgType = 107

    MsgTypeFsmNormal  MsgType = 201
    MsgTypeFsmControl MsgType = 202

    MsgTypeApply             MsgType = 301
    MsgTypeApplyRegistration MsgType = 302
    MsgTypeApplyProposal     MsgType = 303
    MsgTypeApplyCatchUpLogs  MsgType = 304
    MsgTypeApplyLogsUpToDate MsgType = 305
    MsgTypeApplyDestroy      MsgType = 306
    MsgTypeApplySnapshot     MsgType = 307
)
type Node ¶
type Node struct {
// contains filtered or unexported fields
}
func (*Node) BootstrapCluster ¶
type Peer ¶
type Peer struct {
    Meta      *metapb.Peer
    RaftGroup *raft.RawNode

    // Record the last instant of each peer's heartbeat response.
    PeerHeartbeats map[uint64]time.Time

    // Record the instants of peers being added into the configuration.
    // Remove them after they are not pending any more.
    PeersStartPendingTime map[uint64]time.Time
    RecentAddedPeer       *RecentAddedPeer

    // An inaccurate difference in region size since last reset.
    SizeDiffHint uint64
    // Approximate size of the region.
    ApproximateSize *uint64
    // Approximate keys of the region.
    ApproximateKeys         *uint64
    CompactionDeclinedBytes uint64

    ConsistencyState *ConsistencyState

    Tag string

    // Index of last scheduled committed raft log.
    LastApplyingIdx  uint64
    LastCompactedIdx uint64
    // Approximate size of logs that is applied but not compacted yet.
    RaftLogSizeHint uint64

    PendingRemove bool

    PendingMergeState       *rspb.MergeState
    PendingMergeApplyResult *WaitApplyResultState
    PeerStat                PeerStat
    // contains filtered or unexported fields
}
func (*Peer) Activate ¶
func (p *Peer) Activate(applyMsgs *applyMsgs)
Register self to applyMsgs so that the peer is then usable. Also triggers `RegionChangeEvent::Create` here.
func (*Peer) AnyNewPeerCatchUp ¶
Returns `true` if any new peer catches up with the leader in replicating logs, and updates `PeersStartPendingTime` if needed.
func (*Peer) CheckPeers ¶
func (p *Peer) CheckPeers()
Checks and updates `peer_heartbeats` for the peer.
func (*Peer) CheckStaleState ¶
func (p *Peer) CheckStaleState(cfg *Config) StaleState
func (*Peer) CollectDownPeers ¶
Collects all down peers.
func (*Peer) CollectPendingPeers ¶
Collects all pending peers and updates `peers_start_pending_time`.
func (*Peer) Destroy ¶
Does the real destroy task, which includes:

1. Set the region to tombstone;
2. Clear data;
3. Notify all pending requests.
func (*Peer) GetMinProgress ¶
func (*Peer) GetRaftStatus ¶
func (*Peer) HandleRaftReadyAppend ¶
func (p *Peer) HandleRaftReadyAppend(trans Transport, applyMsgs *applyMsgs, kvWB, raftWB *WriteBatch, observer PeerEventObserver) *ReadyICPair
func (*Peer) HandleRaftReadyApply ¶
func (*Peer) HasPendingSnapshot ¶
Returns `true` if the raft group has replicated a snapshot but not committed it yet.
func (*Peer) HeartbeatPd ¶
func (p *Peer) HeartbeatPd(pdScheduler chan<- task)
func (*Peer) IsApplyingSnapshot ¶
func (*Peer) MaybeCampaign ¶
func (*Peer) MaybeDestroy ¶
func (p *Peer) MaybeDestroy() *DestroyPeerJob
Tries to destroy itself. Returns a job (if needed) to do more cleaning tasks.
func (*Peer) MaybeRenewLeaderLease ¶
Tries to renew the leader lease.
func (*Peer) OnRoleChanged ¶
func (p *Peer) OnRoleChanged(observer PeerEventObserver, ready *raft.Ready)
func (*Peer) PostPropose ¶
func (p *Peer) PostPropose(meta *ProposalMeta, isConfChange bool, cb *Callback)
func (*Peer) PostRaftReadyPersistent ¶
func (p *Peer) PostRaftReadyPersistent(trans Transport, applyMsgs *applyMsgs, ready *raft.Ready, invokeCtx *InvokeContext) *ApplySnapResult
func (*Peer) PrePropose ¶
func (*Peer) Propose ¶
func (p *Peer) Propose(kv *mvcc.DBBundle, cfg *Config, cb *Callback, rlog raftlog.RaftLog, errResp *raft_cmdpb.RaftCmdResponse) bool
Propose a request.
Returning true means the request has been proposed successfully.
func (*Peer) ProposeConfChange ¶
func (p *Peer) ProposeConfChange(cfg *Config, req *raft_cmdpb.RaftCmdRequest) (uint64, error)
Fails in such cases:

1. A pending conf change has not been applied yet;
2. Removing the leader is not allowed in the configuration;
3. The conf change makes the raft group unhealthy;
4. The conf change is dropped by the raft group internally.
func (*Peer) ProposeNormal ¶
func (*Peer) ProposeTransferLeader ¶
func (p *Peer) ProposeTransferLeader(cfg *Config, req *raft_cmdpb.RaftCmdRequest, cb *Callback) bool
Returns true if the transfer leader request is accepted.
func (*Peer) ReadyToHandlePendingSnap ¶
func (*Peer) SetRegion ¶
Set the region of a peer.

This will update the region of the peer; the caller must ensure the region has been preserved on a durable device.
func (*Peer) Store ¶
func (p *Peer) Store() *PeerStorage
func (*Peer) TakeApplyProposals ¶
func (p *Peer) TakeApplyProposals() *regionProposal
type PeerEventContext ¶
type PeerEventContext struct {
    LeaderChecker LeaderChecker
    RegionId      uint64
}
type PeerEventObserver ¶
type PeerEventObserver interface {
    // OnPeerCreate will be invoked when a new peer is created.
    OnPeerCreate(ctx *PeerEventContext, region *metapb.Region)
    // OnPeerApplySnap will be invoked when a replica peer's snapshot is applied.
    OnPeerApplySnap(ctx *PeerEventContext, region *metapb.Region)
    // OnPeerDestroy will be invoked when a peer is destroyed.
    OnPeerDestroy(ctx *PeerEventContext)
    // OnSplitRegion will be invoked when a region splits into new regions with corresponding peers.
    OnSplitRegion(derived *metapb.Region, regions []*metapb.Region, peers []*PeerEventContext)
    // OnRegionConfChange will be invoked after a conf change has updated the region's epoch.
    OnRegionConfChange(ctx *PeerEventContext, epoch *metapb.RegionEpoch)
    // OnRoleChange will be invoked after the peer state has changed.
    OnRoleChange(regionId uint64, newState raft.StateType)
}
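A minimal no-op implementation sketch; embedding it lets a caller override only the callbacks it cares about (noopObserver is a hypothetical name):

type noopObserver struct{}

func (noopObserver) OnPeerCreate(ctx *PeerEventContext, region *metapb.Region)    {}
func (noopObserver) OnPeerApplySnap(ctx *PeerEventContext, region *metapb.Region) {}
func (noopObserver) OnPeerDestroy(ctx *PeerEventContext)                          {}
func (noopObserver) OnSplitRegion(derived *metapb.Region, regions []*metapb.Region, peers []*PeerEventContext) {
}
func (noopObserver) OnRegionConfChange(ctx *PeerEventContext, epoch *metapb.RegionEpoch) {}
func (noopObserver) OnRoleChange(regionId uint64, newState raft.StateType)               {}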
type PeerStorage ¶
func NewPeerStorage ¶
func (*PeerStorage) Append ¶
func (ps *PeerStorage) Append(invokeCtx *InvokeContext, entries []eraftpb.Entry, raftWB *WriteBatch) error
Append the given entries to the raft log, using the previous last index or self.last_index, and return the new last index for a later update. After we commit in the engine, we can set last_index to the returned one.
func (*PeerStorage) AppliedIndex ¶
func (ps *PeerStorage) AppliedIndex() uint64
func (*PeerStorage) ApplySnapshot ¶
func (ps *PeerStorage) ApplySnapshot(ctx *InvokeContext, snap *eraftpb.Snapshot, kvWB *WriteBatch, raftWB *WriteBatch) error
Apply the peer with the given snapshot.
func (*PeerStorage) CancelApplyingSnap ¶
func (p *PeerStorage) CancelApplyingSnap() bool
func (*PeerStorage) CheckApplyingSnap ¶
func (p *PeerStorage) CheckApplyingSnap() bool
Check if the storage is applying a snapshot.
func (*PeerStorage) ClearData ¶
func (ps *PeerStorage) ClearData() error
func (*PeerStorage) CompactTo ¶
func (ps *PeerStorage) CompactTo(idx uint64)
func (*PeerStorage) Entries ¶
func (ps *PeerStorage) Entries(low, high, maxSize uint64) ([]eraftpb.Entry, error)
func (*PeerStorage) FirstIndex ¶
func (ps *PeerStorage) FirstIndex() (uint64, error)
func (*PeerStorage) InitialState ¶
func (*PeerStorage) IsApplyingSnapshot ¶
func (ps *PeerStorage) IsApplyingSnapshot() bool
func (*PeerStorage) LastIndex ¶
func (ps *PeerStorage) LastIndex() (uint64, error)
func (*PeerStorage) MaybeGCCache ¶
func (ps *PeerStorage) MaybeGCCache(replicatedIdx, appliedIdx uint64)
func (*PeerStorage) PostReadyPersistent ¶
func (ps *PeerStorage) PostReadyPersistent(ctx *InvokeContext) *ApplySnapResult
Update the memory state after ready changes are flushed to disk successfully.
func (*PeerStorage) Region ¶
func (ps *PeerStorage) Region() *metapb.Region
func (*PeerStorage) SaveReadyState ¶
func (ps *PeerStorage) SaveReadyState(kvWB, raftWB *WriteBatch, ready *raft.Ready) (*InvokeContext, error)
Save memory states to disk.

This function only writes data to the given `WriteBatch`es; it's the caller's duty to write them explicitly to disk. If they are flushed to disk successfully, `PostReadyPersistent` should be called to update the memory states properly. Do not modify `ready` in this function; this is a requirement for advancing the ready object properly later.
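A sketch of that contract, assuming engines (*Engines), ps (*PeerStorage) and a raft.Ready named ready are in scope; error handling is abbreviated and the write ordering is an assumption:

kvWB, raftWB := new(WriteBatch), new(WriteBatch)
ctx, err := ps.SaveReadyState(kvWB, raftWB, &ready)
if err != nil {
    return err
}
// The caller flushes the batches to disk explicitly...
if err = engines.WriteRaft(raftWB); err != nil {
    return err
}
if err = engines.WriteKV(kvWB); err != nil {
    return err
}
// ...and only then updates the in-memory state.
applySnapRes := ps.PostReadyPersistent(ctx)
_ = applySnapRes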
func (*PeerStorage) ScheduleApplyingSnapshot ¶
func (ps *PeerStorage) ScheduleApplyingSnapshot()
func (*PeerStorage) SetRegion ¶
func (ps *PeerStorage) SetRegion(region *metapb.Region)
type ProposalContext ¶
type ProposalContext byte
const (
    ProposalContext_SyncLog      ProposalContext = 1
    ProposalContext_Split        ProposalContext = 1 << 1
    ProposalContext_PrepareMerge ProposalContext = 1 << 2
)
func NewProposalContextFromBytes ¶
func NewProposalContextFromBytes(ctx []byte) *ProposalContext
func (ProposalContext) ToBytes ¶
func (c ProposalContext) ToBytes() []byte
type ProposalMeta ¶
type ProposalQueue ¶
type ProposalQueue struct {
// contains filtered or unexported fields
}
func (*ProposalQueue) Clear ¶
func (q *ProposalQueue) Clear()
func (*ProposalQueue) PopFront ¶
func (q *ProposalQueue) PopFront(term uint64) *ProposalMeta
func (*ProposalQueue) Push ¶
func (q *ProposalQueue) Push(meta *ProposalMeta)
type RaftClient ¶
func (*RaftClient) Flush ¶
func (c *RaftClient) Flush()
func (*RaftClient) Send ¶
func (c *RaftClient) Send(msg *raft_serverpb.RaftMessage)
func (*RaftClient) Stop ¶
func (c *RaftClient) Stop()
type RaftContext ¶
type RaftContext struct {
    *GlobalContext
    ReadyRes []*ReadyICPair
    // contains filtered or unexported fields
}
type RaftInnerServer ¶
type RaftInnerServer struct {
// contains filtered or unexported fields
}
func NewRaftInnerServer ¶
func NewRaftInnerServer(globalConfig *config.Config, engines *Engines, raftConfig *Config) *RaftInnerServer
func (*RaftInnerServer) BatchRaft ¶
func (ris *RaftInnerServer) BatchRaft(stream tikvpb.Tikv_BatchRaftServer) error
func (*RaftInnerServer) GetRaftstoreRouter ¶
func (ris *RaftInnerServer) GetRaftstoreRouter() *RaftstoreRouter
func (*RaftInnerServer) GetStoreMeta ¶
func (ris *RaftInnerServer) GetStoreMeta() *metapb.Store
func (*RaftInnerServer) Raft ¶
func (ris *RaftInnerServer) Raft(stream tikvpb.Tikv_RaftServer) error
func (*RaftInnerServer) SetPeerEventObserver ¶
func (ris *RaftInnerServer) SetPeerEventObserver(ob PeerEventObserver)
func (*RaftInnerServer) Setup ¶
func (ris *RaftInnerServer) Setup(pdClient pd.Client)
func (*RaftInnerServer) Snapshot ¶
func (ris *RaftInnerServer) Snapshot(stream tikvpb.Tikv_SnapshotServer) error
func (*RaftInnerServer) Stop ¶
func (ris *RaftInnerServer) Stop() error
type RaftstoreRouter ¶
type RaftstoreRouter struct {
// contains filtered or unexported fields
}
RaftstoreRouter exports the SendCommand method for other packages.
func (*RaftstoreRouter) SendCommand ¶
func (r *RaftstoreRouter) SendCommand(req *raft_cmdpb.RaftCmdRequest, cb *Callback) error
func (*RaftstoreRouter) SplitRegion ¶
type ReadExecutor ¶
type ReadExecutor struct {
// contains filtered or unexported fields
}
func NewReadExecutor ¶
func NewReadExecutor(checkEpoch bool) *ReadExecutor
func (*ReadExecutor) Execute ¶
func (r *ReadExecutor) Execute(msg *raft_cmdpb.RaftCmdRequest, region *metapb.Region) *raft_cmdpb.RaftCmdResponse
type ReadIndexQueue ¶
type ReadIndexQueue struct {
// contains filtered or unexported fields
}
func (*ReadIndexQueue) ClearUncommitted ¶
func (r *ReadIndexQueue) ClearUncommitted(term uint64)
func (*ReadIndexQueue) NextId ¶
func (r *ReadIndexQueue) NextId() uint64
func (*ReadIndexQueue) PopFront ¶
func (q *ReadIndexQueue) PopFront() *ReadIndexRequest
type ReadIndexRequest ¶
type ReadIndexRequest struct {
// contains filtered or unexported fields
}
func NewReadIndexRequest ¶
func NewReadIndexRequest(id uint64, cmds []*ReqCbPair, renewLeaseTime *time.Time) *ReadIndexRequest
type ReadyICPair ¶
type ReadyICPair struct {
    Ready raft.Ready
    IC    *InvokeContext
}
type RecentAddedPeer ¶
func NewRecentAddedPeer ¶
func NewRecentAddedPeer(rejectDurationAsSecs uint64) *RecentAddedPeer
func (*RecentAddedPeer) Contains ¶
func (r *RecentAddedPeer) Contains(id uint64) bool
type RemoteLease ¶
type RemoteLease struct {
// contains filtered or unexported fields
}
A remote lease; it can only be derived from a `Lease`. It will be sent to the local read thread, hence the name remote. If the Lease expires, the remote must expire too.
func (*RemoteLease) Expire ¶
func (r *RemoteLease) Expire()
func (*RemoteLease) Inspect ¶
func (r *RemoteLease) Inspect(ts *time.Time) LeaseState
func (*RemoteLease) Renew ¶
func (r *RemoteLease) Renew(bound time.Time)
func (*RemoteLease) Term ¶
func (r *RemoteLease) Term() uint64
type ReqCbPair ¶
type ReqCbPair struct {
    Req *raft_cmdpb.RaftCmdRequest
    Cb  *Callback
}
type RequestInspector ¶
type RequestInspector interface {
// contains filtered or unexported methods
}
type RequestPolicy ¶
type RequestPolicy int
const (
    // Handle the read request directly without dispatch.
    RequestPolicy_ReadLocal RequestPolicy = 0 + iota
    // Handle the read request via raft's SafeReadIndex mechanism.
    RequestPolicy_ReadIndex
    RequestPolicy_ProposeNormal
    RequestPolicy_ProposeTransferLeader
    RequestPolicy_ProposeConfChange
    RequestPolicy_Invalid
)
func Inspect ¶
func Inspect(i RequestInspector, req *raft_cmdpb.RaftCmdRequest) (RequestPolicy, error)
type ServerTransport ¶
type ServerTransport struct {
// contains filtered or unexported fields
}
func NewServerTransport ¶
func NewServerTransport(raftClient *RaftClient, snapScheduler chan<- task, router *router) *ServerTransport
func (*ServerTransport) Flush ¶
func (t *ServerTransport) Flush()
func (*ServerTransport) ReportSnapshotStatus ¶
func (t *ServerTransport) ReportSnapshotStatus(msg *raft_serverpb.RaftMessage, status raft.SnapshotStatus)
func (*ServerTransport) ReportUnreachable ¶
func (t *ServerTransport) ReportUnreachable(msg *raft_serverpb.RaftMessage)
func (*ServerTransport) Send ¶
func (t *ServerTransport) Send(msg *raft_serverpb.RaftMessage) error
func (*ServerTransport) SendSnapshotSock ¶
func (t *ServerTransport) SendSnapshotSock(msg *raft_serverpb.RaftMessage)
type Snap ¶
type Snap struct {
    CFFiles   []*CFFile
    MetaFile  *MetaFile
    SizeTrack *int64
    // contains filtered or unexported fields
}
func NewSnapForApplying ¶
func NewSnapForBuilding ¶
func NewSnapForReceiving ¶
func NewSnapForReceiving(dir string, key SnapKey, snapshotMeta *rspb.SnapshotMeta, sizeTrack *int64, deleter SnapshotDeleter, limiter *IOLimiter) (*Snap, error)
func NewSnapForSending ¶
func (*Snap) Apply ¶
func (s *Snap) Apply(opts ApplyOptions) (ApplyResult, error)
func (*Snap) Build ¶
func (s *Snap) Build(dbSnap *regionSnapshot, region *metapb.Region, snapData *rspb.RaftSnapshotData, stat *SnapStatistics, deleter SnapshotDeleter) error
type SnapKey ¶
func SnapKeyFromRegionSnap ¶
type SnapKeyWithSending ¶
type SnapManager ¶
type SnapManager struct {
    MaxTotalSize uint64
    // contains filtered or unexported fields
}
func NewSnapManager ¶
func NewSnapManager(path string, router *router) *SnapManager
func (*SnapManager) DeleteSnapshot ¶
func (sm *SnapManager) DeleteSnapshot(key SnapKey, snapshot Snapshot, checkEntry bool) bool
func (*SnapManager) Deregister ¶
func (sm *SnapManager) Deregister(key SnapKey, entry SnapEntry)
func (*SnapManager) GetSnapshotForApplying ¶
func (sm *SnapManager) GetSnapshotForApplying(snapKey SnapKey) (Snapshot, error)
func (*SnapManager) GetSnapshotForBuilding ¶
func (sm *SnapManager) GetSnapshotForBuilding(key SnapKey) (Snapshot, error)
func (*SnapManager) GetSnapshotForReceiving ¶
func (sm *SnapManager) GetSnapshotForReceiving(snapKey SnapKey, data []byte) (Snapshot, error)
func (*SnapManager) GetSnapshotForSending ¶
func (sm *SnapManager) GetSnapshotForSending(snapKey SnapKey) (Snapshot, error)
func (*SnapManager) GetTotalSnapSize ¶
func (sm *SnapManager) GetTotalSnapSize() uint64
func (*SnapManager) HasRegistered ¶
func (sm *SnapManager) HasRegistered(key SnapKey) bool
func (*SnapManager) ListIdleSnap ¶
func (sm *SnapManager) ListIdleSnap() ([]SnapKeyWithSending, error)
func (*SnapManager) Register ¶
func (sm *SnapManager) Register(key SnapKey, entry SnapEntry)
func (*SnapManager) Stats ¶
func (sm *SnapManager) Stats() SnapStats
type SnapManagerBuilder ¶
type SnapManagerBuilder struct {
// contains filtered or unexported fields
}
func (*SnapManagerBuilder) Build ¶
func (smb *SnapManagerBuilder) Build(path string, router *router) *SnapManager
func (*SnapManagerBuilder) MaxTotalSize ¶
func (smb *SnapManagerBuilder) MaxTotalSize(v uint64) *SnapManagerBuilder
type SnapState ¶
type SnapState struct {
    StateType SnapStateType
    Status    *JobStatus
    Receiver  chan *eraftpb.Snapshot
}
type SnapStateType ¶
type SnapStateType int
const (
    SnapState_Relax SnapStateType = 0 + iota
    SnapState_Generating
    SnapState_Applying
    SnapState_ApplyAborted
)
type SnapStatistics ¶
type Snapshot ¶
type Snapshot interface {
    io.Reader
    io.Writer
    Build(dbBundle *regionSnapshot, region *metapb.Region, snapData *rspb.RaftSnapshotData, stat *SnapStatistics, deleter SnapshotDeleter) error
    Path() string
    Exists() bool
    Delete()
    Meta() (os.FileInfo, error)
    TotalSize() uint64
    Save() error
    Apply(option ApplyOptions) (ApplyResult, error)
}
`Snapshot` is an interface for snapshots. It's used in these scenarios (a small replication sketch follows the list):
- build local snapshot
- read local snapshot and then replicate it to remote raftstores
- receive snapshot from remote raftstore and write it to local storage
- apply snapshot
- snapshot gc
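Because `Snapshot` embeds io.Reader, the replication scenario can be a plain stream copy. A hedged sketch, where dst is any io.Writer (for example a gRPC stream wrapper):

import (
    "errors"
    "io"
)

// sendSnapshot streams an existing snapshot's bytes to a destination.
func sendSnapshot(s Snapshot, dst io.Writer) error {
    if !s.Exists() {
        return errors.New("snapshot files do not exist")
    }
    _, err := io.Copy(dst, s)
    return err
}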
type SnapshotDeleter ¶
type SnapshotDeleter interface {
    // DeleteSnapshot returns true if it successfully deletes the specified snapshot.
    DeleteSnapshot(key SnapKey, snapshot Snapshot, checkEntry bool) bool
}
`SnapshotDeleter` is an interface for deleting snapshots. It's used to ensure that snapshot deletion happens under the protection of locking, to avoid races with concurrent reads and writes.
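A minimal implementation sketch that delegates to a SnapManager, whose DeleteSnapshot method has a matching signature (managerDeleter is a hypothetical name):

type managerDeleter struct {
    sm *SnapManager
}

func (d managerDeleter) DeleteSnapshot(key SnapKey, snapshot Snapshot, checkEntry bool) bool {
    return d.sm.DeleteSnapshot(key, snapshot, checkEntry)
}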
type StaleState ¶
type StaleState int
const (
    StaleStateValid StaleState = 0 + iota
    StaleStateToValidate
    StaleStateLeaderMissing
)
type StoreContext ¶
type StoreContext struct {
    *GlobalContext
    // contains filtered or unexported fields
}
type StoreLabel ¶
type StoreLabel struct {
LabelKey, LabelValue string
}
type TestRaftWriter ¶
type TestRaftWriter struct {
// contains filtered or unexported fields
}
TestRaftWriter is used to mock raft-write-related prewrite and commit operations without sending real raft commands.
func (*TestRaftWriter) Close ¶
func (w *TestRaftWriter) Close()
func (*TestRaftWriter) DeleteRange ¶
func (w *TestRaftWriter) DeleteRange(start, end []byte, latchHandle mvcc.LatchHandle) error
func (*TestRaftWriter) NewWriteBatch ¶
func (w *TestRaftWriter) NewWriteBatch(startTS, commitTS uint64, ctx *kvrpcpb.Context) mvcc.WriteBatch
func (*TestRaftWriter) Open ¶
func (w *TestRaftWriter) Open()
func (*TestRaftWriter) Write ¶
func (w *TestRaftWriter) Write(batch mvcc.WriteBatch) error
type Transport ¶
type Transport interface {
Send(msg *rspb.RaftMessage) error
}
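Since the interface has a single method, a channel-backed mock is enough for tests. A sketch (chanTransport is a hypothetical name):

import "errors"

type chanTransport struct {
    msgs chan *rspb.RaftMessage
}

func (t *chanTransport) Send(msg *rspb.RaftMessage) error {
    select {
    case t.msgs <- msg:
        return nil
    default:
        return errors.New("transport channel full")
    }
}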
type WaitApplyResultState ¶
type WaitApplyResultState struct {
// contains filtered or unexported fields
}
A struct that stores the state while waiting for the `PrepareMerge` apply result.

When handling the apply result of a `CommitMerge`, the source peer may not have handled the apply result of the `PrepareMerge` yet, so the target peer has to abort the current handling process and wait for it asynchronously.
type WriteBatch ¶
type WriteBatch struct {
// contains filtered or unexported fields
}
func (*WriteBatch) Delete ¶
func (wb *WriteBatch) Delete(key y.Key)
func (*WriteBatch) DeleteLock ¶
func (wb *WriteBatch) DeleteLock(key []byte)
func (*WriteBatch) Len ¶
func (wb *WriteBatch) Len() int
func (*WriteBatch) MustWriteToKV ¶
func (wb *WriteBatch) MustWriteToKV(db *mvcc.DBBundle)
func (*WriteBatch) MustWriteToRaft ¶
func (wb *WriteBatch) MustWriteToRaft(db *badger.DB)
func (*WriteBatch) Reset ¶
func (wb *WriteBatch) Reset()
func (*WriteBatch) Rollback ¶
func (wb *WriteBatch) Rollback(key y.Key)
func (*WriteBatch) RollbackToSafePoint ¶
func (wb *WriteBatch) RollbackToSafePoint()
func (*WriteBatch) SetLock ¶
func (wb *WriteBatch) SetLock(key, val []byte)
func (*WriteBatch) SetSafePoint ¶
func (wb *WriteBatch) SetSafePoint()
func (*WriteBatch) SetWithUserMeta ¶
func (wb *WriteBatch) SetWithUserMeta(key y.Key, val, userMeta []byte)
func (*WriteBatch) WriteToKV ¶
func (wb *WriteBatch) WriteToKV(bundle *mvcc.DBBundle) error
WriteToKV flushes the WriteBatch to the DB in two steps:

- Write the entries to badger. After the ApplyState is saved to badger, a subsequent regionSnapshot will start at the new raft index.
- Update the lockStore. The data in the lockStore may be older than the DB, so we need to restore the entries from the raft log.
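A minimal usage sketch, assuming a *mvcc.DBBundle named bundle is in scope and that the badger fork exposes y.KeyWithTs for building a y.Key (an assumption, not an API documented here):

wb := new(WriteBatch)
wb.Set(y.KeyWithTs([]byte("k1"), KvTS), []byte("v1")) // y.KeyWithTs: assumed helper
if err := wb.WriteToKV(bundle); err != nil {
    return err
}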
func (*WriteBatch) WriteToRaft ¶
func (wb *WriteBatch) WriteToRaft(db *badger.DB) error
Source Files ¶
- applier.go
- bootstrap.go
- cmd_resp.go
- config.go
- db_writer.go
- engine.go
- entry.go
- error.go
- fsm_peer.go
- fsm_store.go
- io_limiter.go
- keys.go
- msg.go
- node.go
- pd_task_handler.go
- peer.go
- peer_storage.go
- peer_worker.go
- raft_client.go
- read.go
- restore.go
- router.go
- server.go
- snap.go
- snapRunner.go
- snap_applier.go
- snap_builder.go
- snap_codec.go
- snap_manager.go
- state.go
- ticker.go
- transport.go
- util.go
- worker.go