server

package
v0.6.1
Published: Jun 17, 2018 License: Apache-2.0 Imports: 63 Imported by: 0

Documentation

Overview

Copyright 2016 PingCAP, Inc.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Index

Constants

const (
	HTTP_OK = iota
	HTTP_ERROR
	HTTP_ERROR_PARAMETER_NOT_ENOUGH
	HTTP_ERROR_INVALID_PARAM
	HTTP_ERROR_DATABASE_FIND
	HTTP_ERROR_TABLE_FIND
	HTTP_ERROR_TABLE_DELETED
	HTTP_ERROR_RANGE_CREATE
	HTTP_ERROR_CLUSTER_HAS_NO_LEADER
	HTTP_ERROR_MASTER_IS_NOT_LEADER
	HTTP_ERROR_WRONG_SIGN
	HTTP_ERROR_SIGN_TIMEOUT
	HTTP_ERROR_INVALID_SIGNTIME
	HTTP_ERROR_RANGE_FIND
	HTTP_ERROR_RANGE_SPLIT
	HTTP_ERROR_DATABASE_EXISTED
	HTTP_ERROR_TASK_FIND
	HTTP_ERROR_CLUSTERID
	HTTP_ERROR_NODE_FIND
	HTTP_ERROR_RANGE_BUSY
	HTTP_ERROR_PEER_FIND
)
const (
	HTTP_DB_NAME                    = "dbName"
	HTTP_DB_ID                      = "dbId"
	HTTP_TABLE_NAME                 = "tableName"
	HTTP_TABLE_ID                   = "tableId"
	HTTP_CLUSTER_ID                 = "clusterId"
	HTTP_RANGE_ID                   = "rangeId"
	HTTP_NODE_ID                    = "nodeId"
	HTTP_NODE_IDS                   = "nodeIds"
	HTTP_PEER_ID                    = "peerId"
	HTTP_NAME                       = "name"
	HTTP_PROPERTIES                 = "properties"
	HTTP_PKDUPCHECK                 = "pkDupCheck"
	HTTP_RANGEKEYS_NUM              = "rangeKeysNum"
	HTTP_RANGEKEYS_START            = "rangeKeysStart"
	HTTP_RANGEKEYS_END              = "rangeKeysEnd"
	HTTP_RANGEKEYS                  = "rangeKeys"
	HTTP_POLICY                     = "policy"
	HTTP_D                          = "d"
	HTTP_S                          = "s"
	HTTP_TOKEN                      = "token"
	HTTP_SQL                        = "sql"
	HTTP_SERVER_PORT                = "serverPort"
	HTTP_RAFT_HEARTBEAT_PORT        = "raftHeartbeatPort"
	HTTP_RAFT_REPLICA_PORT          = "raftReplicaPort"
	HTTP_TASK_ID                    = "taskId"
	HTTP_TASK_IDS                   = "taskIds"
	HTTP_MACHINES                   = "machines"
	HTTP_CLUSTER_AUTO_SCHEDULE_INFO = "clusterAutoScheduleInfo"
	HTTP_AUTO_TRANSFER_UNABLE       = "autoTransferUnable"
	HTTP_AUTO_FAILOVER_UNABLE       = "autoFailoverUnable"
	HTTP_AUTO_SPLIT_UNABLE          = "autoSplitUnable"
	HTTP_TABLE_AUTO_INFO            = "tableAutoInfo"
	HTTP_FAST                       = "fast"
	HTTP_STARTKEY                   = "startKey"
	HTTP_ENDKEY                     = "endKey"
)
const (
	Min_leader_balance_num = 5
	Min_leader_adjust_num  = 50
)
const (
	EVENT_TYPE_INIT = iota // must never be used
	EVENT_TYPE_ADD_PEER
	EVENT_TYPE_DEL_PEER
	EVENT_TYPE_CHANGE_LEADER
	EVENT_TYPE_DEL_RANGE
)
const (
	SUCCESS            int32 = 0
	ER_NOT_LEADER            = 1
	ER_SERVER_BUSY           = 2
	ER_SERVER_STOP           = 3
	ER_READ_ONLY             = 4
	ER_ENTITY_NOT_EXIT       = 5
	ER_UNKNOWN               = 6
)
const (
	KB uint64 = 1024
	MB        = 1024 * KB
	GB        = 1024 * MB
	PB        = 1024 * GB
)
const CacheSize = 100
const DefaultConfig = `` /* 1623-byte string literal not displayed */
const DefaultFactor = 0.75
const (
	ROUTE_SUBSCRIBE = "route_subscribe"
)

Variables

var (
	ERR_NO_SELECTED_NODE        = errors.New("not selected node")
	ErrInternalError            = errors.New("internal error")
	ErrGenID                    = errors.New("gen ID failed")
	ErrDupDatabase              = errors.New("duplicate database")
	ErrDupTable                 = errors.New("duplicate table")
	ErrNotExistDatabase         = errors.New("database not exist")
	ErrNotExistTable            = errors.New("table not exist")
	ErrNotExistNode             = errors.New("node not exist")
	ErrNotActiveNode            = errors.New("node is not up")
	ErrNotExistRange            = errors.New("range not exist")
	ErrExistsRange              = errors.New("range exist")
	ErrNotExistPeer             = errors.New("range peer not exist")
	ErrNotEnoughResources       = errors.New("not enough resources")
	ErrInvalidParam             = errors.New("invalid param")
	ErrInvalidColumn            = errors.New("invalid column")
	ErrColumnNameTooLong        = errors.New("column name is too long")
	ErrDupColumnName            = errors.New("duplicate column name")
	ErrPkMustNotNull            = errors.New("primary key must be not nullable")
	ErrMissingPk                = errors.New("missing primary key")
	ErrPkMustNotSetDefaultValue = errors.New("primary key should not set defaultvalue")
	ErrNodeRejectNewPeer        = errors.New("node reject new peer")
	ErrNodeBlocked              = errors.New("node is blocked")
	ErrNodeStateConfused        = errors.New("confused node state")
	ErrSchedulerExisted         = errors.New("scheduler is existed")
	ErrSchedulerNotFound        = errors.New("scheduler is not found")
	ErrWorkerExisted            = errors.New("worker is existed")
	ErrWorkerNotFound           = errors.New("worker is not found")
	ErrSqlReservedWord          = errors.New("sql reserved word")
	ErrSQLSyntaxError           = errors.New("Syntax error")
	ErrRangeMetaConflict        = errors.New("range meta conflict")
	ErrNotFound                 = errors.New("entity not found")
	ErrNotAllowSplit            = errors.New("not allow split")
	ErrNotCancel                = errors.New("not allow cancel")
	ErrNotAllowDelete           = errors.New("not allow delete")

	ErrRangeStatusErr = errors.New("range status is invalid")
)
var (
	DefaultMaxNodeDownTimeInterval time.Duration = 60 * time.Second
	DefaultMaxPeerDownTimeInterval time.Duration = 2 * 60 * time.Second

	// longer than one schedule cycle plus one heartbeat cycle, with some margin reserved
	DefaultChangeLeaderTimeout time.Duration = time.Second * time.Duration(30)
	DefaultDelRangeTimeout     time.Duration = time.Second * time.Duration(30)
	DefaultAddPeerTimeout      time.Duration = time.Second * time.Duration(300)
	DefaultDelPeerTimeout      time.Duration = time.Second * time.Duration(30)
)
var (
	DefaultFaultTimeout  = time.Minute
	DefaultMaxBigTaskNum = 3
	DefaultMaxTaskNum    = 50
)
var (
	Min_range_balance_num = 10
	Min_range_adjust_num  = 50
)
var (
	// units are seconds
	DefaultDownTimeLimit      = 60 * time.Second
	MaxDownReplicaTimeLimit   = 5 * 60 * time.Second
	DefaultDsHearbeatInterval = 10 * time.Second
	DefaultDsRecoveryInterim  = 5 * 60 * time.Second

	DefaultTimeFormat = "2006-01-02 15:04:05"
	// longer than one schedule cycle plus one heartbeat cycle, with some margin reserved
	DefaultChangeLeaderTaskTimeout = time.Second * time.Duration(30)
	DefaultRangeDeleteTaskTimeout  = time.Second * time.Duration(30)
	DefaultRangeAddPeerTaskTimeout = time.Second * time.Duration(300)
	DefaultRangeDelPeerTaskTimeout = time.Second * time.Duration(30)
)
var AUTO_INCREMENT_ID string = fmt.Sprintf("$auto_increment_id")
var DefaultMaxSubmitTimeout time.Duration = time.Second * 60
var DefaultRaftLogCount uint64 = 10000
var DefaultRetentionTime = time.Hour * time.Duration(72)
var ErrUnknownCommandType = errors.New("unknown command type")
var (
	MAX_COLUMN_NAME_LENGTH = 128
)
var PREFIX_AUTO_FAILOVER string = fmt.Sprintf("$auto_failover_%d")
var PREFIX_AUTO_FAILOVER_TABLE string = fmt.Sprintf("$auto_failover_table_%d")
var PREFIX_AUTO_SPLIT string = fmt.Sprintf("$auto_split_%d")
var PREFIX_AUTO_TRANSFER string = fmt.Sprintf("$auto_transfer_%d")
var PREFIX_AUTO_TRANSFER_TABLE string = fmt.Sprintf("$auto_transfer_table_%d")
var PREFIX_DB string = fmt.Sprintf("schema%sdb%s", SCHEMA_SPLITOR, SCHEMA_SPLITOR)
var PREFIX_DELETED_RANGE string = fmt.Sprintf("schema%sdeleted_range%s", SCHEMA_SPLITOR, SCHEMA_SPLITOR)
var PREFIX_NODE string = fmt.Sprintf("schema%snode%s", SCHEMA_SPLITOR, SCHEMA_SPLITOR)
var PREFIX_PRE_GC string = fmt.Sprintf("schema%spre_gc%s", SCHEMA_SPLITOR, SCHEMA_SPLITOR)
var PREFIX_RANGE string = fmt.Sprintf("schema%srange%s", SCHEMA_SPLITOR, SCHEMA_SPLITOR)
var PREFIX_REPLICA string = fmt.Sprintf("schema%sreplica%s", SCHEMA_SPLITOR, SCHEMA_SPLITOR)
var PREFIX_TABLE string = fmt.Sprintf("schema%stable%s", SCHEMA_SPLITOR, SCHEMA_SPLITOR)
var PREFIX_TASK string = fmt.Sprintf("schema%stask%s", SCHEMA_SPLITOR, SCHEMA_SPLITOR)
var SCHEMA_SPLITOR string = " "

NOTE: the prefix's first character must not be '\xff'

var SQLReservedWord = []string{}/* 424 elements not displayed */

Functions

func ColumnParse

func ColumnParse(cols []string) ([]*metapb.Column, error)

func EditProperties

func EditProperties(properties string) ([]*metapb.Column, error)

func GetTypeByName

func GetTypeByName(name string) metapb.DataType

func NewBalanceNodeOpsWorker

func NewBalanceNodeOpsWorker(wm *WorkerManager, interval time.Duration) *balanceNodeOpsWorker

func NewBalanceNodeRangeWorker

func NewBalanceNodeRangeWorker(wm *WorkerManager, interval time.Duration) *balanceNodeRangeWorker

func NewHBRangeManager

func NewHBRangeManager(cluster *Cluster) *hb_range_manager

func NewHandler

func NewHandler(valid ValidHandler, handler HttpHandler) server.ServiceHttpHandler

func NewIDGenerator

func NewIDGenerator(key []byte, step uint64, store Store) *idGenerator

func ParseProperties

func ParseProperties(properties string) ([]*metapb.Column, []*metapb.Column, error)

func ScopeSplit

func ScopeSplit(a, b []byte, n uint64, charSet []byte) ([][]byte, error)
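
A minimal usage sketch (not taken from the package docs) of how ScopeSplit might pre-split a key scope into range boundaries; the bounds, count, and character set are illustrative assumptions, and "fmt" is assumed to be imported.

func ExampleScopeSplit() {
	// Sketch only: split the scope ["a", "z") into 4 sub-scopes over a lowercase alphabet.
	keys, err := ScopeSplit([]byte("a"), []byte("z"), 4, []byte("abcdefghijklmnopqrstuvwxyz"))
	if err != nil {
		return
	}
	for _, k := range keys {
		fmt.Printf("split key: %q\n", k)
	}
}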

func SelectMostAndLeastLeaderNode

func SelectMostAndLeastLeaderNode(nodes []*Node, selectors []NodeSelector) (*Node, *Node)

* return:

	the normal node with the most leaders
	the normal node with the fewest leaders

func SelectMostAndLeastRangeNode

func SelectMostAndLeastRangeNode(opt *scheduleOption, nodes []*Node, selectors []NodeSelector) (*Node, *Node, bool)

* return:

	the normal node with the most ranges (or one whose available-space ratio is low)
	the normal node with the fewest ranges

func SqlParse

func SqlParse(_sql string) (t *metapb.Table, err error)

func ToEventStatusName

func ToEventStatusName(status EventStatus) string

func ToEventTypeName

func ToEventTypeName(eventType EventType) string

func ToTableProperty

func ToTableProperty(cols []*metapb.Column) (string, error)

Types

type AddPeerEvent

type AddPeerEvent struct {
	RangeEventMeta
	// contains filtered or unexported fields
}

func NewAddPeerEvent

func NewAddPeerEvent(id, rangeId uint64, p *metapb.Peer, creator string) *AddPeerEvent

func (*AddPeerEvent) Execute

func (e *AddPeerEvent) Execute(cluster *Cluster, r *Range) (ExecNextEvent, *taskpb.Task, error)

type AlarmConfig

type AlarmConfig struct {
	ServerAddress         string           `toml:"server-address" json:"server-address"`
	ServerPort            int              `toml:"server-port,omitempty" json:"port"`
	MessageGatewayAddress string           `toml:"message-gateway-address,omitempty" json:"message-gateway-address"`
	Receivers             []*AlarmReceiver `toml:"receivers,omitempty" json:"receivers"`
}

type AlarmReceiver

type AlarmReceiver struct {
	Mail string `toml:"mail,omitempty" json:"mail"`
	Sms  string `toml:"sms,omitempty" json:"sms"`
}

type Batch

type Batch interface {
	Put(key []byte, value []byte)
	Delete(key []byte)

	Commit() error
}

type ByLetter

type ByLetter [][]byte

func (ByLetter) Len

func (s ByLetter) Len() int

func (ByLetter) Less

func (s ByLetter) Less(i, j int) bool

func (ByLetter) Swap

func (s ByLetter) Swap(i, j int)

type ByPrimaryKey

type ByPrimaryKey []*metapb.Column

func (ByPrimaryKey) Len

func (s ByPrimaryKey) Len() int

func (ByPrimaryKey) Less

func (s ByPrimaryKey) Less(i, j int) bool

func (ByPrimaryKey) Swap

func (s ByPrimaryKey) Swap(i, j int)

type Cluster

type Cluster struct {
	// contains filtered or unexported fields
}

func NewCluster

func NewCluster(clusterId, nodeId uint64, store Store, opt *scheduleOption) *Cluster

func (*Cluster) AddBalanceLeaderWorker

func (c *Cluster) AddBalanceLeaderWorker()

func (*Cluster) AddBalanceNodeOpsWorker

func (c *Cluster) AddBalanceNodeOpsWorker()

func (*Cluster) AddBalanceRangeWorker

func (c *Cluster) AddBalanceRangeWorker()

func (*Cluster) AddCreateTableWorker

func (c *Cluster) AddCreateTableWorker()

func (*Cluster) AddDeleteTableWorker

func (c *Cluster) AddDeleteTableWorker()

func (*Cluster) AddEvent

func (c *Cluster) AddEvent(event RangeEvent) bool

func (*Cluster) AddFailoverWorker

func (c *Cluster) AddFailoverWorker()

func (*Cluster) AddNode

func (c *Cluster) AddNode(node *Node) error

func (*Cluster) AddRange

func (c *Cluster) AddRange(r *Range)

func (*Cluster) AddRangeHbCheckWorker

func (c *Cluster) AddRangeHbCheckWorker()

func (*Cluster) AddTrashReplicaGCWorker

func (c *Cluster) AddTrashReplicaGCWorker()

func (*Cluster) CancelTable

func (c *Cluster) CancelTable(dbName, tName string) error

func (*Cluster) Close

func (c *Cluster) Close()

func (*Cluster) CreateDatabase

func (c *Cluster) CreateDatabase(name string, properties string) (*Database, error)

func (*Cluster) CreateTable

func (c *Cluster) CreateTable(dbName, tableName string, columns, regxs []*metapb.Column, pkDupCheck bool, sliceKeys [][]byte) (*Table, error)

Step 1: create the table. Step 2: create the ranges on the remote data servers. Step 3: add the ranges to cache and disk.
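
A hedged sketch of that flow from the caller's side; the database name, table name, and properties JSON below are illustrative assumptions, not values from the package.

func createTableSketch(c *Cluster, propertiesJSON string) (*Table, error) {
	// Parse the column and regx definitions from the assumed properties JSON.
	cols, regxs, err := ParseProperties(propertiesJSON)
	if err != nil {
		return nil, err
	}
	// Create the table with primary-key duplicate checking enabled and no
	// pre-split keys; "db0" and "t0" are hypothetical names.
	return c.CreateTable("db0", "t0", cols, regxs, true, nil)
}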

func (*Cluster) DeleteDatabase

func (c *Cluster) DeleteDatabase(name string) error

func (*Cluster) DeleteNodeByAddr

func (c *Cluster) DeleteNodeByAddr(addr string) error

func (*Cluster) DeleteNodeById

func (c *Cluster) DeleteNodeById(id uint64) error

TODO: what about non-faulty nodes???

func (*Cluster) DeleteRange

func (c *Cluster) DeleteRange(rangeId uint64)

func (*Cluster) DeleteTable

func (c *Cluster) DeleteTable(dbName, tableName string, fast bool) (*Table, error)

func (*Cluster) EditTable

func (c *Cluster) EditTable(t *Table, properties string) error

func (*Cluster) FindDatabase

func (c *Cluster) FindDatabase(name string) (*Database, bool)

func (*Cluster) FindDatabaseById

func (c *Cluster) FindDatabaseById(id uint64) (*Database, bool)

func (*Cluster) FindDeleteTableById

func (c *Cluster) FindDeleteTableById(tableId uint64) (*Table, bool)

func (*Cluster) FindNodeByAddr

func (c *Cluster) FindNodeByAddr(addr string) *Node

func (*Cluster) FindNodeById

func (c *Cluster) FindNodeById(id uint64) *Node

func (*Cluster) FindPreGCRangeById

func (c *Cluster) FindPreGCRangeById(rangeId uint64) (*metapb.Range, bool)

func (*Cluster) FindRange

func (c *Cluster) FindRange(id uint64) *Range

func (*Cluster) FindTableById

func (c *Cluster) FindTableById(tableId uint64) (*Table, bool)

func (*Cluster) GenId

func (c *Cluster) GenId() (uint64, error)

func (*Cluster) GetAllActiveNode

func (c *Cluster) GetAllActiveNode() []*Node

func (*Cluster) GetAllDatabase

func (c *Cluster) GetAllDatabase() []*Database

func (*Cluster) GetAllEvent

func (c *Cluster) GetAllEvent() []RangeEvent

func (*Cluster) GetAllNode

func (c *Cluster) GetAllNode() []*Node

func (*Cluster) GetAllRanges

func (c *Cluster) GetAllRanges() []*Range

func (*Cluster) GetAllUnhealthyRanges

func (c *Cluster) GetAllUnhealthyRanges() []*Range

func (*Cluster) GetAllUnstableRanges

func (c *Cluster) GetAllUnstableRanges() []*Range

func (*Cluster) GetAllWorker

func (c *Cluster) GetAllWorker() map[string]bool

func (*Cluster) GetClusterId

func (c *Cluster) GetClusterId() uint64

func (*Cluster) GetEvent

func (c *Cluster) GetEvent(rangeID uint64) RangeEvent

func (*Cluster) GetLeader

func (c *Cluster) GetLeader() *Peer

func (*Cluster) GetNodeId

func (c *Cluster) GetNodeId(serverAddr, raftAddr, httpAddr, version string) (*Node, bool, error)

Gets the nodeId and cleans up the command.

func (*Cluster) GetNodeRangeStatByTable

func (c *Cluster) GetNodeRangeStatByTable(tableId uint64) map[uint64]int

func (*Cluster) GetTableAllRanges

func (c *Cluster) GetTableAllRanges(tableId uint64) []*Range

func (*Cluster) GetWorkerInfo

func (c *Cluster) GetWorkerInfo(workerName string) string

func (*Cluster) IsLeader

func (c *Cluster) IsLeader() bool

func (*Cluster) LoadCache

func (c *Cluster) LoadCache() error

func (*Cluster) LoginNode

func (c *Cluster) LoginNode(nodeId uint64, force bool) error

func (*Cluster) LogoutNode

func (c *Cluster) LogoutNode(nodeId uint64) error

func (*Cluster) MultipleSearchRanges

func (c *Cluster) MultipleSearchRanges(key []byte, num int) []*Range

func (*Cluster) NodeLogin

func (c *Cluster) NodeLogin(nodeId uint64) error

func (*Cluster) NodeUpgrade

func (c *Cluster) NodeUpgrade(nodeID uint64) error

func (*Cluster) RemoveEvent

func (c *Cluster) RemoveEvent(event RangeEvent)

func (*Cluster) RemoveWorker

func (c *Cluster) RemoveWorker(name string) error

func (*Cluster) ReplaceRange

func (c *Cluster) ReplaceRange(old *metapb.Range, new *Range, toGc []*metapb.Peer)

func (*Cluster) ReplaceRangeRemote

func (c *Cluster) ReplaceRangeRemote(addr string, oldRangeId uint64, newRange *metapb.Range) error

func (*Cluster) SearchRange

func (c *Cluster) SearchRange(key []byte) *Range

func (*Cluster) Start

func (c *Cluster) Start()

func (*Cluster) UpdateAutoScheduleInfo

func (c *Cluster) UpdateAutoScheduleInfo(autoFailoverUnable, autoTransferUnable, autoSplitUnable bool) error

func (*Cluster) UpdateLeader

func (c *Cluster) UpdateLeader(leader *Peer)

func (*Cluster) UpdateNode

func (c *Cluster) UpdateNode(node *Node) error

func (*Cluster) UpdateNodeState

func (c *Cluster) UpdateNodeState(n *Node, state metapb.NodeState) error

func (*Cluster) UpdateRangeEpochRemote

func (c *Cluster) UpdateRangeEpochRemote(r *Range, epoch *metapb.RangeEpoch) error

func (*Cluster) UpdateRangeRemote

func (c *Cluster) UpdateRangeRemote(addr string, r *metapb.Range) error

func (*Cluster) UpgradeNode

func (c *Cluster) UpgradeNode(nodeId uint64) error

type ClusterConfig

type ClusterConfig struct {
	ClusterID uint64         `toml:"cluster-id,omitempty" json:"cluster-id"`
	Peers     []*ClusterPeer `toml:"peer,omitempty" json:"peer"`
}

type ClusterIDGenerator

type ClusterIDGenerator struct {
	// contains filtered or unexported fields
}

func (ClusterIDGenerator) GenID

func (id ClusterIDGenerator) GenID() (uint64, error)

type ClusterPeer

type ClusterPeer struct {
	ID        uint64 `toml:"id,omitempty" json:"id"`
	Host      string `toml:"host,omitempty" json:"host"`
	HttpPort  int    `toml:"http-port,omitempty" json:"http-port"`
	RpcPort   int    `toml:"rpc-port,omitempty" json:"rpc-port"`
	RaftPorts []int  `toml:"raft-ports,omitempty" json:"raft-ports"`
}

type Config

type Config struct {
	Name   string `toml:"name,omitempty" json:"name"`
	NodeId uint64 `toml:"node-id,omitempty" json:"node-id"`

	Role      string `toml:"role,omitempty" json:"role"`
	Version   string `toml:"version,omitempty" json:"version"`
	SecretKey string `toml:"secret-key,omitempty" json:"secret-key"`
	DataPath  string `toml:"data-dir,omitempty" json:"data-dir"`

	Cluster     ClusterConfig     `toml:"cluster,omitempty" json:"cluster"`
	Raft        RaftConfig        `toml:"raft,omitempty" json:"raft"`
	Log         LogConfig         `toml:"log,omitempty" json:"log"`
	Metric      MetricConfig      `toml:"metric,omitempty" json:"metric"`
	Schedule    ScheduleConfig    `toml:"schedule,omitempty" json:"schedule"`
	Replication ReplicationConfig `toml:"replication,omitempty" json:"replication"`

	Threshold metric.ThresholdConfig `toml:"threshold,omitempty" json:"threshold"`
	Alarm     AlarmConfig            `toml:"alarm,omitempty" json:"alarm"`
	// contains filtered or unexported fields
}

func NewDefaultConfig

func NewDefaultConfig() *Config

func (*Config) LoadFromFile

func (c *Config) LoadFromFile(path string) error
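
A minimal sketch, assuming the usual pattern of seeding defaults with NewDefaultConfig before loading a TOML file; the path is a placeholder, not a path defined by the package.

func loadConfigSketch() (*Config, error) {
	conf := NewDefaultConfig()
	// Hypothetical config file location; pass the real path in practice.
	if err := conf.LoadFromFile("/etc/master-server/ms.toml"); err != nil {
		return nil, err
	}
	return conf, nil
}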

type CreateTable

type CreateTable struct {
	*Table
	sync.RWMutex
	// contains filtered or unexported fields
}

func NewCreateTable

func NewCreateTable(t *Table, n uint64) *CreateTable

func (*CreateTable) AddRange

func (t *CreateTable) AddRange(r *Range)

func (*CreateTable) DeleteRange

func (t *CreateTable) DeleteRange(rangeID uint64)

func (*CreateTable) GetAllRanges

func (t *CreateTable) GetAllRanges() []*Range

type CreateTableCache

type CreateTableCache struct {
	// contains filtered or unexported fields
}

func NewCreateTableCache

func NewCreateTableCache() *CreateTableCache

func (*CreateTableCache) Add

func (tc *CreateTableCache) Add(t *CreateTable)

func (*CreateTableCache) Delete

func (tc *CreateTableCache) Delete(id uint64)

func (*CreateTableCache) FindTable

func (tc *CreateTableCache) FindTable(id uint64) (*CreateTable, bool)

func (*CreateTableCache) GetAllTable

func (tc *CreateTableCache) GetAllTable() []*CreateTable

func (*CreateTableCache) Size

func (tc *CreateTableCache) Size() int

type CreateTableWorker

type CreateTableWorker struct {
	// contains filtered or unexported fields
}

func (*CreateTableWorker) AllowWork

func (dt *CreateTableWorker) AllowWork(cluster *Cluster) bool

func (*CreateTableWorker) GetInterval

func (dt *CreateTableWorker) GetInterval() time.Duration

func (*CreateTableWorker) GetName

func (dt *CreateTableWorker) GetName() string

func (*CreateTableWorker) Stop

func (dt *CreateTableWorker) Stop()

func (*CreateTableWorker) Work

func (dt *CreateTableWorker) Work(c *Cluster)

type Database

type Database struct {
	*metapb.DataBase
	// contains filtered or unexported fields
}

func NewDatabase

func NewDatabase(db *metapb.DataBase) *Database

func (*Database) AddTable

func (db *Database) AddTable(t *Table)

func (*Database) DeleteTableById

func (db *Database) DeleteTableById(id uint64) error

Performs a real (hard) delete.

func (*Database) DeleteTableByName

func (db *Database) DeleteTableByName(name string) error

Only removes the table from the current table list.

func (*Database) FindTable

func (db *Database) FindTable(name string) (*Table, bool)

Looks up a table in the current table list.

func (*Database) FindTableById

func (db *Database) FindTableById(id uint64) (*Table, bool)

Looks up an existing table.

func (*Database) GetAllTable

func (db *Database) GetAllTable() []*Table

func (*Database) Lock

func (db *Database) Lock()

func (*Database) Name

func (db *Database) Name() string

func (*Database) UnLock

func (db *Database) UnLock()

type DbCache

type DbCache struct {
	// contains filtered or unexported fields
}

func NewDbCache

func NewDbCache() *DbCache

func (*DbCache) Add

func (dc *DbCache) Add(d *Database)

func (*DbCache) Delete

func (dc *DbCache) Delete(name string)

func (*DbCache) FindDb

func (dc *DbCache) FindDb(name string) (*Database, bool)

func (*DbCache) FindDbById

func (dc *DbCache) FindDbById(id uint64) (*Database, bool)

func (*DbCache) GetAllDatabase

func (dc *DbCache) GetAllDatabase() []*Database

func (*DbCache) Size

func (dc *DbCache) Size() int

type DelPeerEvent

type DelPeerEvent struct {
	RangeEventMeta
	// contains filtered or unexported fields
}

func NewDelPeerEvent

func NewDelPeerEvent(id, rangeId uint64, p *metapb.Peer, creator string) *DelPeerEvent

func (*DelPeerEvent) Execute

func (e *DelPeerEvent) Execute(cluster *Cluster, r *Range) (ExecNextEvent, *taskpb.Task, error)

type DelRangeEvent

type DelRangeEvent struct {
	RangeEventMeta
	// contains filtered or unexported fields
}

func NewDelRangeEvent

func NewDelRangeEvent(id, rangeId uint64, creator string) *DelRangeEvent

func (*DelRangeEvent) Execute

func (e *DelRangeEvent) Execute(cluster *Cluster, r *Range) (ExecNextEvent, *taskpb.Task, error)

type DeleteTableWorker

type DeleteTableWorker struct {
	// contains filtered or unexported fields
}

func (*DeleteTableWorker) AllowWork

func (dt *DeleteTableWorker) AllowWork(cluster *Cluster) bool

func (*DeleteTableWorker) GetInterval

func (dt *DeleteTableWorker) GetInterval() time.Duration

func (*DeleteTableWorker) GetName

func (dt *DeleteTableWorker) GetName() string

func (*DeleteTableWorker) Stop

func (dt *DeleteTableWorker) Stop()

func (*DeleteTableWorker) Work

func (dt *DeleteTableWorker) Work(cluster *Cluster)

type DifferCacheNodeSelector

type DifferCacheNodeSelector struct {
	// contains filtered or unexported fields
}

func NewDifferCacheNodeSelector

func NewDifferCacheNodeSelector(cache *idCache) *DifferCacheNodeSelector

func (*DifferCacheNodeSelector) CanSelect

func (sel *DifferCacheNodeSelector) CanSelect(node *Node) bool

func (*DifferCacheNodeSelector) Name

func (sel *DifferCacheNodeSelector) Name() string

type DifferIPSelector

type DifferIPSelector struct {
	// contains filtered or unexported fields
}

func NewDifferIPSelector

func NewDifferIPSelector(excludeNodes []*Node) *DifferIPSelector

func (*DifferIPSelector) CanSelect

func (sel *DifferIPSelector) CanSelect(node *Node) bool

func (*DifferIPSelector) Name

func (sel *DifferIPSelector) Name() string

type DifferNodeSelector

type DifferNodeSelector struct {
	// contains filtered or unexported fields
}

func (*DifferNodeSelector) CanSelect

func (sel *DifferNodeSelector) CanSelect(node *Node) bool

func (*DifferNodeSelector) Name

func (sel *DifferNodeSelector) Name() string

type Distribution

type Distribution struct {
	// contains filtered or unexported fields
}

type Distributions

type Distributions []Distribution

func (Distributions) Len

func (d Distributions) Len() int

func (Distributions) Less

func (d Distributions) Less(i, j int) bool

func (Distributions) Swap

func (d Distributions) Swap(i, j int)

type EventDispatcher

type EventDispatcher struct {
	// contains filtered or unexported fields
}

func NewEventDispatcher

func NewEventDispatcher(cluster *Cluster, opt *scheduleOption) *EventDispatcher

func (*EventDispatcher) Dispatch

func (dispatcher *EventDispatcher) Dispatch(r *Range) *taskpb.Task

func (*EventDispatcher) Run

func (c *EventDispatcher) Run()

func (*EventDispatcher) Stop

func (c *EventDispatcher) Stop()

type EventStatus

type EventStatus int
const (
	EVENT_STATUS_INIT EventStatus = iota // must never be used
	EVENT_STATUS_CREATE
	EVENT_STATUS_DEALING
	EVENT_STATUS_FINISH
	EVENT_STATUS_TIMEOUT
	EVENT_STATUS_CANCEL
	EVENT_STATUS_FAILURE
)

type EventType

type EventType int

type ExecNextEvent

type ExecNextEvent bool

type FailoverWorker

type FailoverWorker struct {
	// contains filtered or unexported fields
}

Based on what is reported.

func (*FailoverWorker) AllowWork

func (f *FailoverWorker) AllowWork(cluster *Cluster) bool

func (*FailoverWorker) GetInterval

func (f *FailoverWorker) GetInterval() time.Duration

func (*FailoverWorker) GetName

func (f *FailoverWorker) GetName() string

func (*FailoverWorker) Stop

func (f *FailoverWorker) Stop()

func (*FailoverWorker) Work

func (f *FailoverWorker) Work(cluster *Cluster)

type GlobalDeletedRange

type GlobalDeletedRange struct {
	// contains filtered or unexported fields
}

func NewGlobalDeletedRange

func NewGlobalDeletedRange() *GlobalDeletedRange

func (*GlobalDeletedRange) Add

func (rc *GlobalDeletedRange) Add(rng *metapb.Range)

func (*GlobalDeletedRange) Delete

func (rc *GlobalDeletedRange) Delete(id uint64)

func (*GlobalDeletedRange) FindRange

func (rc *GlobalDeletedRange) FindRange(id uint64) (*metapb.Range, bool)

type GlobalPreGCRange

type GlobalPreGCRange struct {
	// contains filtered or unexported fields
}

func NewGlobalPreGCRange

func NewGlobalPreGCRange() *GlobalPreGCRange

func (*GlobalPreGCRange) Add

func (rc *GlobalPreGCRange) Add(rng *metapb.Range)

func (*GlobalPreGCRange) Delete

func (rc *GlobalPreGCRange) Delete(id uint64)

func (*GlobalPreGCRange) FindRange

func (rc *GlobalPreGCRange) FindRange(id uint64) (*metapb.Range, bool)

type GlobalTableCache

type GlobalTableCache struct {
	// contains filtered or unexported fields
}

func NewGlobalTableCache

func NewGlobalTableCache() *GlobalTableCache

func (*GlobalTableCache) Add

func (tc *GlobalTableCache) Add(t *Table)

func (*GlobalTableCache) DeleteById

func (tc *GlobalTableCache) DeleteById(id uint64)

func (*GlobalTableCache) FindTableById

func (tc *GlobalTableCache) FindTableById(id uint64) (*Table, bool)

func (*GlobalTableCache) GetAllTable

func (tc *GlobalTableCache) GetAllTable() []*Table

func (*GlobalTableCache) Size

func (tc *GlobalTableCache) Size() int

type HbRingBuf

type HbRingBuf struct {
	// contains filtered or unexported fields
}

func NewHbRingBuf

func NewHbRingBuf(cap uint32) *HbRingBuf

func (*HbRingBuf) CalcMaxHbDiff

func (rb *HbRingBuf) CalcMaxHbDiff() (time.Duration, error)

func (*HbRingBuf) GetLastHbTime

func (rb *HbRingBuf) GetLastHbTime() time.Time

func (*HbRingBuf) ResetHbRingBuf

func (rb *HbRingBuf) ResetHbRingBuf()

func (*HbRingBuf) SetCurHbTime

func (rb *HbRingBuf) SetCurHbTime()

type HttpHandler

type HttpHandler func(w http.ResponseWriter, r *http.Request)

type HttpReply

type HttpReply httpReply

type IDGenerator

type IDGenerator interface {
	GenID() (uint64, error)
}

func NewClusterIDGenerator

func NewClusterIDGenerator(store Store) IDGenerator
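
A brief sketch of the IDGenerator contract above, assuming a Store that has already been opened.

func nextClusterID(store Store) (uint64, error) {
	gen := NewClusterIDGenerator(store)
	// GenID hands out the next cluster-wide unique id, or an error.
	return gen.GenID()
}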

type Iterator

type Iterator interface {
	// return false if over or error
	Next() bool

	Key() []byte
	Value() []byte

	Error() error

	// Release must be called once the iterator is no longer needed
	Release()
}
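
A minimal usage sketch of the Iterator contract above: Next reports false when iteration is exhausted or fails, Error distinguishes the two cases, and Release must always be called afterwards. The store and key bounds are placeholders.

func scanSketch(s Store, startKey, endKey []byte) error {
	iter := s.Scan(startKey, endKey)
	// Release must always be called once iteration is finished.
	defer iter.Release()
	for iter.Next() {
		_ = iter.Key()
		_ = iter.Value()
	}
	// Next returning false may mean exhaustion or an error; check Error.
	return iter.Error()
}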

type LevelDBBatch

type LevelDBBatch struct {
	// contains filtered or unexported fields
}

func (*LevelDBBatch) Commit

func (b *LevelDBBatch) Commit() error

func (*LevelDBBatch) Delete

func (b *LevelDBBatch) Delete(key []byte)

func (*LevelDBBatch) Put

func (b *LevelDBBatch) Put(key []byte, value []byte)

type LevelDBDriver

type LevelDBDriver struct {
	// contains filtered or unexported fields
}

Local store, used for test cases.

func (*LevelDBDriver) Close

func (ld *LevelDBDriver) Close() error

func (*LevelDBDriver) Delete

func (ld *LevelDBDriver) Delete(key []byte) error

func (*LevelDBDriver) Get

func (ld *LevelDBDriver) Get(key []byte) (value []byte, err error)

func (*LevelDBDriver) NewBatch

func (ld *LevelDBDriver) NewBatch() Batch

Batched writes; on commit, all modifications in the batch become visible to readers at the same time.
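
A hedged sketch of those semantics against the Batch interface; the driver variable and keys are placeholders.

func batchSketch(ld *LevelDBDriver) error {
	b := ld.NewBatch()
	b.Put([]byte("k1"), []byte("v1"))
	b.Delete([]byte("k2"))
	// Only after Commit succeeds do the staged changes become visible together.
	return b.Commit()
}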

func (*LevelDBDriver) Open

func (ld *LevelDBDriver) Open() error

func (*LevelDBDriver) Put

func (ld *LevelDBDriver) Put(key []byte, value []byte) error

func (*LevelDBDriver) Scan

func (ld *LevelDBDriver) Scan(startKey, endKey []byte) Iterator

type LevelDBIter

type LevelDBIter struct {
	// contains filtered or unexported fields
}

func (*LevelDBIter) Error

func (i *LevelDBIter) Error() error

func (*LevelDBIter) Key

func (i *LevelDBIter) Key() []byte

func (*LevelDBIter) Next

func (i *LevelDBIter) Next() bool

func (*LevelDBIter) Release

func (i *LevelDBIter) Release()

Release must be called once the iterator is no longer needed.

func (*LevelDBIter) Value

func (i *LevelDBIter) Value() []byte

type LocalDSClient

type LocalDSClient struct {
}

func (*LocalDSClient) Close

func (lc *LocalDSClient) Close() error

Close should release all data.

func (*LocalDSClient) CreateRange

func (lc *LocalDSClient) CreateRange(addr string, r *metapb.Range) error

CreateRange asks the data server at addr to create the given range.

func (*LocalDSClient) DeleteRange

func (lc *LocalDSClient) DeleteRange(addr string, rangeId uint64) error

func (*LocalDSClient) GetPeerInfo

func (lc *LocalDSClient) GetPeerInfo(addr string, rangeId uint64) (*schpb.GetPeerInfoResponse, error)

func (*LocalDSClient) OffLineRange

func (lc *LocalDSClient) OffLineRange(addr string, rangeId uint64) error

func (*LocalDSClient) ReplaceRange

func (lc *LocalDSClient) ReplaceRange(addr string, oldRangeId uint64, newRange *metapb.Range) error

func (*LocalDSClient) SetNodeLogLevel

func (lc *LocalDSClient) SetNodeLogLevel(addr string, level string) error

func (*LocalDSClient) TransferLeader

func (lc *LocalDSClient) TransferLeader(addr string, rangeId uint64) error

func (*LocalDSClient) UpdateRange

func (lc *LocalDSClient) UpdateRange(addr string, r *metapb.Range) error

type LogConfig

type LogConfig struct {
	Dir    string `toml:"dir,omitempty" json:"dir"`
	Module string `toml:"module,omitempty" json:"module"`
	Level  string `toml:"level,omitempty" json:"level"`
}

type Member

type Member struct {
	Id        uint64   `json:"id"`
	Ip        string   `json:"ip"`
	HttpPort  uint16   `json:"http_port"`
	RpcPort   uint16   `json:"rpc_port"`
	RaftPorts []uint16 `json:"raft_ports"`
}

{"id":1,"ip":"127.0.165.52", "http_port":8887,"rpc_port":8888, "raft_ports":[8877,8867]}

type Metric

type Metric struct {
	// contains filtered or unexported fields
}

func NewMetric

func NewMetric(cluster *Cluster, addr string, interval time.Duration) *Metric

func (*Metric) CollectEvent

func (m *Metric) CollectEvent(event RangeEvent)

func (*Metric) CollectScheduleCounter

func (m *Metric) CollectScheduleCounter(name, label string)

func (*Metric) Run

func (m *Metric) Run()

func (*Metric) Stop

func (m *Metric) Stop()

type MetricConfig

type MetricConfig struct {
	Interval util.Duration `toml:"interval,omitempty" json:"interval"`
	Address  string        `toml:"address,omitempty" json:"address"`

	Server MetricServer `toml:"server,omitempty" json:"server"`
}

type MetricServer

type MetricServer struct {
	Address   string   `toml:"address,omitempty" json:"address"`
	QueueNum  uint64   `toml:"queue-num,omitempty" json:"queue-num"`
	StoreType string   `toml:"store-type,omitempty" json:"store-type"`
	StoreUrl  []string `toml:"store-url,omitempty" json:"store-url"`
}

type MockDs

type MockDs struct {
	NodeId  uint64
	RpcAddr string
}

Mock DS (data server).

func NewMockDs

func NewMockDs(rpcAddr string) *MockDs

func (*MockDs) SetNodeId

func (ds *MockDs) SetNodeId(id uint64)

type Node

type Node struct {
	*metapb.Node

	LastHeartbeatTS time.Time
	Trace           bool
	// contains filtered or unexported fields
}

TODO: range counts can differ because machines are not identical.

func NewNode

func NewNode(node *metapb.Node) *Node

func SelectLeaderNode

func SelectLeaderNode(nodes []*Node, selectors []NodeSelector, mostLeaderNum float64) *Node

func (*Node) AddRange

func (n *Node) AddRange(r *Range)

func (*Node) AddTrashReplica

func (n *Node) AddTrashReplica(peer *metapb.Replica)

func (*Node) DeleteRange

func (n *Node) DeleteRange(rangeId uint64)

func (*Node) DeleteTrashReplica

func (n *Node) DeleteTrashReplica(id uint64)

func (*Node) GetAllRanges

func (n *Node) GetAllRanges() []*Range

func (*Node) GetAllTrashRangeIds

func (n *Node) GetAllTrashRangeIds() []uint64

func (*Node) GetAllTrashReplicas

func (n *Node) GetAllTrashReplicas() []*metapb.Replica

func (*Node) GetApplyingSnapCount

func (n *Node) GetApplyingSnapCount() uint32

func (*Node) GetLeaderCount

func (n *Node) GetLeaderCount() uint32

func (*Node) GetRange

func (n *Node) GetRange(id uint64) (*Range, bool)

func (*Node) GetRangesCount

func (n *Node) GetRangesCount() uint32

func (*Node) GetRangesSize

func (n *Node) GetRangesSize() int

func (*Node) GetReceivingSnapCount

func (n *Node) GetReceivingSnapCount() uint32

func (*Node) GetSendingSnapCount

func (n *Node) GetSendingSnapCount() uint32

func (*Node) GetStartTS

func (n *Node) GetStartTS() time.Time

GetStartTS returns the start timestamp.

func (*Node) GetUptime

func (n *Node) GetUptime() time.Duration

GetUptime returns the uptime.

func (*Node) ID

func (n *Node) ID() uint64

func (*Node) IsBusy

func (n *Node) IsBusy() bool

func (*Node) IsDown

func (n *Node) IsDown() bool

IsDown returns whether the node is down.

func (*Node) IsFault

func (n *Node) IsFault() bool

func (*Node) IsLogin

func (n *Node) IsLogin() bool

func (*Node) IsLogout

func (n *Node) IsLogout() bool

func (*Node) UpdateState

func (n *Node) UpdateState(state metapb.NodeState)

type NodeCache

type NodeCache struct {
	// contains filtered or unexported fields
}

func NewNodeCache

func NewNodeCache() *NodeCache

func (*NodeCache) Add

func (nc *NodeCache) Add(n *Node)

func (*NodeCache) DeleteByAddr

func (nc *NodeCache) DeleteByAddr(addr string)

func (*NodeCache) DeleteById

func (nc *NodeCache) DeleteById(id uint64)

func (*NodeCache) FindNodeByAddr

func (nc *NodeCache) FindNodeByAddr(addr string) (*Node, bool)

func (*NodeCache) FindNodeById

func (nc *NodeCache) FindNodeById(id uint64) (*Node, bool)

func (*NodeCache) GetAllActiveNode

func (nc *NodeCache) GetAllActiveNode() []*Node

func (*NodeCache) GetAllNode

func (nc *NodeCache) GetAllNode() []*Node

func (*NodeCache) Size

func (nc *NodeCache) Size() int

type NodeDebug

type NodeDebug struct {
	*metapb.Node
	Ranges      []*Range     `json:"ranges"`
	LastHbTime  time.Time    `json:"last_hb_time"`
	LastSchTime time.Time    `json:"last_sch_time"`
	LastOpt     *taskpb.Task `json:"last_opt"`
}

type NodeLoginSelector

type NodeLoginSelector struct {
	// contains filtered or unexported fields
}

func NewNodeLoginSelector

func NewNodeLoginSelector(opt *scheduleOption) *NodeLoginSelector

func (*NodeLoginSelector) CanSelect

func (sel *NodeLoginSelector) CanSelect(node *Node) bool

func (*NodeLoginSelector) Name

func (sel *NodeLoginSelector) Name() string

type NodeOpsStat

type NodeOpsStat struct {
	// contains filtered or unexported fields
}

func (*NodeOpsStat) Clear

func (opsStat *NodeOpsStat) Clear() uint64

func (*NodeOpsStat) GetMax

func (opsStat *NodeOpsStat) GetMax() uint64

func (*NodeOpsStat) Hit

func (opsStat *NodeOpsStat) Hit(v uint64)

type NodeSelector

type NodeSelector interface {
	Name() string
	CanSelect(node *Node) bool
}

* Selects a suitable node.
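
A hedged sketch of a custom NodeSelector implementing the interface above; this is purely illustrative and not one of the package's built-in selectors.

type idleNodeSelector struct{}

func (sel *idleNodeSelector) Name() string { return "idle_node_selector" }

// CanSelect keeps only nodes that are logged in and not busy (a made-up policy).
func (sel *idleNodeSelector) CanSelect(node *Node) bool {
	return node.IsLogin() && !node.IsBusy()
}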

type Peer

type Peer struct {
	ID                uint64 `json:"id"`
	WebManageAddr     string `json:"web_addr"`
	RpcServerAddr     string `json:"rpc_addr"`
	RaftHeartbeatAddr string `json:"raft_hb_addr"`
	RaftReplicateAddr string `json:"raft_rp_addr"`
}

func (*Peer) GetId

func (p *Peer) GetId() uint64

type Proxy

type Proxy struct {
	// contains filtered or unexported fields
}

type RaftConfig

type RaftConfig struct {
	HeartbeatInterval util.Duration `toml:"heartbeat-interval,omitempty" json:"heartbeat-interval"`
	RetainLogsCount   uint64        `toml:"retain-logs-count,omitempty" json:"retain-logs-count"`
}

type RaftStore

type RaftStore struct {
	// contains filtered or unexported fields
}

func NewRaftStore

func NewRaftStore(conf *StoreConfig) (*RaftStore, error)

func (*RaftStore) ApplySnapshot

func (s *RaftStore) ApplySnapshot(iter *raftgroup.SnapshotKVIterator) error

func (*RaftStore) Close

func (s *RaftStore) Close() error

func (*RaftStore) Delete

func (s *RaftStore) Delete(key []byte) error

func (*RaftStore) Get

func (s *RaftStore) Get(key []byte) ([]byte, error)

func (*RaftStore) GetSnapshot

func (s *RaftStore) GetSnapshot() (model.Snapshot, error)

func (*RaftStore) HandleApplySnapshot

func (s *RaftStore) HandleApplySnapshot(peers []raftproto.Peer, iter *raftgroup.SnapshotKVIterator) error

func (*RaftStore) HandleCmd

func (s *RaftStore) HandleCmd(req *ms_raftcmdpb.Request, raftIndex uint64) (resp *ms_raftcmdpb.Response, err error)

func (*RaftStore) HandleGetSnapshot

func (s *RaftStore) HandleGetSnapshot() (model.Snapshot, error)

//TODO

func (*RaftStore) HandlePeerChange

func (s *RaftStore) HandlePeerChange(confChange *raftproto.ConfChange) (res interface{}, err error)

func (*RaftStore) NewBatch

func (s *RaftStore) NewBatch() Batch

func (*RaftStore) Open

func (s *RaftStore) Open() error

func (*RaftStore) Put

func (s *RaftStore) Put(key, value []byte) error

func (*RaftStore) Scan

func (s *RaftStore) Scan(startKey, limitKey []byte) Iterator

type Range

type Range struct {
	*metapb.Range
	Leader       *metapb.Peer
	DownPeers    []*mspb.PeerStats
	PendingPeers []*metapb.Peer

	BytesWritten uint64
	BytesRead    uint64

	KeysWritten uint64
	KeysRead    uint64

	// Approximate range size.
	ApproximateSize uint64

	State metapb.RangeState
	Trace bool

	LastHbTimeTS time.Time
	// contains filtered or unexported fields
}

func NewRange

func NewRange(r *metapb.Range, leader *metapb.Peer) *Range

func (*Range) GetDownPeer

func (r *Range) GetDownPeer(peerID uint64) *metapb.Peer

GetDownPeer returns the down peer with the specified peer id.

func (*Range) GetDownPeers

func (r *Range) GetDownPeers() []*metapb.Peer

func (*Range) GetFollowers

func (r *Range) GetFollowers() map[uint64]*metapb.Peer

GetFollowers returns a map describing how the follower peers are distributed.

func (*Range) GetLeader

func (r *Range) GetLeader() *metapb.Peer

func (*Range) GetNodeIds

func (r *Range) GetNodeIds() map[uint64]struct{}

GetNodeIds returns the set of node ids the range is distributed on.

func (*Range) GetNodePeer

func (r *Range) GetNodePeer(nodeID uint64) *metapb.Peer

GetNodePeer returns the peer on the specified node.

func (*Range) GetNodes

func (r *Range) GetNodes(cluster *Cluster) (nodes []*Node)

func (*Range) GetPeer

func (r *Range) GetPeer(peerID uint64) *metapb.Peer

GetPeer returns the peer with the specified peer id.

func (*Range) GetPendingPeer

func (r *Range) GetPendingPeer(peerID uint64) *metapb.Peer

GetPendingPeer returns the pending peer with the specified peer id.

func (*Range) GetPendingPeers

func (r *Range) GetPendingPeers() []*metapb.Peer

func (*Range) GetRandomFollower

func (r *Range) GetRandomFollower() *metapb.Peer

func (*Range) ID

func (r *Range) ID() uint64

func (*Range) IsHealthy

func (r *Range) IsHealthy() bool

func (*Range) RemoveNodePeer

func (r *Range) RemoveNodePeer(NodeID uint64)

RemoveNodePeer removes the peer on the specified node.

func (*Range) SString

func (r *Range) SString() string

type RangeCache

type RangeCache struct {
	// contains filtered or unexported fields
}

func NewRangeCache

func NewRangeCache() *RangeCache

func (*RangeCache) Add

func (rc *RangeCache) Add(r *Range)

func (*RangeCache) Delete

func (rc *RangeCache) Delete(id uint64) *Range

func (*RangeCache) FindRangeByID

func (rc *RangeCache) FindRangeByID(id uint64) (*Range, bool)

func (*RangeCache) GetAllRange

func (rc *RangeCache) GetAllRange() []*Range

func (*RangeCache) GetAllRangeFromTopology

func (rc *RangeCache) GetAllRangeFromTopology() []*metapb.Range

func (*RangeCache) GetRandomRange

func (rc *RangeCache) GetRandomRange() *Range

func (*RangeCache) GetTableAllRanges

func (rc *RangeCache) GetTableAllRanges(tableId uint64) []*Range

func (*RangeCache) GetTableAllRangesFromTopology

func (rc *RangeCache) GetTableAllRangesFromTopology(tableId uint64) []*metapb.Range

func (*RangeCache) GetTableRangeDuplicate

func (rc *RangeCache) GetTableRangeDuplicate(tableId uint64) []*metapb.Range

func (*RangeCache) GetTableTopologyMissing

func (rc *RangeCache) GetTableTopologyMissing(tableId uint64) []*metapb.Range

Completeness check: returns the ranges of the table that are missing from the topology.

func (*RangeCache) MultipleSearchRanges

func (rc *RangeCache) MultipleSearchRanges(key []byte, num int) ([]*Range, bool)

func (*RangeCache) SearchRange

func (rc *RangeCache) SearchRange(key []byte) (*Range, bool)

func (*RangeCache) Size

func (rc *RangeCache) Size() int

type RangeDebug

type RangeDebug struct {
	*metapb.Range
	Leader       *metapb.Peer      `json:"leader,omitempty"`
	DownPeers    []*mspb.PeerStats `json:"down_peers,omitempty"`
	PendingPeers []*metapb.Peer    `json:"pending_peers,omitempty"`
	LastHbTime   time.Time         `json:"last_hb_time,omitempty"`
	Task         *taskpb.Task      `json:"task,omitempty"`
}

type RangeEvent

type RangeEvent interface {
	IsTimeout() bool
	ExecTime() time.Duration
	String() string
	/**
	@param ExecNextEvent: true means Execute should be invoked again; if false, check whether the whole event is closed so that it can be removed.
	@param Task: if the returned Task is non-nil, ExecNextEvent should be set to false.
	To make admin-side printing and the execution steps easier to follow, the pointer is not advanced to the current position; instead the status drives the jump to the next event.

	An event's Execute must complete quickly, otherwise it blocks heartbeats; an event that runs too long prevents other events from executing.

	*/
	Execute(cluster *Cluster, r *Range) (ExecNextEvent, *taskpb.Task, error)
	GetStatus() EventStatus
	IsClosed() bool
	GetRangeID() uint64
	Next() RangeEvent
	GetId() uint64
	GetType() EventType
}
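
A hedged sketch of the Execute contract described in the comment above: keep invoking Execute while it asks for the next step, stop as soon as a task is produced, and drop the event once it reports closed. This illustrates the calling convention only; it is not the package's actual dispatcher.

func driveEventSketch(cluster *Cluster, r *Range, ev RangeEvent) *taskpb.Task {
	for {
		next, task, err := ev.Execute(cluster, r)
		if err != nil {
			return nil
		}
		if task != nil {
			// A non-nil task implies ExecNextEvent should be treated as false.
			return task
		}
		if !next {
			if ev.IsClosed() {
				cluster.RemoveEvent(ev) // remove finished or closed events
			}
			return nil
		}
	}
}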

func NewChangePeerEvent

func NewChangePeerEvent(id uint64, rng *Range, oldPeer, newPeer *metapb.Peer, creator string) RangeEvent

type RangeEventMeta

type RangeEventMeta struct {
	// contains filtered or unexported fields
}

func NewRangeEvent

func NewRangeEvent(id, rangeId uint64, eventType EventType, timeout time.Duration, creator string, task *taskpb.Task) RangeEventMeta

func (*RangeEventMeta) ExecTime

func (m *RangeEventMeta) ExecTime() time.Duration

func (*RangeEventMeta) GetId

func (m *RangeEventMeta) GetId() uint64

func (*RangeEventMeta) GetRangeID

func (m *RangeEventMeta) GetRangeID() uint64

func (*RangeEventMeta) GetStatus

func (m *RangeEventMeta) GetStatus() EventStatus

func (*RangeEventMeta) GetType

func (m *RangeEventMeta) GetType() EventType

func (*RangeEventMeta) IsClosed

func (m *RangeEventMeta) IsClosed() bool

func (*RangeEventMeta) IsTimeout

func (m *RangeEventMeta) IsTimeout() bool

func (*RangeEventMeta) Next

func (m *RangeEventMeta) Next() RangeEvent

func (*RangeEventMeta) String

func (m *RangeEventMeta) String() string

type RangeOpsStat

type RangeOpsStat struct {
	// contains filtered or unexported fields
}

func (*RangeOpsStat) Clear

func (opsStat *RangeOpsStat) Clear() uint64

func (*RangeOpsStat) GetMax

func (opsStat *RangeOpsStat) GetMax() uint64

func (*RangeOpsStat) Hit

func (opsStat *RangeOpsStat) Hit(v uint64)

type RegionCache

type RegionCache struct {
	// contains filtered or unexported fields
}

func NewRegionCache

func NewRegionCache() *RegionCache

func (*RegionCache) Add

func (rc *RegionCache) Add(r *Range)

func (*RegionCache) Delete

func (rc *RegionCache) Delete(id uint64)

func (*RegionCache) FindRangeByID

func (rc *RegionCache) FindRangeByID(id uint64) (*Range, bool)

func (*RegionCache) GetAllRange

func (rc *RegionCache) GetAllRange() []*Range

func (*RegionCache) GetRandomRange

func (rc *RegionCache) GetRandomRange() *Range

func (*RegionCache) Size

func (rc *RegionCache) Size() int

type RegionHbCheckWorker

type RegionHbCheckWorker struct {
	// contains filtered or unexported fields
}

func (*RegionHbCheckWorker) AllowWork

func (hb *RegionHbCheckWorker) AllowWork(cluster *Cluster) bool

func (*RegionHbCheckWorker) GetInterval

func (hb *RegionHbCheckWorker) GetInterval() time.Duration

func (*RegionHbCheckWorker) GetName

func (hb *RegionHbCheckWorker) GetName() string

func (*RegionHbCheckWorker) Stop

func (hb *RegionHbCheckWorker) Stop()

func (*RegionHbCheckWorker) Work

func (hb *RegionHbCheckWorker) Work(cluster *Cluster)

type ReplicaCache

type ReplicaCache struct {
	// contains filtered or unexported fields
}

func NewReplicaCache

func NewReplicaCache() *ReplicaCache

func (*ReplicaCache) Add

func (rc *ReplicaCache) Add(peer *metapb.Replica)

func (*ReplicaCache) Delete

func (rc *ReplicaCache) Delete(id uint64)

func (*ReplicaCache) FindReplica

func (rc *ReplicaCache) FindReplica(id uint64) (*metapb.Replica, bool)

func (*ReplicaCache) GetAllRangIds

func (rc *ReplicaCache) GetAllRangIds() []uint64

func (*ReplicaCache) GetAllReplica

func (rc *ReplicaCache) GetAllReplica() []*metapb.Replica

func (*ReplicaCache) Size

func (rc *ReplicaCache) Size() int

type ReplicationConfig

type ReplicationConfig struct {
	// MaxReplicas is the number of replicas for each region.
	MaxReplicas uint64 `toml:"max-replicas,omitempty" json:"max-replicas"`

	// The label keys specified the location of a store.
	// The placement priorities is implied by the order of label keys.
	// For example, ["zone", "rack"] means that we should place replicas to
	// different zones first, then to different racks if we don't have enough zones.
	LocationLabels util.StringSlice `toml:"location-labels,omitempty" json:"location-labels"`
}

ReplicationConfig is the replication configuration.

type Resolver

type Resolver struct {
	// contains filtered or unexported fields
}

func NewResolver

func NewResolver(nodes map[uint64]*Peer) *Resolver

func (*Resolver) NodeAddress

func (r *Resolver) NodeAddress(nodeID uint64, stype raft.SocketType) (addr string, err error)

type RunMode

type RunMode int
const (
	RUN_MODE_INIT   RunMode = iota // must never be used
	RUN_MODE_LOCAL                 // executed on the master
	RUN_MODE_REMOTE                // returned in the response for the DS to execute
)

type SaveBatch

type SaveBatch struct {
	// contains filtered or unexported fields
}

func NewSaveBatch

func NewSaveBatch(raft *raftgroup.RaftGroup) *SaveBatch

func (*SaveBatch) Commit

func (b *SaveBatch) Commit() error

func (*SaveBatch) Delete

func (b *SaveBatch) Delete(key []byte)

func (*SaveBatch) Put

func (b *SaveBatch) Put(key, value []byte)

type ScheduleConfig

type ScheduleConfig struct {
	// If the snapshot count of one store is greater than this value,
	// it will never be used as a source or target store.
	MaxSnapshotCount uint64 `toml:"max-snapshot-count,omitempty" json:"max-snapshot-count"`
	// MaxNodeDownTime is the max duration after which
	// a node will be considered to be down if it hasn't reported heartbeats.
	MaxNodeDownTime util.Duration `toml:"max-node-down-time,omitempty" json:"max-node-down-time"`
	// LeaderScheduleLimit is the max coexist leader schedules.
	LeaderScheduleLimit uint64 `toml:"leader-schedule-limit,omitempty" json:"leader-schedule-limit"`
	// RegionScheduleLimit is the max coexist region schedules.
	RegionScheduleLimit uint64 `toml:"region-schedule-limit,omitempty" json:"region-schedule-limit"`
	// ReplicaScheduleLimit is the max coexist replica schedules.
	ReplicaScheduleLimit      uint64        `toml:"replica-schedule-limit,omitempty" json:"replica-schedule-limit"`
	MaxTaskTimeout            util.Duration `toml:"max-task-timeout,omitempty" json:"max-task-timeout"`
	MaxRangeDownTime          util.Duration `toml:"max-range-down-time,omitempty" json:"max-range-down-time"`
	NodeRangeBalanceTime      util.Duration `toml:"node-range-balance-time,omitempty" json:"node-range-balance-time"`
	StorageAvailableThreshold uint64        `toml:"storage-available-threshold,omitempty" json:"storage-available-threshold"`
	WriteByteOpsThreshold     uint64        `toml:"writeByte-ops-threshold,omitempty" json:"writeByte-ops-threshold"`
}

ScheduleConfig is the schedule configuration.

type Server

type Server struct {
	// contains filtered or unexported fields
}

func (*Server) AddColumn

func (service *Server) AddColumn(ctx context.Context, req *mspb.AddColumnRequest) (*mspb.AddColumnResponse, error)

func (*Server) AskSplit

func (service *Server) AskSplit(ctx context.Context, req *mspb.AskSplitRequest) (*mspb.AskSplitResponse, error)

func (*Server) CreateDatabase

func (service *Server) CreateDatabase(ctx context.Context, req *mspb.CreateDatabaseRequest) (*mspb.CreateDatabaseResponse, error)

func (*Server) CreateTable

func (service *Server) CreateTable(ctx context.Context, req *mspb.CreateTableRequest) (*mspb.CreateTableResponse, error)

func (*Server) GetColumnById

func (service *Server) GetColumnById(ctx context.Context, req *mspb.GetColumnByIdRequest) (*mspb.GetColumnByIdResponse, error)

func (*Server) GetColumnByName

func (service *Server) GetColumnByName(ctx context.Context, req *mspb.GetColumnByNameRequest) (*mspb.GetColumnByNameResponse, error)

func (*Server) GetColumns

func (service *Server) GetColumns(ctx context.Context, req *mspb.GetColumnsRequest) (*mspb.GetColumnsResponse, error)

func (*Server) GetDB

func (service *Server) GetDB(ctx context.Context, req *mspb.GetDBRequest) (*mspb.GetDBResponse, error)

func (*Server) GetLeader

func (service *Server) GetLeader() *Peer

func (*Server) GetMSLeader

func (service *Server) GetMSLeader(ctx context.Context, req *mspb.GetMSLeaderRequest) (*mspb.GetMSLeaderResponse, error)

func (*Server) GetNode

func (service *Server) GetNode(ctx context.Context, req *mspb.GetNodeRequest) (*mspb.GetNodeResponse, error)

func (*Server) GetNodeId

func (service *Server) GetNodeId(ctx context.Context, req *mspb.GetNodeIdRequest) (*mspb.GetNodeIdResponse, error)

func (*Server) GetRoute

func (service *Server) GetRoute(ctx context.Context, req *mspb.GetRouteRequest) (*mspb.GetRouteResponse, error)

func (*Server) GetTable

func (service *Server) GetTable(ctx context.Context, req *mspb.GetTableRequest) (*mspb.GetTableResponse, error)

func (*Server) GetTableById

func (service *Server) GetTableById(ctx context.Context, req *mspb.GetTableByIdRequest) (*mspb.GetTableByIdResponse, error)

func (*Server) InitAlarmServer

func (service *Server) InitAlarmServer(conf AlarmConfig) (err error)

func (*Server) InitMasterServer

func (service *Server) InitMasterServer(conf *Config)

func (*Server) InitMetricServer

func (service *Server) InitMetricServer(conf *Config)

func (*Server) InitServer

func (service *Server) InitServer(conf *Config)

func (*Server) IsLeader

func (service *Server) IsLeader() bool

func (*Server) MasterStart

func (service *Server) MasterStart()

func (*Server) MetricStart

func (service *Server) MetricStart()

func (*Server) NodeHeartbeat

func (service *Server) NodeHeartbeat(ctx context.Context, req *mspb.NodeHeartbeatRequest) (*mspb.NodeHeartbeatResponse, error)

func (*Server) NodeLogin

func (service *Server) NodeLogin(ctx context.Context, req *mspb.NodeLoginRequest) (*mspb.NodeLoginResponse, error)

func (*Server) ParseClusterInfo

func (service *Server) ParseClusterInfo() []*Peer

func (*Server) Quit

func (service *Server) Quit()

Quit saves state and exits.

func (*Server) RaftLeaderChange

func (service *Server) RaftLeaderChange(leaderId uint64)

func (*Server) RangeHeartbeat

func (service *Server) RangeHeartbeat(ctx context.Context, req *mspb.RangeHeartbeatRequest) (*mspb.RangeHeartbeatResponse, error)

func (*Server) ReportSplit

func (service *Server) ReportSplit(ctx context.Context, req *mspb.ReportSplitRequest) (*mspb.ReportSplitResponse, error)

func (*Server) Start

func (service *Server) Start() error

func (*Server) TruncateTable

type SignHandler

type SignHandler func(w http.ResponseWriter, r *http.Request) bool

type SnapshotCountLimitSelector

type SnapshotCountLimitSelector struct {
	// contains filtered or unexported fields
}

func NewSnapshotCountLimitSelector

func NewSnapshotCountLimitSelector(opt *scheduleOption) *SnapshotCountLimitSelector

func (*SnapshotCountLimitSelector) CanSelect

func (sel *SnapshotCountLimitSelector) CanSelect(node *Node) bool

func (*SnapshotCountLimitSelector) Name

func (sel *SnapshotCountLimitSelector) Name() string

type StorageThresholdSelector

type StorageThresholdSelector struct {
	// contains filtered or unexported fields
}

StorageThresholdSelector ensures that we will not use an almost full node as a target.

func NewStorageThresholdSelector

func NewStorageThresholdSelector(opt *scheduleOption) *StorageThresholdSelector

func (*StorageThresholdSelector) CanSelect

func (sel *StorageThresholdSelector) CanSelect(node *Node) bool

func (*StorageThresholdSelector) Name

func (sel *StorageThresholdSelector) Name() string

type Store

type Store interface {
	Open() error
	Put(key, value []byte) error
	Delete(key []byte) error
	Get(key []byte) ([]byte, error)
	Scan(startKey, limitKey []byte) Iterator
	NewBatch() Batch
	Close() error
}

func NewLevelDBDriver

func NewLevelDBDriver(path string) (Store, error)
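
A minimal sketch of the Store interface above using the LevelDB driver; the path is a placeholder and error handling is abbreviated.

func storeSketch() error {
	s, err := NewLevelDBDriver("/tmp/ms-meta-test") // hypothetical local path
	if err != nil {
		return err
	}
	if err := s.Open(); err != nil {
		return err
	}
	defer s.Close()
	if err := s.Put([]byte("key"), []byte("value")); err != nil {
		return err
	}
	v, err := s.Get([]byte("key"))
	if err != nil {
		return err
	}
	_ = v
	return nil
}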

type StoreConfig

type StoreConfig struct {
	RaftRetainLogs        int64
	RaftHeartbeatInterval time.Duration
	RaftHeartbeatAddr     string
	RaftReplicateAddr     string
	RaftPeers             []*Peer

	NodeID   uint64
	DataPath string

	LeaderChangeHandler raftgroup.RaftLeaderChangeHandler
	FatalHandler        raftgroup.RaftFatalEventHandler
}

type Table

type Table struct {
	*metapb.Table
	// contains filtered or unexported fields
}

func NewTable

func NewTable(t *metapb.Table) *Table

func (*Table) GenColId

func (t *Table) GenColId() uint64

func (*Table) GetColumnById

func (t *Table) GetColumnById(id uint64) (*metapb.Column, bool)

func (*Table) GetColumnByName

func (t *Table) GetColumnByName(name string) (*metapb.Column, bool)

func (*Table) GetColumns

func (t *Table) GetColumns() []*metapb.Column

func (*Table) GetPkColumns

func (t *Table) GetPkColumns() []*metapb.Column

func (*Table) MergeColumn

func (t *Table) MergeColumn(source []*metapb.Column, cluster *Cluster) error

func (*Table) Name

func (t *Table) Name() string

func (*Table) UpdateSchema

func (t *Table) UpdateSchema(columns []*metapb.Column, store Store) ([]*metapb.Column, error)

type TableCache

type TableCache struct {
	// contains filtered or unexported fields
}

func NewTableCache

func NewTableCache() *TableCache

func (*TableCache) Add

func (tc *TableCache) Add(t *Table)

func (*TableCache) DeleteById

func (tc *TableCache) DeleteById(id uint64)

func (*TableCache) DeleteByName

func (tc *TableCache) DeleteByName(name string)

func (*TableCache) FindTableById

func (tc *TableCache) FindTableById(id uint64) (*Table, bool)

func (*TableCache) FindTableByName

func (tc *TableCache) FindTableByName(name string) (*Table, bool)

func (*TableCache) GetAllTable

func (tc *TableCache) GetAllTable() []*Table

func (*TableCache) Size

func (tc *TableCache) Size() int

type TableProperty

type TableProperty struct {
	Columns []*metapb.Column `json:"columns"`
	Regxs   []*metapb.Column `json:"regxs"`
}

type TrashReplicaGCWorker

type TrashReplicaGCWorker struct {
	// contains filtered or unexported fields
}

func (*TrashReplicaGCWorker) AllowWork

func (tr *TrashReplicaGCWorker) AllowWork(cluster *Cluster) bool

func (*TrashReplicaGCWorker) GetInterval

func (tr *TrashReplicaGCWorker) GetInterval() time.Duration

func (*TrashReplicaGCWorker) GetName

func (tr *TrashReplicaGCWorker) GetName() string

func (*TrashReplicaGCWorker) Stop

func (tr *TrashReplicaGCWorker) Stop()

func (*TrashReplicaGCWorker) Work

func (tr *TrashReplicaGCWorker) Work(cluster *Cluster)

type TryChangeLeaderEvent

type TryChangeLeaderEvent struct {
	RangeEventMeta
	// contains filtered or unexported fields
}

func NewTryChangeLeaderEvent

func NewTryChangeLeaderEvent(id, rangeId uint64, preLeader, expLeader *metapb.Peer, creator string) *TryChangeLeaderEvent

func (*TryChangeLeaderEvent) Execute

func (e *TryChangeLeaderEvent) Execute(cluster *Cluster, r *Range) (ExecNextEvent, *taskpb.Task, error)

type ValidHandler

type ValidHandler func(w http.ResponseWriter, r *http.Request) bool

type Worker

type Worker interface {
	GetName() string
	GetInterval() time.Duration
	AllowWork(cluster *Cluster) bool
	Work(cluster *Cluster)
	Stop()
}
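
A hedged sketch of a minimal Worker satisfying the interface above; the name, interval field, and leader check are illustrative assumptions, and "time" is assumed to be imported.

type noopWorker struct {
	interval time.Duration
}

func (w *noopWorker) GetName() string            { return "noop_worker" }
func (w *noopWorker) GetInterval() time.Duration { return w.interval }

// AllowWork only runs on the cluster leader (an assumed, typical guard).
func (w *noopWorker) AllowWork(cluster *Cluster) bool { return cluster.IsLeader() }

func (w *noopWorker) Work(cluster *Cluster) {
	// no-op: a real worker would inspect cluster state and schedule tasks here
}

func (w *noopWorker) Stop() {}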

func NewBalanceNodeLeaderWorker

func NewBalanceNodeLeaderWorker(wm *WorkerManager, interval time.Duration) Worker

func NewCreateTableWorker

func NewCreateTableWorker(wm *WorkerManager, interval time.Duration) Worker

func NewDeleteTableWorker

func NewDeleteTableWorker(wm *WorkerManager, interval time.Duration) Worker

func NewFailoverWorker

func NewFailoverWorker(wm *WorkerManager, interval time.Duration) Worker

func NewRangeHbCheckWorker

func NewRangeHbCheckWorker(wm *WorkerManager, interval time.Duration) Worker

func NewTrashReplicaGCWorker

func NewTrashReplicaGCWorker(wm *WorkerManager, interval time.Duration) Worker

type WorkerManager

type WorkerManager struct {
	// contains filtered or unexported fields
}

func NewWorkerManager

func NewWorkerManager(cluster *Cluster, opt *scheduleOption) *WorkerManager

func (*WorkerManager) GetAllWorker

func (wm *WorkerManager) GetAllWorker() []string

func (*WorkerManager) GetWorker

func (wm *WorkerManager) GetWorker(workerName string) string

func (*WorkerManager) Run

func (wm *WorkerManager) Run()

func (*WorkerManager) Stop

func (wm *WorkerManager) Stop()

type WriterOpsThresholdSelector

type WriterOpsThresholdSelector struct {
	// contains filtered or unexported fields
}

WriterOpsThresholdSelector ensures that we will not use a node that is already near its write-ops threshold as a target.

func NewWriterOpsThresholdSelector

func NewWriterOpsThresholdSelector(opt *scheduleOption) *WriterOpsThresholdSelector

func (*WriterOpsThresholdSelector) CanSelect

func (sel *WriterOpsThresholdSelector) CanSelect(node *Node) bool

func (*WriterOpsThresholdSelector) Name

func (sel *WriterOpsThresholdSelector) Name() string
