Documentation ¶
Index ¶
- Constants
- type ActiveTable
- func (t *ActiveTable) Delete(ctx context.Context, req *regattapb.DeleteRangeRequest) (*regattapb.DeleteRangeResponse, error)
- func (t *ActiveTable) Iterator(ctx context.Context, req *regattapb.RangeRequest) (iter.Seq[*regattapb.ResponseOp_Range], error)
- func (t *ActiveTable) LeaderIndex(ctx context.Context, linearizable bool) (*fsm.IndexResponse, error)
- func (t *ActiveTable) LocalIndex(ctx context.Context, linearizable bool) (*fsm.IndexResponse, error)
- func (t *ActiveTable) Put(ctx context.Context, req *regattapb.PutRequest) (*regattapb.PutResponse, error)
- func (t *ActiveTable) Range(ctx context.Context, req *regattapb.RangeRequest) (*regattapb.RangeResponse, error)
- func (t *ActiveTable) Reset(ctx context.Context) error
- func (t *ActiveTable) Snapshot(ctx context.Context, writer io.Writer) (*fsm.SnapshotResponse, error)
- func (t *ActiveTable) Txn(ctx context.Context, req *regattapb.TxnRequest) (*regattapb.TxnResponse, error)
- type Cleanup
- type Config
- type Lease
- type Manager
- func (m *Manager) Close()
- func (m *Manager) CreateTable(name string) (Table, error)
- func (m *Manager) DeleteTable(name string) error
- func (m *Manager) GetTable(name string) (ActiveTable, error)
- func (m *Manager) GetTableByID(id uint64) (ActiveTable, error)
- func (m *Manager) GetTables() ([]Table, error)
- func (m *Manager) LeaseTable(name string, lease time.Duration) error
- func (m *Manager) Restore(name string, reader io.Reader) error
- func (m *Manager) ReturnTable(name string) (bool, error)
- func (m *Manager) Start()
- type MetaConfig
- type SnapshotRecoveryType
- type Table
- type TableConfig
Constants ¶
const MaxValueLen = 2 * 1024 * 1024
MaxValueLen is the maximum value size (2 MB).
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type ActiveTable ¶
type ActiveTable struct {
    Table
    // contains filtered or unexported fields
}
An ActiveTable can be queried, and new proposals can be made through it.
func (*ActiveTable) Delete ¶
func (t *ActiveTable) Delete(ctx context.Context, req *regattapb.DeleteRangeRequest) (*regattapb.DeleteRangeResponse, error)
Delete performs a DeleteRange proposal through Raft; the supplied context must have a deadline set.
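For illustration, a minimal sketch of the deadline-context pattern the proposal methods require. The import paths (github.com/jamf/regatta/...) and the Table/Key field names on DeleteRangeRequest are assumptions, not verified API details; later sketches assume similar imports.

import (
    "context"
    "time"

    // assumed import paths
    "github.com/jamf/regatta/regattapb"
    "github.com/jamf/regatta/storage/table"
)

func deleteKey(at *table.ActiveTable) error {
    // Proposal methods require a context with a deadline.
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    // Table and Key field names are assumed for illustration.
    _, err := at.Delete(ctx, &regattapb.DeleteRangeRequest{
        Table: []byte("example"),
        Key:   []byte("some-key"),
    })
    return err
}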
func (*ActiveTable) Iterator ¶ added in v0.4.0
func (t *ActiveTable) Iterator(ctx context.Context, req *regattapb.RangeRequest) (iter.Seq[*regattapb.ResponseOp_Range], error)
Iterator returns an open pebble.Iterator; it is the API consumer's responsibility to close it.
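Since the method returns an iter.Seq, it composes with a Go 1.23+ range-over-func loop; a sketch, where the RangeRequest fields and the Kvs field on ResponseOp_Range are assumptions modeled on the etcd API:

func scan(ctx context.Context, at *table.ActiveTable) error {
    seq, err := at.Iterator(ctx, &regattapb.RangeRequest{
        Table:    []byte("example"),
        Key:      []byte("a"),
        RangeEnd: []byte("z"), // assumed etcd-style [Key, RangeEnd) semantics
    })
    if err != nil {
        return err
    }
    // Consume the sequence fully; presumably this is how the underlying
    // iterator gets released (assumption based on the note above).
    for page := range seq {
        for _, kv := range page.Kvs { // Kvs field assumed
            fmt.Printf("%s=%s\n", kv.Key, kv.Value)
        }
    }
    return nil
}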
func (*ActiveTable) LeaderIndex ¶
func (t *ActiveTable) LeaderIndex(ctx context.Context, linearizable bool) (*fsm.IndexResponse, error)
LeaderIndex returns the leader index.
func (*ActiveTable) LocalIndex ¶
func (t *ActiveTable) LocalIndex(ctx context.Context, linearizable bool) (*fsm.IndexResponse, error)
LocalIndex returns the local index.
func (*ActiveTable) Put ¶
func (t *ActiveTable) Put(ctx context.Context, req *regattapb.PutRequest) (*regattapb.PutResponse, error)
Put performs a Put proposal through Raft; the supplied context must have a deadline set.
func (*ActiveTable) Range ¶
func (t *ActiveTable) Range(ctx context.Context, req *regattapb.RangeRequest) (*regattapb.RangeResponse, error)
Range performs a Range query over the Raft data; the supplied context must have a deadline set.
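A point-read sketch under the same assumptions; Linearizable as a field name is likewise an assumption:

func read(at *table.ActiveTable) (*regattapb.RangeResponse, error) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    return at.Range(ctx, &regattapb.RangeRequest{
        Table:        []byte("example"),
        Key:          []byte("some-key"),
        Linearizable: true, // assumed: request a linearizable read
    })
}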
func (*ActiveTable) Reset ¶
func (t *ActiveTable) Reset(ctx context.Context) error
Reset resets the leader index to 0.
func (*ActiveTable) Snapshot ¶
func (t *ActiveTable) Snapshot(ctx context.Context, writer io.Writer) (*fsm.SnapshotResponse, error)
Snapshot streams a snapshot to the provided writer.
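Because Snapshot accepts any io.Writer, persisting a snapshot to a file is straightforward; a sketch (the path is illustrative):

func snapshotToFile(ctx context.Context, at *table.ActiveTable, path string) error {
    f, err := os.Create(path)
    if err != nil {
        return err
    }
    defer f.Close()

    // Stream the table snapshot into the file; the response carries
    // snapshot metadata.
    _, err = at.Snapshot(ctx, f)
    return err
}

The resulting file can later be handed to Manager.Restore, which reads a snapshot from an io.Reader.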
func (*ActiveTable) Txn ¶
func (t *ActiveTable) Txn(ctx context.Context, req *regattapb.TxnRequest) (*regattapb.TxnResponse, error)
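Txn presumably follows the same deadline-context pattern as Put and Delete. A hedged sketch, where everything beyond the method call itself — the Table field and the etcd-style Compare/Success/Failure members — is an assumption:

func txn(at *table.ActiveTable) (*regattapb.TxnResponse, error) {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    return at.Txn(ctx, &regattapb.TxnRequest{
        Table: []byte("example"),
        // Compare / Success / Failure (assumed, etcd-style) would be
        // populated with regattapb comparison and request operations,
        // whose exact Go shapes are not shown in this doc.
    })
}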
type Config ¶ added in v0.2.0
type Config struct {
    // NodeID is a non-zero value used to identify a node within a Raft cluster.
    NodeID uint64

    // Table is a configuration for table OnDisk state machines.
    Table TableConfig

    // Meta is a configuration for the metadata in-memory state machine.
    Meta MetaConfig
}
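A construction sketch; the RTT, snapshot, and compaction values repeat the worked examples from the TableConfig and MetaConfig field docs below, and the data directory is illustrative:

func exampleConfig() table.Config {
    return table.Config{
        NodeID: 1, // must be non-zero
        Table: table.TableConfig{
            ElectionRTT:        10,    // ~1s election interval at RTTMillisecond=100
            HeartbeatRTT:       2,     // ~200ms heartbeats at RTTMillisecond=100
            SnapshotEntries:    10000, // snapshot roughly every 10,000 proposals
            CompactionOverhead: 500,   // keep the 500 most recent entries after compaction
            DataDir:            "/var/lib/regatta/tables", // illustrative path
        },
        Meta: table.MetaConfig{
            ElectionRTT:        10,
            HeartbeatRTT:       2,
            SnapshotEntries:    10000,
            CompactionOverhead: 500,
        },
    }
}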
type Manager ¶ added in v0.2.0
type Manager struct {
// contains filtered or unexported fields
}
func NewManager ¶ added in v0.2.0
func (*Manager) CreateTable ¶ added in v0.2.0
func (m *Manager) CreateTable(name string) (Table, error)
func (*Manager) DeleteTable ¶ added in v0.2.0
func (m *Manager) DeleteTable(name string) error
func (*Manager) GetTable ¶ added in v0.2.0
func (m *Manager) GetTable(name string) (ActiveTable, error)
func (*Manager) GetTableByID ¶ added in v0.2.0
func (m *Manager) GetTableByID(id uint64) (ActiveTable, error)
func (*Manager) LeaseTable ¶ added in v0.2.0
func (m *Manager) LeaseTable(name string, lease time.Duration) error
func (*Manager) ReturnTable ¶ added in v0.2.0
func (m *Manager) ReturnTable(name string) (bool, error)
ReturnTable returns true if the table was previously leased.
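Putting the Manager methods together, a hedged lifecycle sketch; NewManager's signature is not shown above, so construction is elided, and the PutRequest field names remain assumptions:

func run(m *table.Manager) error {
    m.Start()       // start the manager's background machinery
    defer m.Close() // release resources on shutdown

    if _, err := m.CreateTable("example"); err != nil {
        return err
    }

    at, err := m.GetTable("example")
    if err != nil {
        return err
    }

    // ActiveTable proposals need a deadline-bound context.
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    _, err = at.Put(ctx, &regattapb.PutRequest{
        Table: []byte("example"), // field names assumed, as above
        Key:   []byte("hello"),
        Value: []byte("world"),
    })
    return err
}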
type MetaConfig ¶ added in v0.2.0
type MetaConfig struct {
    // ElectionRTT is the minimum number of message RTT between elections. Message
    // RTT is defined by NodeHostConfig.RTTMillisecond. The Raft paper suggests it
    // to be a magnitude greater than HeartbeatRTT, which is the interval between
    // two heartbeats. In Raft, the actual interval between elections is
    // randomized to be between ElectionRTT and 2 * ElectionRTT.
    //
    // As an example, assuming NodeHostConfig.RTTMillisecond is 100 milliseconds,
    // to set the election interval to 1 second, ElectionRTT should be set to 10.
    //
    // When CheckQuorum is enabled, ElectionRTT also defines the interval for
    // checking leader quorum.
    ElectionRTT uint64

    // HeartbeatRTT is the number of message RTT between heartbeats. Message
    // RTT is defined by NodeHostConfig.RTTMillisecond. The Raft paper suggests the
    // heartbeat interval to be close to the average RTT between nodes.
    //
    // As an example, assuming NodeHostConfig.RTTMillisecond is 100 milliseconds,
    // to set the heartbeat interval to 200 milliseconds, HeartbeatRTT should be
    // set to 2.
    HeartbeatRTT uint64

    // SnapshotEntries defines how often the state machine should be snapshotted
    // automatically. It is defined in terms of the number of applied Raft log
    // entries. SnapshotEntries can be set to 0 to disable such automatic
    // snapshotting.
    //
    // When SnapshotEntries is set to N, it means a snapshot is created for
    // roughly every N applied Raft log entries (proposals). This also implies
    // that sending N log entries to a follower is more expensive than sending a
    // snapshot.
    //
    // Once a snapshot is generated, Raft log entries covered by the new snapshot
    // can be compacted. This involves two steps: redundant log entries are first
    // marked as deleted, then they are physically removed from the underlying
    // storage when a LogDB compaction is issued at a later stage. See the godoc
    // on CompactionOverhead for details on what log entries are actually removed
    // and compacted after generating a snapshot.
    //
    // Once automatic snapshotting is disabled by setting the SnapshotEntries
    // field to 0, users can still use NodeHost's RequestSnapshot or
    // SyncRequestSnapshot methods to manually request snapshots.
    SnapshotEntries uint64

    // CompactionOverhead defines the number of most recent entries to keep after
    // each Raft log compaction. Raft log compaction is performed automatically
    // every time a snapshot is created.
    //
    // For example, when a snapshot is created at, say, index 10,000, all
    // Raft log entries with index <= 10,000 can be removed from that node as they
    // have already been covered by the created snapshot image. This frees up the
    // maximum storage space but comes at the cost that the full snapshot will
    // have to be sent to the follower if the follower requires any Raft log entry
    // at index <= 10,000. When CompactionOverhead is set to, say, 500, Dragonboat
    // then compacts the Raft log up to index 9,500 and keeps Raft log entries
    // between index (9,500, 10,000]. As a result, the node can still replicate
    // Raft log entries between index (9,500, 10,000] to other peers and only
    // falls back to streaming the full snapshot if any Raft log entry with index
    // <= 9,500 is required to be replicated.
    CompactionOverhead uint64

    // MaxInMemLogSize is the target size in bytes allowed for storing in-memory
    // Raft logs on each Raft node. In-memory Raft logs are the ones that have
    // not been applied yet.
    // MaxInMemLogSize is a target value implemented to prevent unbounded memory
    // growth; it is not for precisely limiting the exact memory usage.
    // When MaxInMemLogSize is 0, the target is set to math.MaxUint64. When
    // MaxInMemLogSize is set and the target is reached, an error will be returned
    // when clients try to make new proposals.
    // MaxInMemLogSize is recommended to be significantly larger than the biggest
    // proposal you are going to use.
    MaxInMemLogSize uint64
}
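The RTT fields are multipliers, not durations; spelling out the arithmetic from the field docs above:

func exampleMetaConfig() table.MetaConfig {
    // With NodeHostConfig.RTTMillisecond = 100 (the value used in the
    // field docs), the multipliers below yield:
    //   election interval  = 10 * 100ms = 1s (randomized up to 2s)
    //   heartbeat interval =  2 * 100ms = 200ms
    return table.MetaConfig{
        ElectionRTT:  10,
        HeartbeatRTT: 2,
    }
}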
type SnapshotRecoveryType ¶ added in v0.2.0
type SnapshotRecoveryType fsm.SnapshotRecoveryType
const (
    RecoveryTypeSnapshot SnapshotRecoveryType = iota
    RecoveryTypeCheckpoint
)
type Table ¶
type Table struct {
    Name      string `json:"name"`
    ClusterID uint64 `json:"cluster_id"`
    RecoverID uint64 `json:"recover_id"`
}
Table is the stored representation of a table.
func (Table) AsActive ¶
func (t Table) AsActive(host raftHandler) ActiveTable
AsActive returns an ActiveTable wrapper of this table.
type TableConfig ¶ added in v0.2.0
type TableConfig struct {
    // ElectionRTT is the minimum number of message RTT between elections. Message
    // RTT is defined by NodeHostConfig.RTTMillisecond. The Raft paper suggests it
    // to be a magnitude greater than HeartbeatRTT, which is the interval between
    // two heartbeats. In Raft, the actual interval between elections is
    // randomized to be between ElectionRTT and 2 * ElectionRTT.
    //
    // As an example, assuming NodeHostConfig.RTTMillisecond is 100 milliseconds,
    // to set the election interval to 1 second, ElectionRTT should be set to 10.
    //
    // When CheckQuorum is enabled, ElectionRTT also defines the interval for
    // checking leader quorum.
    ElectionRTT uint64

    // HeartbeatRTT is the number of message RTT between heartbeats. Message
    // RTT is defined by NodeHostConfig.RTTMillisecond. The Raft paper suggests the
    // heartbeat interval to be close to the average RTT between nodes.
    //
    // As an example, assuming NodeHostConfig.RTTMillisecond is 100 milliseconds,
    // to set the heartbeat interval to 200 milliseconds, HeartbeatRTT should be
    // set to 2.
    HeartbeatRTT uint64

    // SnapshotEntries defines how often the state machine should be snapshotted
    // automatically. It is defined in terms of the number of applied Raft log
    // entries. SnapshotEntries can be set to 0 to disable such automatic
    // snapshotting.
    //
    // When SnapshotEntries is set to N, it means a snapshot is created for
    // roughly every N applied Raft log entries (proposals). This also implies
    // that sending N log entries to a follower is more expensive than sending a
    // snapshot.
    //
    // Once a snapshot is generated, Raft log entries covered by the new snapshot
    // can be compacted. This involves two steps: redundant log entries are first
    // marked as deleted, then they are physically removed from the underlying
    // storage when a LogDB compaction is issued at a later stage. See the godoc
    // on CompactionOverhead for details on what log entries are actually removed
    // and compacted after generating a snapshot.
    //
    // Once automatic snapshotting is disabled by setting the SnapshotEntries
    // field to 0, users can still use NodeHost's RequestSnapshot or
    // SyncRequestSnapshot methods to manually request snapshots.
    SnapshotEntries uint64

    // CompactionOverhead defines the number of most recent entries to keep after
    // each Raft log compaction. Raft log compaction is performed automatically
    // every time a snapshot is created.
    //
    // For example, when a snapshot is created at, say, index 10,000, all
    // Raft log entries with index <= 10,000 can be removed from that node as they
    // have already been covered by the created snapshot image. This frees up the
    // maximum storage space but comes at the cost that the full snapshot will
    // have to be sent to the follower if the follower requires any Raft log entry
    // at index <= 10,000. When CompactionOverhead is set to, say, 500, Dragonboat
    // then compacts the Raft log up to index 9,500 and keeps Raft log entries
    // between index (9,500, 10,000]. As a result, the node can still replicate
    // Raft log entries between index (9,500, 10,000] to other peers and only
    // falls back to streaming the full snapshot if any Raft log entry with index
    // <= 9,500 is required to be replicated.
    CompactionOverhead uint64

    // MaxInMemLogSize is the target size in bytes allowed for storing in-memory
    // Raft logs on each Raft node. In-memory Raft logs are the ones that have
    // not been applied yet.
    // MaxInMemLogSize is a target value implemented to prevent unbounded memory
    // growth; it is not for precisely limiting the exact memory usage.
    // When MaxInMemLogSize is 0, the target is set to math.MaxUint64. When
    // MaxInMemLogSize is set and the target is reached, an error will be returned
    // when clients try to make new proposals.
    // MaxInMemLogSize is recommended to be significantly larger than the biggest
    // proposal you are going to use.
    MaxInMemLogSize uint64

    // FS is the filesystem to use for IOnDiskStateMachine; useful for testing.
    // Uses the real vfs.Default if nil.
    FS vfs.FS

    // DataDir is where table data is stored.
    DataDir string

    // BlockCacheSize is the shared block cache size in bytes; the cache is used
    // to hold uncompressed blocks of data in memory.
    BlockCacheSize int64

    // TableCacheSize is the shared table cache size; the cache is used to hold
    // handles to open SSTs.
    TableCacheSize int

    // RecoveryType is the in-cluster snapshot recovery type.
    RecoveryType SnapshotRecoveryType

    AppliedIndexListener func(table string, rev uint64)
}
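A sketch of the table-specific knobs; vfs.NewMem is assumed to come from github.com/cockroachdb/pebble/vfs (the field doc references vfs.Default), and the sizes, path, and hook body are illustrative:

func exampleTableConfig() table.TableConfig {
    return table.TableConfig{
        ElectionRTT:        10,
        HeartbeatRTT:       2,
        SnapshotEntries:    10000,
        CompactionOverhead: 500,
        FS:                 vfs.NewMem(), // in-memory filesystem for tests; nil selects vfs.Default
        DataDir:            "/var/lib/regatta/tables",
        BlockCacheSize:     128 << 20, // 128 MiB for uncompressed data blocks
        TableCacheSize:     1024,      // handles to open SSTs
        RecoveryType:       table.RecoveryTypeSnapshot,
        AppliedIndexListener: func(tbl string, rev uint64) {
            log.Printf("table %s applied revision %d", tbl, rev)
        },
    }
}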