cluster

package
v1.6.1
Published: Apr 15, 2021 License: MIT Imports: 11 Imported by: 0

Documentation


Constants

const (
	ErrorParamsEmpty liberr.CodeError = iota + liberr.MinPkgCluster
	ErrorParamsMissing
	ErrorParamsMismatching
	ErrorLeader
	ErrorLeaderTransfer
	ErrorNodeUser
	ErrorNodeHostNew
	ErrorNodeHostStart
	ErrorNodeHostJoin
	ErrorNodeHostStop
	ErrorNodeHostRestart
	ErrorCommandSync
	ErrorCommandASync
	ErrorCommandLocal
	ErrorValidateConfig
	ErrorValidateCluster
	ErrorValidateNode
	ErrorValidateGossip
	ErrorValidateExpert
	ErrorValidateEngine
)

Variables

This section is empty.

Functions

func IsCodeError

func IsCodeError() bool

Types

type Cluster

type Cluster interface {
	GetConfig() dgbcfg.Config
	SetConfig(cfg dgbcfg.Config)
	GetNodeHostConfig() dgbcfg.NodeHostConfig

	GetFctCreate() dgbstm.CreateStateMachineFunc
	GetFctCreateConcurrent() dgbstm.CreateConcurrentStateMachineFunc
	GetFctCreateOnDisk() dgbstm.CreateOnDiskStateMachineFunc
	SetFctCreate(fctCreate interface{})
	SetFctCreateSTM(fctCreate dgbstm.CreateStateMachineFunc)
	SetFctCreateSTMConcurrent(fctCreate dgbstm.CreateConcurrentStateMachineFunc)
	SetFctCreateSTMOnDisk(fctCreate dgbstm.CreateOnDiskStateMachineFunc)

	GetMemberInit() map[uint64]dgbclt.Target
	SetMemberInit(memberList map[uint64]dgbclt.Target)

	SetTimeoutCommandSync(timeout time.Duration)
	SetTimeoutCommandASync(timeout time.Duration)

	HasNodeInfo(nodeId uint64) bool
	RaftAddress() string
	ID() string

	ClusterStart(join bool) liberr.Error
	ClusterStop(force bool) liberr.Error
	ClusterRestart(force bool) liberr.Error

	NodeStop(target uint64) liberr.Error
	NodeRestart(force bool) liberr.Error

	GetLeaderID() (leader uint64, valid bool, err liberr.Error)
	GetNoOPSession() *dgbcli.Session
	GetNodeHostInfo(opt dgbclt.NodeHostInfoOption) *dgbclt.NodeHostInfo
	RequestLeaderTransfer(targetNodeID uint64) liberr.Error

	StaleReadDangerous(query interface{}) (interface{}, error)

	SyncPropose(parent context.Context, session *dgbcli.Session, cmd []byte) (dgbstm.Result, liberr.Error)
	SyncRead(parent context.Context, query interface{}) (interface{}, liberr.Error)
	SyncGetClusterMembership(parent context.Context) (*dgbclt.Membership, liberr.Error)
	SyncGetSession(parent context.Context) (*dgbcli.Session, liberr.Error)
	SyncCloseSession(parent context.Context, cs *dgbcli.Session) liberr.Error
	SyncRequestSnapshot(parent context.Context, opt dgbclt.SnapshotOption) (uint64, liberr.Error)
	SyncRequestDeleteNode(parent context.Context, nodeID uint64, configChangeIndex uint64) liberr.Error
	SyncRequestAddNode(parent context.Context, nodeID uint64, target string, configChangeIndex uint64) liberr.Error
	SyncRequestAddObserver(parent context.Context, nodeID uint64, target string, configChangeIndex uint64) liberr.Error
	SyncRequestAddWitness(parent context.Context, nodeID uint64, target string, configChangeIndex uint64) liberr.Error
	SyncRemoveData(parent context.Context, nodeID uint64) liberr.Error

	AsyncPropose(session *dgbcli.Session, cmd []byte) (*dgbclt.RequestState, liberr.Error)
	AsyncProposeSession(session *dgbcli.Session) (*dgbclt.RequestState, liberr.Error)
	AsyncReadIndex() (*dgbclt.RequestState, liberr.Error)
	AsyncRequestCompaction(nodeID uint64) (*dgbclt.SysOpState, liberr.Error)

	LocalReadNode(rs *dgbclt.RequestState, query interface{}) (interface{}, liberr.Error)
	LocalNAReadNode(rs *dgbclt.RequestState, query []byte) ([]byte, liberr.Error)
}
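
The Sync* methods above wrap dragonboat's synchronous NodeHost calls using the configured command timeout. Below is a hedged sketch of combining a few of them on an already started Cluster; the import path of this package is not stated on this page and is shown as a placeholder, and the node ID, address and query used are example values only.

package example

import (
	"context"
	"log"

	cluster "path/to/cluster" // placeholder import path for this package
)

// inspectAndGrow queries the leader and current membership of a started
// Cluster, then requests that one more node be added.
func inspectAndGrow(ctx context.Context, clu cluster.Cluster) {
	if leader, valid, err := clu.GetLeaderID(); err == nil && valid {
		log.Printf("current leader node: %d", leader)
	}

	// The returned Membership carries a ConfigChangeID, which is passed back
	// as configChangeIndex when requesting membership changes.
	m, err := clu.SyncGetClusterMembership(ctx)
	if err != nil {
		log.Printf("membership query failed: %v", err)
		return
	}

	// Node ID 4 and its target address are hypothetical example values.
	if err := clu.SyncRequestAddNode(ctx, 4, "10.0.0.104:24000", m.ConfigChangeID); err != nil {
		log.Printf("add node failed: %v", err)
	}

	// A linearizable read served by the state machine's Lookup method.
	if v, err := clu.SyncRead(ctx, "example-query"); err == nil {
		log.Printf("read result: %v", v)
	}
}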

func NewCluster

func NewCluster(cfg Config, fctCreate interface{}) (Cluster, liberr.Error)
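
NewCluster accepts the state machine factory as interface{}; per the setters above, it may be any of dragonboat's three factory function types. A minimal bootstrap sketch follows, assuming dragonboat v3 and a placeholder import path for this package; countSM is a trivial illustrative state machine written for this sketch, not something provided by the package.

package example

import (
	"context"
	"encoding/binary"
	"io"
	"log"
	"time"

	dgbstm "github.com/lni/dragonboat/v3/statemachine"

	cluster "path/to/cluster" // placeholder import path for this package
)

// countSM is a trivial in-memory state machine that counts applied proposals.
type countSM struct{ count uint64 }

func (s *countSM) Update(data []byte) (dgbstm.Result, error) {
	s.count++
	return dgbstm.Result{Value: s.count}, nil
}

func (s *countSM) Lookup(query interface{}) (interface{}, error) { return s.count, nil }

func (s *countSM) SaveSnapshot(w io.Writer, _ dgbstm.ISnapshotFileCollection, _ <-chan struct{}) error {
	return binary.Write(w, binary.LittleEndian, s.count)
}

func (s *countSM) RecoverFromSnapshot(r io.Reader, _ []dgbstm.SnapshotFile, _ <-chan struct{}) error {
	return binary.Read(r, binary.LittleEndian, &s.count)
}

func (s *countSM) Close() error { return nil }

// startAndPropose starts the local Raft node and submits one proposal.
func startAndPropose(cfg cluster.Config) {
	// The factory is passed as interface{}; a CreateStateMachineFunc selects
	// the regular in-memory state machine flavour.
	var fct dgbstm.CreateStateMachineFunc = func(clusterID, nodeID uint64) dgbstm.IStateMachine {
		return &countSM{}
	}

	clu, err := cluster.NewCluster(cfg, fct)
	if err != nil {
		log.Fatalf("new cluster: %v", err)
	}

	// join=false: this node is listed in the initial membership.
	if err := clu.ClusterStart(false); err != nil {
		log.Fatalf("cluster start: %v", err)
	}

	clu.SetTimeoutCommandSync(10 * time.Second)

	res, err := clu.SyncPropose(context.Background(), clu.GetNoOPSession(), []byte("increment"))
	if err != nil {
		log.Fatalf("propose: %v", err)
	}
	log.Printf("proposal applied, counter is now %d", res.Value)
}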

type Config

type Config struct {
	Node       ConfigNode        `mapstructure:"node" json:"node" yaml:"node" toml:"node"`
	Cluster    ConfigCluster     `mapstructure:"cluster" json:"cluster" yaml:"cluster" toml:"cluster"`
	InitMember map[uint64]string `mapstructure:"init_member" json:"init_member" yaml:"init_member" toml:"init_member"`
}

func (Config) GetDGBConfigCluster

func (c Config) GetDGBConfigCluster() dgbcfg.Config

func (Config) GetDGBConfigNode

func (c Config) GetDGBConfigNode() dgbcfg.NodeHostConfig

func (Config) GetInitMember

func (c Config) GetInitMember() map[uint64]dgbclt.Target

func (Config) Validate

func (c Config) Validate() liberr.Error
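
A hedged sketch of filling Config for a three replica cluster and validating it before handing it to NewCluster; the directories, addresses and IDs are example values only, and the import path of this package is a placeholder.

package example

import (
	"log"

	cluster "path/to/cluster" // placeholder import path for this package
)

func buildConfig() cluster.Config {
	cfg := cluster.Config{
		Node: cluster.ConfigNode{
			DeploymentID:   1,
			NodeHostDir:    "/var/lib/raft/nodehost",
			RTTMillisecond: 200,
			RaftAddress:    "10.0.0.100:24000",
			EnableMetrics:  true,
		},
		Cluster: cluster.ConfigCluster{
			NodeID:      1,
			ClusterID:   128,
			CheckQuorum: true,
			// With RTTMillisecond set to 200, heartbeats go out roughly every
			// 200 ms and elections start after about 2 s without a leader.
			ElectionRTT:        10,
			HeartbeatRTT:       1,
			SnapshotEntries:    10000,
			CompactionOverhead: 500,
		},
		// InitMember maps node IDs to their RaftAddress (or NodeHostID when
		// AddressByNodeHostID is enabled); GetInitMember converts it to the
		// map[uint64]dgbclt.Target expected by dragonboat.
		InitMember: map[uint64]string{
			1: "10.0.0.100:24000",
			2: "10.0.0.200:24000",
			3: "10.0.0.201:24000",
		},
	}

	if err := cfg.Validate(); err != nil {
		log.Fatalf("invalid cluster config: %v", err)
	}

	return cfg
}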

type ConfigCluster

type ConfigCluster struct {
	// NodeID is a non-zero value used to identify a node within a Raft cluster.
	NodeID uint64 `mapstructure:"node_id" json:"node_id" yaml:"node_id" toml:"node_id"`

	// ClusterID is the unique value used to identify a Raft cluster.
	ClusterID uint64 `mapstructure:"cluster_id" json:"cluster_id" yaml:"cluster_id" toml:"cluster_id"`

	// CheckQuorum specifies whether the leader node should periodically check
	// non-leader node status and step down to become a follower node when it no
	// longer has the quorum.
	CheckQuorum bool `mapstructure:"check_quorum" json:"check_quorum" yaml:"check_quorum" toml:"check_quorum"`

	// ElectionRTT is the minimum number of message RTT between elections. Message
	// RTT is defined by NodeHostConfig.RTTMillisecond. The Raft paper suggests it
	// to be a magnitude greater than HeartbeatRTT, which is the interval between
	// two heartbeats. In Raft, the actual interval between elections is
	// randomized to be between ElectionRTT and 2 * ElectionRTT.
	//
	// As an example, assuming NodeHostConfig.RTTMillisecond is 100 millisecond,
	// to set the election interval to be 1 second, then ElectionRTT should be set
	// to 10.
	//
	// When CheckQuorum is enabled, ElectionRTT also defines the interval for
	// checking leader quorum.
	ElectionRTT uint64 `mapstructure:"election_rtt" json:"election_rtt" yaml:"election_rtt" toml:"election_rtt"`

	// HeartbeatRTT is the number of message RTT between heartbeats. Message
	// RTT is defined by NodeHostConfig.RTTMillisecond. The Raft paper suggests the
	// heartbeat interval to be close to the average RTT between nodes.
	//
	// As an example, assuming NodeHostConfig.RTTMillisecond is 100 millisecond,
	// to set the heartbeat interval to be every 200 milliseconds, then
	// HeartbeatRTT should be set to 2.
	HeartbeatRTT uint64 `mapstructure:"heartbeat_rtt" json:"heartbeat_rtt" yaml:"heartbeat_rtt" toml:"heartbeat_rtt"`

	// SnapshotEntries defines how often the state machine should be snapshotted
	// automatically. It is defined in terms of the number of applied Raft log
	// entries. SnapshotEntries can be set to 0 to disable such automatic
	// snapshotting.
	//
	// When SnapshotEntries is set to N, it means a snapshot is created for
	// roughly every N applied Raft log entries (proposals). This also implies
	// that sending N log entries to a follower is more expensive than sending a
	// snapshot.
	//
	// Once a snapshot is generated, Raft log entries covered by the new snapshot
	// can be compacted. This involves two steps: redundant log entries are first
	// marked as deleted, then they are physically removed from the underlying
	// storage when a LogDB compaction is issued at a later stage. See the godoc
	// on CompactionOverhead for details on what log entries are actually removed
	// and compacted after generating a snapshot.
	//
	// Once automatic snapshotting is disabled by setting the SnapshotEntries
	// field to 0, users can still use NodeHost's RequestSnapshot or
	// SyncRequestSnapshot methods to manually request snapshots.
	SnapshotEntries uint64 `mapstructure:"snapshot_entries" json:"snapshot_entries" yaml:"snapshot_entries" toml:"snapshot_entries"`

	// CompactionOverhead defines the number of most recent entries to keep after
	// each Raft log compaction. Raft log compaction is performed automatically
	// every time a snapshot is created.
	//
	// For example, when a snapshot is created at let's say index 10,000, then all
	// Raft log entries with index <= 10,000 can be removed from that node as they
	// have already been covered by the created snapshot image. This frees up the
	// maximum storage space but comes at the cost that the full snapshot will
	// have to be sent to the follower if the follower requires any Raft log entry
	// at index <= 10,000. When CompactionOverhead is set to say 500, Dragonboat
	// then compacts the Raft log up to index 9,500 and keeps Raft log entries
	// between index (9,500, 10,000]. As a result, the node can still replicate
	// Raft log entries between index (9,500, 10,000] to other peers and only fall
	// back to stream the full snapshot if any Raft log entry with index <= 9,500
	// is required to be replicated.
	CompactionOverhead uint64 `mapstructure:"compaction_overhead" json:"compaction_overhead" yaml:"compaction_overhead" toml:"compaction_overhead"`

	// OrderedConfigChange determines whether Raft membership change is enforced
	// with ordered config change ID.
	OrderedConfigChange bool `mapstructure:"ordered_config_change" json:"ordered_config_change" yaml:"ordered_config_change" toml:"ordered_config_change"`

	// MaxInMemLogSize is the target size in bytes allowed for storing in memory
	// Raft logs on each Raft node. In memory Raft logs are the ones that have
	// not been applied yet.
	// MaxInMemLogSize is a target value implemented to prevent unbounded memory
	// growth; it is not meant to precisely limit the exact memory usage.
	// When MaxInMemLogSize is 0, the target is set to math.MaxUint64. When
	// MaxInMemLogSize is set and the target is reached, error will be returned
	// when clients try to make new proposals.
	// MaxInMemLogSize is recommended to be significantly larger than the biggest
	// proposal you are going to use.
	MaxInMemLogSize uint64 `mapstructure:"max_in_mem_log_size" json:"max_in_mem_log_size" yaml:"max_in_mem_log_size" toml:"max_in_mem_log_size"`

	// SnapshotCompressionType is the compression type to use for compressing
	// generated snapshot data. No compression is used by default.
	SnapshotCompressionType dgbcfg.CompressionType `mapstructure:"snapshot_compression" json:"snapshot_compression" yaml:"snapshot_compression" toml:"snapshot_compression"`

	// EntryCompressionType is the compression type to use for compressing the
	// payload of user proposals. When Snappy is used, the maximum proposal
	// payload allowed is roughly limited to 3.42GBytes. No compression is used
	// by default.
	EntryCompressionType dgbcfg.CompressionType `mapstructure:"entry_compression" json:"entry_compression" yaml:"entry_compression" toml:"entry_compression"`

	// DisableAutoCompactions disables auto compaction used for reclaiming Raft
	// log entry storage spaces. By default, compaction request is issued every
	// time when a snapshot is created, this helps to reclaim disk spaces as
	// soon as possible at the cost of immediate higher IO overhead. Users can
	// disable such auto compactions and use NodeHost.RequestCompaction to
	// manually request such compactions when necessary.
	DisableAutoCompactions bool `` /* 135-byte string literal not displayed */

	// IsObserver indicates whether this is an observer Raft node without voting
	// power. Described as non-voting members in section 4.2.1 of Diego
	// Ongaro's thesis, observer nodes are usually used to allow a new node to
	// join the cluster and catch up with other existing nodes without impacting
	// the availability. Extra observer nodes can also be introduced to serve
	// read-only requests without affecting system write throughput.
	//
	// Observer support is currently experimental.
	IsObserver bool `mapstructure:"is_observer" json:"is_observer" yaml:"is_observer" toml:"is_observer"`

	// IsWitness indicates whether this is a witness Raft node without actual log
	// replication and without a state machine. It is described in section
	// 11.7.2 of Diego Ongaro's thesis.
	//
	// Witness support is currently experimental.
	IsWitness bool `mapstructure:"is_witness" json:"is_witness" yaml:"is_witness" toml:"is_witness"`

	// Quiesce specifies whether to let the Raft cluster enter quiesce mode when
	// there is no cluster activity. Clusters in quiesce mode do not exchange
	// heartbeat messages to minimize bandwidth consumption.
	//
	// Quiesce support is currently experimental.
	Quiesce bool `mapstructure:"quiesce" json:"quiesce" yaml:"quiesce" toml:"quiesce"`
}


func (ConfigCluster) GetDGBConfigCluster

func (c ConfigCluster) GetDGBConfigCluster() dgbcfg.Config

func (ConfigCluster) Validate

func (c ConfigCluster) Validate() liberr.Error
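
To make the RTT arithmetic in the field comments above concrete, a small hedged sketch (placeholder import path): assuming the node's RTTMillisecond is 100, the values below give an election interval of roughly one second, a heartbeat about every 200 milliseconds, a snapshot for roughly every 10,000 applied entries and 500 entries retained after each compaction.

package example

import cluster "path/to/cluster" // placeholder import path for this package

// Assuming ConfigNode.RTTMillisecond is 100 (one logical tick = 100 ms).
var raftCfg = cluster.ConfigCluster{
	NodeID:    1,
	ClusterID: 128,

	CheckQuorum: true,

	// 10 ticks * 100 ms => election interval of roughly one second.
	ElectionRTT: 10,
	// 2 ticks * 100 ms  => a heartbeat about every 200 ms.
	HeartbeatRTT: 2,

	// Snapshot roughly every 10,000 applied entries; after the compaction that
	// follows each snapshot, keep the most recent 500 entries.
	SnapshotEntries:    10000,
	CompactionOverhead: 500,
}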

type ConfigEngine

type ConfigEngine struct {
	// ExecShards is the number of execution shards in the first stage of the
	// execution engine. Default value is 16. Once deployed, this value can not
	// be changed later.
	ExecShards uint64 `mapstructure:"exec_shards" json:"exec_shards" yaml:"exec_shards" toml:"exec_shards"`

	// CommitShards is the number of commit shards in the second stage of the
	// execution engine. Default value is 16.
	CommitShards uint64 `mapstructure:"commit_shards" json:"commit_shards" yaml:"commit_shards" toml:"commit_shards"`

	// ApplyShards is the number of apply shards in the third stage of the
	// execution engine. Default value is 16.
	ApplyShards uint64 `mapstructure:"apply_shards" json:"apply_shards" yaml:"apply_shards" toml:"apply_shards"`

	// SnapshotShards is the number of snapshot shards in the fourth stage of the
	// execution engine. Default value is 48.
	SnapshotShards uint64 `mapstructure:"snapshot_shards" json:"snapshot_shards" yaml:"snapshot_shards" toml:"snapshot_shards"`

	// CloseShards is the number of close shards used for closing stopped
	// state machines. Default value is 32.
	CloseShards uint64 `mapstructure:"close_shards" json:"close_shards" yaml:"close_shards" toml:"close_shards"`
}

func (ConfigEngine) GetDGBConfigEngine

func (c ConfigEngine) GetDGBConfigEngine() dgbcfg.EngineConfig

func (ConfigEngine) Validate

func (c ConfigEngine) Validate() liberr.Error
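
A short hedged sketch that simply spells out the documented default shard counts (placeholder import path); per the comments above, ExecShards cannot be changed once deployed.

package example

import cluster "path/to/cluster" // placeholder import path for this package

// engineCfg mirrors the documented default shard counts.
var engineCfg = cluster.ConfigEngine{
	ExecShards:     16, // first stage: execution
	CommitShards:   16, // second stage: commit
	ApplyShards:    16, // third stage: apply
	SnapshotShards: 48, // fourth stage: snapshot
	CloseShards:    32, // closing stopped state machines
}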

type ConfigExpert

type ConfigExpert struct {
	// Engine is the configuration for the execution engine.
	Engine ConfigEngine `mapstructure:"engine" json:"engine" yaml:"engine" toml:"engine"`

	// TestNodeHostID is the NodeHostID value to be used by the NodeHost instance.
	// This field is expected to be used in tests only.
	TestNodeHostID uint64 `mapstructure:"test_node_host_id" json:"test_node_host_id" yaml:"test_node_host_id" toml:"test_node_host_id"`

	// TestGossipProbeInterval defines the probe interval used by the gossip
	// service in tests.
	TestGossipProbeInterval time.Duration `` /* 143-byte string literal not displayed */
}

func (ConfigExpert) GetDGBConfigExpert

func (c ConfigExpert) GetDGBConfigExpert() dgbcfg.ExpertConfig

func (ConfigExpert) Validate

func (c ConfigExpert) Validate() liberr.Error

type ConfigGossip

type ConfigGossip struct {
	// BindAddress is the address for the gossip service to bind to and listen on.
	// Both UDP and TCP ports are used by the gossip service. The local gossip
	// service should be able to receive gossip service related messages by
	// binding to and listening on this address. BindAddress is usually in the
	// format of IP:Port, Hostname:Port or DNS Name:Port.
	BindAddress string `mapstructure:"bind_address" json:"bind_address" yaml:"bind_address" toml:"bind_address"`

	// AdvertiseAddress is the address to advertise to other NodeHost instances
	// used for NAT traversal. Gossip services running on remote NodeHost
	// instances will use AdvertiseAddress to exchange gossip service related
	// messages. AdvertiseAddress is in the format of IP:Port.
	AdvertiseAddress string `mapstructure:"advertise_address" json:"advertise_address" yaml:"advertise_address" toml:"advertise_address"`

	// Seed is a list of AdvertiseAddress of remote NodeHost instances. Local
	// NodeHost instance will try to contact all of them to bootstrap the gossip
	// service. At least one reachable NodeHost instance is required to
	// successfully bootstrap the gossip service. Each seed address is in the
	// format of IP:Port, Hostname:Port or DNS Name:Port.
	//
	// It is ok to include seed addresses that are temporarily unreachable, e.g.
	// when launching the first NodeHost instance in your deployment, you can
	// include AdvertiseAddresses from other NodeHost instances that you plan to
	// launch shortly afterwards.
	Seed []string `mapstructure:"seed" json:"seed" yaml:"seed" toml:"seed"`
}

func (ConfigGossip) GetDGBConfigGossip

func (c ConfigGossip) GetDGBConfigGossip() dgbcfg.GossipConfig

func (ConfigGossip) Validate

func (c ConfigGossip) Validate() liberr.Error
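
A hedged sketch of a ConfigGossip for one NodeHost instance (placeholder import path, example addresses); AdvertiseAddress only needs to be set when the bind address is not directly reachable by the other instances, for example behind NAT.

package example

import cluster "path/to/cluster" // placeholder import path for this package

// gossipCfg configures the gossip service of the instance bound on
// 10.0.0.100; all addresses are example values.
var gossipCfg = cluster.ConfigGossip{
	BindAddress:      "10.0.0.100:24001",
	AdvertiseAddress: "10.0.0.100:24001",
	// Seed does not need to list every instance; a few reachable gossip
	// endpoints are enough to bootstrap.
	Seed: []string{"10.0.0.200:24001", "10.0.0.201:24001"},
}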

type ConfigNode

type ConfigNode struct {
	// DeploymentID is used to determine whether two NodeHost instances belong to
	// the same deployment and thus allowed to communicate with each other. This
	// helps to prevent accidentally misconfigured NodeHost instances from causing
	// data corruption errors by sending out-of-context messages to unrelated
	// Raft nodes.
	// For a particular dragonboat based application, you can set DeploymentID
	// to the same uint64 value on all production NodeHost instances, then use
	// different DeploymentID values on your staging and dev environment. It is
	// also recommended to use different DeploymentID values for different
	// dragonboat based applications.
	// When not set, the default value 0 will be used as the deployment ID and
	// thus allowing all NodeHost instances with deployment ID 0 to communicate
	// with each other.
	DeploymentID uint64 `mapstructure:"deployment_id" json:"deployment_id" yaml:"deployment_id" toml:"deployment_id"`

	// WALDir is the directory used for storing the WAL of Raft entries. It is
	// recommended to use low latency storage such as NVME SSD with power loss
	// protection to store such WAL data. Leaving WALDir with its zero value will
	// have everything stored in NodeHostDir.
	WALDir string `mapstructure:"wal_dir" json:"wal_dir" yaml:"wal_dir" toml:"wal_dir"`

	// NodeHostDir is where everything else is stored.
	NodeHostDir string `mapstructure:"node_host_dir" json:"node_host_dir" yaml:"node_host_dir" toml:"node_host_dir"`

	//nolint #godox
	// RTTMillisecond defines the average Round Trip Time (RTT) in milliseconds
	// between two NodeHost instances. Such an RTT interval is internally used as
	// a logical clock tick; Raft heartbeat and election intervals are both
	// defined in terms of how many such logical clock ticks (RTT intervals).
	// Note that RTTMillisecond is the combined delay between two NodeHost
	// instances, including all delays caused by network transmission and by
	// NodeHost queuing and processing. As an example, when fully loaded, the
	// average Round Trip Time between two of our NodeHost instances used for
	// benchmarking purposes is up to 500 microseconds when the ping time
	// between them is 100 microseconds. Set RTTMillisecond to 1 when it is less
	// than 1 millisecond in your environment.
	RTTMillisecond uint64 `mapstructure:"rtt_millisecond" json:"rtt_millisecond" yaml:"rtt_millisecond" toml:"rtt_millisecond"`

	// RaftAddress is a DNS name:port or IP:port address used by the transport
	// module for exchanging Raft messages, snapshots and metadata between
	// NodeHost instances. It should be set to the public address that can be
	// accessed from remote NodeHost instances.
	//
	// When the NodeHostConfig.ListenAddress field is empty, NodeHost listens on
	// RaftAddress for incoming Raft messages. When hostname or domain name is
	// used, it will be resolved to IPv4 addresses first and Dragonboat listens
	// to all resolved IPv4 addresses.
	//
	// By default, the RaftAddress value is not allowed to change between NodeHost
	// restarts. AddressByNodeHostID should be set to true when the RaftAddress
	// value might change after restart.
	RaftAddress string `mapstructure:"raft_address" json:"raft_address" yaml:"raft_address" toml:"raft_address"`

	//nolint #godox
	// AddressByNodeHostID indicates that NodeHost instances should be addressed
	// by their NodeHostID values. This feature is usually used when only dynamic
	// addresses are available. When enabled, NodeHostID values should be used
	// as the target parameter when calling NodeHost's StartCluster,
	// RequestAddNode, RequestAddObserver and RequestAddWitness methods.
	//
	// Enabling AddressByNodeHostID also enables the internal gossip service,
	// NodeHostConfig.Gossip must be configured to control the behaviors of the
	// gossip service.
	//
	// Note that once enabled, the AddressByNodeHostID setting can not be later
	// disabled after restarts.
	//
	// Please see the godocs of the NodeHostConfig.Gossip field for a detailed
	// example on how AddressByNodeHostID and gossip works.
	AddressByNodeHostID bool `` /* 131-byte string literal not displayed */

	// ListenAddress is an optional field in the hostname:port or IP:port address
	// form used by the transport module to listen on for Raft messages and
	// snapshots. When the ListenAddress field is not set, the transport module
	// listens on RaftAddress. If 0.0.0.0 is specified as the IP of the
	// ListenAddress, Dragonboat listens to the specified port on all network
	// interfaces. When hostname or domain name is used, it will be resolved to
	// IPv4 addresses first and Dragonboat listens to all resolved IPv4 addresses.
	ListenAddress string `mapstructure:"listen_address" json:"listen_address" yaml:"listen_address" toml:"listen_address"`

	// MutualTLS defines whether to use mutual TLS for authenticating servers
	// and clients. Insecure communication is used when MutualTLS is set to
	// False.
	// See https://github.com/lni/dragonboat/wiki/TLS-in-Dragonboat for more
	// details on how to use Mutual TLS.
	MutualTLS bool `mapstructure:"mutual_tls" json:"mutual_tls" yaml:"tls" toml:"tls"`

	// CAFile is the path of the CA certificate file. This field is ignored when
	// MutualTLS is false.
	CAFile string `mapstructure:"ca_file" json:"ca_file" yaml:"ca_file" toml:"ca_file"`

	// CertFile is the path of the node certificate file. This field is ignored
	// when MutualTLS is false.
	CertFile string `mapstructure:"cert_file" json:"cert_file" yaml:"cert_file" toml:"cert_file"`

	// KeyFile is the path of the node key file. This field is ignored when
	// MutualTLS is false.
	KeyFile string `mapstructure:"key_file" json:"key_file" yaml:"key_file" toml:"key_file"`

	// EnableMetrics determines whether health metrics in Prometheus format should
	// be enabled.
	EnableMetrics bool `mapstructure:"enable_metrics" json:"enable_metrics" yaml:"enable_metrics" toml:"enable_metrics"`

	// MaxSendQueueSize is the maximum size in bytes of each send queue.
	// Once the maximum size is reached, further replication messages will be
	// dropped to restrict memory usage. When set to 0, it means the send queue
	// size is unlimited.
	MaxSendQueueSize uint64 `mapstructure:"max_send_queue_size" json:"max_send_queue_size" yaml:"max_send_queue_size" toml:"max_send_queue_size"`

	// MaxReceiveQueueSize is the maximum size in bytes of each receive queue.
	// Once the maximum size is reached, further replication messages will be
	// dropped to restrict memory usage. When set to 0, it means the queue size
	// is unlimited.
	MaxReceiveQueueSize uint64 `` /* 127-byte string literal not displayed */

	// MaxSnapshotSendBytesPerSecond defines how much snapshot data can be sent
	// every second for all Raft clusters managed by the NodeHost instance.
	// The default value 0 means there is no limit set for snapshot streaming.
	MaxSnapshotSendBytesPerSecond uint64 `` /* 175-byte string literal not displayed */

	// MaxSnapshotRecvBytesPerSecond defines how much snapshot data can be
	// received each second for all Raft clusters managed by the NodeHost instance.
	// The default value 0 means there is no limit for receiving snapshot data.
	MaxSnapshotRecvBytesPerSecond uint64 `` /* 175-byte string literal not displayed */

	// NotifyCommit specifies whether clients should be notified when their
	// regular proposals and config change requests are committed. By default,
	// commits are not notified; clients are only notified when their proposals
	// are both committed and applied.
	NotifyCommit bool `mapstructure:"notify_commit" json:"notify_commit" yaml:"notify_commit" toml:"notify_commit"`

	// Gossip contains configurations for the gossip service. When the
	// AddressByNodeHostID field is set to true, each NodeHost instance will use
	// an internal gossip service to exchange knowledge of known NodeHost
	// instances including their RaftAddress and NodeHostID values. This Gossip
	// field contains configurations that control how the gossip service works.
	//
	// As a detailed example of how to use the gossip service in the situation
	// where all available machines have dynamically assigned IPs on reboot -
	//
	// Consider that there are three NodeHost instances on three machines, each
	// of them has a dynamically assigned IP address which will change on reboot.
	// NodeHostConfig.RaftAddress should be set to the current address that can be
	// reached by remote NodeHost instances. In this example, we will assume they
	// are
	//
	// 10.0.0.100:24000
	// 10.0.0.200:24000
	// 10.0.0.300:24000
	//
	// To use these machines, first enable the NodeHostConfig.AddressByNodeHostID
	// field and start the NodeHost instances. The NodeHostID value of each
	// NodeHost instance can be obtained by calling NodeHost.ID(). Let's say they
	// are
	//
	// "nhid-xxxxx",
	// "nhid-yyyyy",
	// "nhid-zzzzz".
	//
	// All these NodeHostID values are fixed; they will never change after reboots.
	//
	// When starting Raft nodes or requesting new nodes to be added, use the above
	// mentioned NodeHostID values as the target parameters (which are of the
	// Target type). Let's say we want to start a Raft Node as a part of a three
	// replicas Raft cluster, the initialMembers parameter of the StartCluster
	// method can be set to
	//
	// initialMembers := map[uint64]Target {
	// 	 1: "nhid-xxxxx",
	//   2: "nhid-yyyyy",
	//   3: "nhid-zzzzz",
	// }
	//
	// This indicates that node 1 of the cluster will be running on the NodeHost
	// instance identified by the NodeHostID value "nhid-xxxxx", node 2 of the
	// same cluster will be running on the NodeHost instance identified by the
	// NodeHostID value of "nhid-yyyyy" and so on.
	//
	// The internal gossip service exchanges NodeHost details, including their
	// NodeHostID and RaftAddress values, with all other known NodeHost instances.
	// Thanks to the nature of gossip, it will eventually allow each NodeHost
	// instance to be aware of the current details of all NodeHost instances.
	// As a result, let's say when Raft node 1 wants to send a Raft message to
	// node 2, it first figures out that node 2 is running on the NodeHost
	// identified by the NodeHostID value "nhid-yyyyy", RaftAddress information
	// from the gossip service further shows that "nhid-yyyyy" maps to a machine
	// currently reachable at 10.0.0.200:24000. Raft messages can thus be
	// delivered.
	//
	// The Gossip field here is used to configure how the gossip service works.
	// In this example, let's say we choose to use the following configurations
	// for those three NodeHost instances.
	//
	// GossipConfig {
	//   BindAddress: "10.0.0.100:24001",
	//   Seed: []string{"10.0.0.200:24001"},
	// }
	//
	// GossipConfig {
	//   BindAddress: "10.0.0.200:24001",
	//   Seed: []string{"10.0.0.300:24001"},
	// }
	//
	// GossipConfig {
	//   BindAddress: "10.0.0.300:24001",
	//   Seed: []string{"10.0.0.100:24001"},
	// }
	//
	// For those three machines, the gossip component listens on
	// "10.0.0.100:24001", "10.0.0.200:24001" and "10.0.0.300:24001" respectively
	// for incoming gossip messages. The Seed field is a list of known gossip end
	// points the local gossip service will try to talk to. The Seed field doesn't
	// need to include all gossip end points; a few well connected nodes in the
	// gossip network are enough.
	Gossip ConfigGossip `mapstructure:"gossip" json:"gossip" yaml:"gossip" toml:"gossip"`

	// Expert contains options for expert users who are familiar with the internals
	// of Dragonboat. Users are recommended not to use this field unless
	// absolutely necessary. It is important to note that any change to this field
	// may cause an existing instance to be unable to restart; it may also cause
	// negative performance impacts.
	Expert ConfigExpert `mapstructure:"expert" json:"expert" yaml:"expert" toml:"expert"`
}


func (ConfigNode) GetDGBConfigNodeHost

func (c ConfigNode) GetDGBConfigNodeHost() dgbcfg.NodeHostConfig

func (ConfigNode) Validate

func (c ConfigNode) Validate() liberr.Error
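
Tying the gossip scenario described above together, a hedged sketch of a ConfigNode that addresses peers by NodeHostID (placeholder import path; directories, addresses and IDs are example values only).

package example

import cluster "path/to/cluster" // placeholder import path for this package

// nodeCfg configures a NodeHost whose IP may change on reboot, so peers
// address it by its fixed NodeHostID and discover the current RaftAddress
// through the gossip service.
var nodeCfg = cluster.ConfigNode{
	DeploymentID:   1,
	NodeHostDir:    "/var/lib/raft/nodehost",
	WALDir:         "/var/lib/raft/wal", // ideally on low latency storage
	RTTMillisecond: 200,

	// The address currently reachable by the other instances; it may change
	// between restarts because AddressByNodeHostID is enabled.
	RaftAddress:         "10.0.0.100:24000",
	AddressByNodeHostID: true,

	// Gossip must be configured when AddressByNodeHostID is true.
	Gossip: cluster.ConfigGossip{
		BindAddress: "10.0.0.100:24001",
		Seed:        []string{"10.0.0.200:24001"},
	},

	EnableMetrics: true,
}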
