Documentation ¶
Index ¶
- Variables
- func RegisterTransportServer(s *grpc.Server, srv TransportServer)
- type Case
- type Checker
- type Etcd
- func (*Etcd) Descriptor() ([]byte, []int)
- func (e *Etcd) Flags() (fs []string)
- func (m *Etcd) Marshal() (dAtA []byte, err error)
- func (m *Etcd) MarshalTo(dAtA []byte) (int, error)
- func (m *Etcd) MarshalToSizedBuffer(dAtA []byte) (int, error)
- func (*Etcd) ProtoMessage()
- func (m *Etcd) Reset()
- func (m *Etcd) Size() (n int)
- func (m *Etcd) String() string
- func (m *Etcd) Unmarshal(dAtA []byte) error
- func (m *Etcd) XXX_DiscardUnknown()
- func (m *Etcd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *Etcd) XXX_Merge(src proto.Message)
- func (m *Etcd) XXX_Size() int
- func (m *Etcd) XXX_Unmarshal(b []byte) error
- type Member
- func (m *Member) CheckCompact(rev int64) error
- func (m *Member) Compact(rev int64, timeout time.Duration) error
- func (m *Member) CreateEtcdClient(opts ...grpc.DialOption) (*clientv3.Client, error)
- func (m *Member) CreateEtcdClientConfig(opts ...grpc.DialOption) (cfg *clientv3.Config, err error)
- func (m *Member) Defrag() error
- func (*Member) Descriptor() ([]byte, []int)
- func (m *Member) DialEtcdGRPCServer(opts ...grpc.DialOption) (*grpc.ClientConn, error)
- func (m *Member) ElectionTimeout() time.Duration
- func (m *Member) IsLeader() (bool, error)
- func (m *Member) Marshal() (dAtA []byte, err error)
- func (m *Member) MarshalTo(dAtA []byte) (int, error)
- func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error)
- func (*Member) ProtoMessage()
- func (m *Member) Reset()
- func (m *Member) RestoreSnapshot(lg *zap.Logger) (err error)
- func (m *Member) Rev(ctx context.Context) (int64, error)
- func (m *Member) RevHash() (int64, int64, error)
- func (m *Member) SaveSnapshot(lg *zap.Logger) (err error)
- func (m *Member) Size() (n int)
- func (m *Member) String() string
- func (m *Member) Unmarshal(dAtA []byte) error
- func (m *Member) WriteHealthKey() error
- func (m *Member) XXX_DiscardUnknown()
- func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *Member) XXX_Merge(src proto.Message)
- func (m *Member) XXX_Size() int
- func (m *Member) XXX_Unmarshal(b []byte) error
- type Operation
- type Request
- func (*Request) Descriptor() ([]byte, []int)
- func (m *Request) Marshal() (dAtA []byte, err error)
- func (m *Request) MarshalTo(dAtA []byte) (int, error)
- func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error)
- func (*Request) ProtoMessage()
- func (m *Request) Reset()
- func (m *Request) Size() (n int)
- func (m *Request) String() string
- func (m *Request) Unmarshal(dAtA []byte) error
- func (m *Request) XXX_DiscardUnknown()
- func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *Request) XXX_Merge(src proto.Message)
- func (m *Request) XXX_Size() int
- func (m *Request) XXX_Unmarshal(b []byte) error
- type Response
- func (*Response) Descriptor() ([]byte, []int)
- func (m *Response) Marshal() (dAtA []byte, err error)
- func (m *Response) MarshalTo(dAtA []byte) (int, error)
- func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error)
- func (*Response) ProtoMessage()
- func (m *Response) Reset()
- func (m *Response) Size() (n int)
- func (m *Response) String() string
- func (m *Response) Unmarshal(dAtA []byte) error
- func (m *Response) XXX_DiscardUnknown()
- func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *Response) XXX_Merge(src proto.Message)
- func (m *Response) XXX_Size() int
- func (m *Response) XXX_Unmarshal(b []byte) error
- type SnapshotInfo
- func (*SnapshotInfo) Descriptor() ([]byte, []int)
- func (m *SnapshotInfo) Marshal() (dAtA []byte, err error)
- func (m *SnapshotInfo) MarshalTo(dAtA []byte) (int, error)
- func (m *SnapshotInfo) MarshalToSizedBuffer(dAtA []byte) (int, error)
- func (*SnapshotInfo) ProtoMessage()
- func (m *SnapshotInfo) Reset()
- func (m *SnapshotInfo) Size() (n int)
- func (m *SnapshotInfo) String() string
- func (m *SnapshotInfo) Unmarshal(dAtA []byte) error
- func (m *SnapshotInfo) XXX_DiscardUnknown()
- func (m *SnapshotInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *SnapshotInfo) XXX_Merge(src proto.Message)
- func (m *SnapshotInfo) XXX_Size() int
- func (m *SnapshotInfo) XXX_Unmarshal(b []byte) error
- type Stresser
- func (*Stresser) Descriptor() ([]byte, []int)
- func (m *Stresser) Marshal() (dAtA []byte, err error)
- func (m *Stresser) MarshalTo(dAtA []byte) (int, error)
- func (m *Stresser) MarshalToSizedBuffer(dAtA []byte) (int, error)
- func (*Stresser) ProtoMessage()
- func (m *Stresser) Reset()
- func (m *Stresser) Size() (n int)
- func (m *Stresser) String() string
- func (m *Stresser) Unmarshal(dAtA []byte) error
- func (m *Stresser) XXX_DiscardUnknown()
- func (m *Stresser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *Stresser) XXX_Merge(src proto.Message)
- func (m *Stresser) XXX_Size() int
- func (m *Stresser) XXX_Unmarshal(b []byte) error
- type StresserType
- type Tester
- func (*Tester) Descriptor() ([]byte, []int)
- func (m *Tester) Marshal() (dAtA []byte, err error)
- func (m *Tester) MarshalTo(dAtA []byte) (int, error)
- func (m *Tester) MarshalToSizedBuffer(dAtA []byte) (int, error)
- func (*Tester) ProtoMessage()
- func (m *Tester) Reset()
- func (m *Tester) Size() (n int)
- func (m *Tester) String() string
- func (m *Tester) Unmarshal(dAtA []byte) error
- func (m *Tester) XXX_DiscardUnknown()
- func (m *Tester) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *Tester) XXX_Merge(src proto.Message)
- func (m *Tester) XXX_Size() int
- func (m *Tester) XXX_Unmarshal(b []byte) error
- type TransportClient
- type TransportServer
- type Transport_TransportClient
- type Transport_TransportServer
- type UnimplementedTransportServer
Constants ¶
This section is empty.
Variables ¶
var (
    ErrInvalidLengthRpc        = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowRpc          = fmt.Errorf("proto: integer overflow")
    ErrUnexpectedEndOfGroupRpc = fmt.Errorf("proto: unexpected end of group")
)
var Case_name = map[int32]string{
0: "SIGTERM_ONE_FOLLOWER",
1: "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
2: "SIGTERM_LEADER",
3: "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT",
4: "SIGTERM_QUORUM",
5: "SIGTERM_ALL",
10: "SIGQUIT_AND_REMOVE_ONE_FOLLOWER",
11: "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
12: "SIGQUIT_AND_REMOVE_LEADER",
13: "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT",
14: "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH",
100: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER",
101: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
102: "BLACKHOLE_PEER_PORT_TX_RX_LEADER",
103: "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
104: "BLACKHOLE_PEER_PORT_TX_RX_QUORUM",
105: "BLACKHOLE_PEER_PORT_TX_RX_ALL",
200: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
201: "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
202: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
203: "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
204: "DELAY_PEER_PORT_TX_RX_LEADER",
205: "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER",
206: "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
207: "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
208: "DELAY_PEER_PORT_TX_RX_QUORUM",
209: "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM",
210: "DELAY_PEER_PORT_TX_RX_ALL",
211: "RANDOM_DELAY_PEER_PORT_TX_RX_ALL",
300: "NO_FAIL_WITH_STRESS",
301: "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS",
400: "FAILPOINTS",
401: "FAILPOINTS_WITH_DISK_IO_LATENCY",
500: "EXTERNAL",
}
var Case_value = map[string]int32{
"SIGTERM_ONE_FOLLOWER": 0,
"SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 1,
"SIGTERM_LEADER": 2,
"SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT": 3,
"SIGTERM_QUORUM": 4,
"SIGTERM_ALL": 5,
"SIGQUIT_AND_REMOVE_ONE_FOLLOWER": 10,
"SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 11,
"SIGQUIT_AND_REMOVE_LEADER": 12,
"SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT": 13,
"SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH": 14,
"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER": 100,
"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 101,
"BLACKHOLE_PEER_PORT_TX_RX_LEADER": 102,
"BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 103,
"BLACKHOLE_PEER_PORT_TX_RX_QUORUM": 104,
"BLACKHOLE_PEER_PORT_TX_RX_ALL": 105,
"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": 200,
"RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": 201,
"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 202,
"RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 203,
"DELAY_PEER_PORT_TX_RX_LEADER": 204,
"RANDOM_DELAY_PEER_PORT_TX_RX_LEADER": 205,
"DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 206,
"RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 207,
"DELAY_PEER_PORT_TX_RX_QUORUM": 208,
"RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM": 209,
"DELAY_PEER_PORT_TX_RX_ALL": 210,
"RANDOM_DELAY_PEER_PORT_TX_RX_ALL": 211,
"NO_FAIL_WITH_STRESS": 300,
"NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS": 301,
"FAILPOINTS": 400,
"FAILPOINTS_WITH_DISK_IO_LATENCY": 401,
"EXTERNAL": 500,
}
var Checker_name = map[int32]string{
0: "KV_HASH",
1: "LEASE_EXPIRE",
2: "RUNNER",
3: "NO_CHECK",
4: "SHORT_TTL_LEASE_EXPIRE",
}
var Checker_value = map[string]int32{
"KV_HASH": 0,
"LEASE_EXPIRE": 1,
"RUNNER": 2,
"NO_CHECK": 3,
"SHORT_TTL_LEASE_EXPIRE": 4,
}
var Operation_name = map[int32]string{
0: "NOT_STARTED",
10: "INITIAL_START_ETCD",
11: "RESTART_ETCD",
20: "SIGTERM_ETCD",
21: "SIGQUIT_ETCD_AND_REMOVE_DATA",
30: "SAVE_SNAPSHOT",
31: "RESTORE_RESTART_FROM_SNAPSHOT",
32: "RESTART_FROM_SNAPSHOT",
40: "SIGQUIT_ETCD_AND_ARCHIVE_DATA",
41: "SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT",
100: "BLACKHOLE_PEER_PORT_TX_RX",
101: "UNBLACKHOLE_PEER_PORT_TX_RX",
200: "DELAY_PEER_PORT_TX_RX",
201: "UNDELAY_PEER_PORT_TX_RX",
}
var Operation_value = map[string]int32{
"NOT_STARTED": 0,
"INITIAL_START_ETCD": 10,
"RESTART_ETCD": 11,
"SIGTERM_ETCD": 20,
"SIGQUIT_ETCD_AND_REMOVE_DATA": 21,
"SAVE_SNAPSHOT": 30,
"RESTORE_RESTART_FROM_SNAPSHOT": 31,
"RESTART_FROM_SNAPSHOT": 32,
"SIGQUIT_ETCD_AND_ARCHIVE_DATA": 40,
"SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT": 41,
"BLACKHOLE_PEER_PORT_TX_RX": 100,
"UNBLACKHOLE_PEER_PORT_TX_RX": 101,
"DELAY_PEER_PORT_TX_RX": 200,
"UNDELAY_PEER_PORT_TX_RX": 201,
}
var StresserType_name = map[int32]string{
0: "KV_WRITE_SMALL",
1: "KV_WRITE_LARGE",
2: "KV_READ_ONE_KEY",
3: "KV_READ_RANGE",
4: "KV_DELETE_ONE_KEY",
5: "KV_DELETE_RANGE",
6: "KV_TXN_WRITE_DELETE",
10: "LEASE",
20: "ELECTION_RUNNER",
31: "WATCH_RUNNER",
41: "LOCK_RACER_RUNNER",
51: "LEASE_RUNNER",
}
var StresserType_value = map[string]int32{
"KV_WRITE_SMALL": 0,
"KV_WRITE_LARGE": 1,
"KV_READ_ONE_KEY": 2,
"KV_READ_RANGE": 3,
"KV_DELETE_ONE_KEY": 4,
"KV_DELETE_RANGE": 5,
"KV_TXN_WRITE_DELETE": 6,
"LEASE": 10,
"ELECTION_RUNNER": 20,
"WATCH_RUNNER": 31,
"LOCK_RACER_RUNNER": 41,
"LEASE_RUNNER": 51,
}
Functions ¶
func RegisterTransportServer ¶
func RegisterTransportServer(s *grpc.Server, srv TransportServer)
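For illustration, a minimal sketch of serving the Transport service from an agent process; the import path, listen address, and the transportServer type are assumptions, not part of this documentation:

    package main

    import (
        "log"
        "net"

        "google.golang.org/grpc"

        "go.etcd.io/etcd/functional/rpcpb" // assumed import path
    )

    // transportServer is a hypothetical implementation; embedding
    // UnimplementedTransportServer keeps it forward compatible.
    type transportServer struct {
        rpcpb.UnimplementedTransportServer
    }

    func main() {
        lis, err := net.Listen("tcp", "127.0.0.1:19027") // address is an assumption
        if err != nil {
            log.Fatal(err)
        }
        s := grpc.NewServer()
        rpcpb.RegisterTransportServer(s, &transportServer{})
        if err := s.Serve(lis); err != nil {
            log.Fatal(err)
        }
    }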
Types ¶
type Case ¶
type Case int32
Case defines various system faults or test cases in distributed systems, in order to verify correct behavior of etcd servers and clients.
const (
    // SIGTERM_ONE_FOLLOWER stops a randomly chosen follower (non-leader)
    // but does not delete its data directories on disk for next restart.
    // It waits "delay-ms" before recovering this failure.
    // The expected behavior is that the follower comes back online
    // and rejoins the cluster, and then each member continues to process
    // client requests ('Put' request that requires Raft consensus).
    Case_SIGTERM_ONE_FOLLOWER Case = 0

    // SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly chosen
    // follower but does not delete its data directories on disk for next
    // restart. And waits until the most up-to-date node (leader) applies the
    // snapshot count of entries since the stop operation.
    // The expected behavior is that the follower comes back online and
    // rejoins the cluster, and then the active leader sends a snapshot
    // to the follower to force it to follow the leader's log.
    // As always, after recovery, each member must be able to process
    // client requests.
    Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 1

    // SIGTERM_LEADER stops the active leader node but does not delete its
    // data directories on disk for next restart. Then it waits "delay-ms"
    // before recovering this failure, in order to trigger election timeouts.
    // The expected behavior is that a new leader gets elected, and the
    // old leader comes back online and rejoins the cluster as a follower.
    // As always, after recovery, each member must be able to process
    // client requests.
    Case_SIGTERM_LEADER Case = 2

    // SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader node
    // but does not delete its data directories on disk for next restart.
    // And waits until the most up-to-date node ("new" leader) applies the
    // snapshot count of entries since the stop operation.
    // The expected behavior is that the cluster elects a new leader, and the
    // old leader comes back online and rejoins the cluster as a follower.
    // And it receives the snapshot from the new leader to overwrite its
    // store. As always, after recovery, each member must be able to
    // process client requests.
    Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 3

    // SIGTERM_QUORUM stops a majority of nodes to make the whole cluster
    // inoperable but does not delete data directories on stopped nodes
    // for next restart. And it waits "delay-ms" before recovering the failure.
    // The expected behavior is that nodes come back online, thus the cluster
    // comes back operative as well. As always, after recovery, each member
    // must be able to process client requests.
    Case_SIGTERM_QUORUM Case = 4

    // SIGTERM_ALL stops the whole cluster but does not delete data directories
    // on disk for next restart. And it waits "delay-ms" before recovering
    // this failure.
    // The expected behavior is that nodes come back online, thus the cluster
    // comes back operative as well. As always, after recovery, each member
    // must be able to process client requests.
    Case_SIGTERM_ALL Case = 5

    // SIGQUIT_AND_REMOVE_ONE_FOLLOWER stops a randomly chosen follower
    // (non-leader), deletes its data directories on disk, and removes
    // this member from the cluster (membership reconfiguration). On recovery,
    // the tester adds a new member, and this member joins the existing cluster
    // with fresh data. It waits "delay-ms" before recovering this
    // failure. This simulates destroying one follower machine, where the
    // operator needs to add a new member from a fresh machine.
    // The expected behavior is that a new member joins the existing cluster,
    // and then each member continues to process client requests.
    Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER Case = 10

    // SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly
    // chosen follower, deletes its data directories on disk, and removes
    // this member from the cluster (membership reconfiguration). On recovery,
    // the tester adds a new member, and this member joins the existing cluster.
    // On member removal, the cluster waits until the most up-to-date node
    // (leader) applies the snapshot count of entries since the stop operation.
    // This simulates destroying one follower machine, where the operator needs
    // to add a new member from a fresh machine.
    // The expected behavior is that a new member joins the existing cluster,
    // and receives a snapshot from the active leader. As always, after
    // recovery, each member must be able to process client requests.
    Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 11

    // SIGQUIT_AND_REMOVE_LEADER stops the active leader node, deletes its
    // data directories on disk, and removes this member from the cluster.
    // On recovery, the tester adds a new member, and this member joins the
    // existing cluster with fresh data. It waits "delay-ms" before
    // recovering this failure. This simulates destroying a leader machine,
    // where the operator needs to add a new member from a fresh machine.
    // The expected behavior is that a new member joins the existing cluster,
    // and then each member continues to process client requests.
    Case_SIGQUIT_AND_REMOVE_LEADER Case = 12

    // SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader,
    // deletes its data directories on disk, and removes this member from the
    // cluster (membership reconfiguration). On recovery, the tester adds a new
    // member, and this member joins the existing cluster. On member removal,
    // the cluster waits until the most up-to-date node (new leader) applies
    // the snapshot count of entries since the stop operation. This simulates
    // destroying a leader machine, where the operator needs to add a new member
    // from a fresh machine.
    // The expected behavior is that on member removal, the cluster elects a new
    // leader, and a new member joins the existing cluster and receives a
    // snapshot from the newly elected leader. As always, after recovery, each
    // member must be able to process client requests.
    Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 13

    // SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH first
    // stops a majority of nodes and deletes data directories on those quorum
    // nodes, to make the whole cluster inoperable. Now that the quorum and their
    // data are totally destroyed, the cluster cannot even remove unavailable nodes
    // (e.g. 2 out of 3 are lost, so no leader can be elected).
    // Let's assume a 3-node cluster of nodes A, B, and C. One day, nodes A and B
    // are destroyed and all their data are gone. The only viable solution is
    // to recover from C's latest snapshot.
    //
    // To simulate:
    //  1. Assume node C is the current leader with the most up-to-date data.
    //  2. Download a snapshot from node C, before destroying nodes A and B.
    //  3. Destroy nodes A and B, and make the whole cluster inoperable.
    //  4. Now node C cannot operate either.
    //  5. SIGTERM node C and remove its data directories.
    //  6. Restore a new seed member from node C's latest snapshot file.
    //  7. Add another member to establish a 2-node cluster.
    //  8. Add another member to establish a 3-node cluster.
    //  9. Add more if any.
    //
    // The expected behavior is that etcd successfully recovers from such a
    // disastrous situation where only 1 node survives out of a 3-node cluster:
    // new members join the existing cluster, and previous data from the snapshot
    // are still preserved after the recovery process. As always, after recovery,
    // each member must be able to process client requests.
    Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH Case = 14

    // BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER drops all outgoing/incoming
    // packets from/to the peer port on a randomly chosen follower
    // (non-leader), and waits for "delay-ms" until recovery.
    // The expected behavior is that once the dropping operation is undone,
    // each member must be able to process client requests.
    Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 100

    // BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT drops
    // all outgoing/incoming packets from/to the peer port on a randomly
    // chosen follower (non-leader), and waits until the most up-to-date node
    // (leader) applies the snapshot count of entries since the blackhole
    // operation.
    // The expected behavior is that once the packet drop operation is undone,
    // the slow follower tries to catch up, possibly receiving the snapshot
    // from the active leader. As always, after recovery, each member must
    // be able to process client requests.
    Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 101

    // BLACKHOLE_PEER_PORT_TX_RX_LEADER drops all outgoing/incoming packets
    // from/to the peer port on the active leader (isolated), and waits for
    // "delay-ms" until recovery, in order to trigger an election timeout.
    // The expected behavior is that after the election timeout, a new leader
    // gets elected, and once the dropping operation is undone, the old leader
    // comes back and rejoins the cluster as a follower. As always, after
    // recovery, each member must be able to process client requests.
    Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER Case = 102

    // BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT drops all
    // outgoing/incoming packets from/to the peer port on the active leader,
    // and waits until the most up-to-date node (leader) applies the snapshot
    // count of entries since the blackhole operation.
    // The expected behavior is that the cluster elects a new leader, and once
    // the dropping operation is undone, the old leader comes back and rejoins
    // the cluster as a follower. The slow follower tries to catch up, likely
    // receiving the snapshot from the new active leader. As always, after
    // recovery, each member must be able to process client requests.
    Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 103

    // BLACKHOLE_PEER_PORT_TX_RX_QUORUM drops all outgoing/incoming packets
    // from/to the peer ports on a majority of nodes, thus losing the
    // leader and making the cluster inoperable. And it waits for "delay-ms"
    // until recovery.
    // The expected behavior is that once the packet drop operation is undone,
    // nodes come back online, thus the cluster comes back operative. As always,
    // after recovery, each member must be able to process client requests.
    Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM Case = 104

    // BLACKHOLE_PEER_PORT_TX_RX_ALL drops all outgoing/incoming packets
    // from/to the peer ports on all nodes, thus making the cluster totally
    // inoperable. It waits for "delay-ms" until recovery.
    // The expected behavior is that once the packet drop operation is undone,
    // nodes come back online, thus the cluster comes back operative. As always,
    // after recovery, each member must be able to process client requests.
    Case_BLACKHOLE_PEER_PORT_TX_RX_ALL Case = 105

    // DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming packets
    // from/to the peer port on a randomly chosen follower (non-leader).
    // It waits for "delay-ms" until recovery.
    // The expected behavior is that once the packet delay operation is undone,
    // the follower comes back and tries to catch up with the latest changes
    // from the cluster. And as always, after recovery, each member must be
    // able to process client requests.
    Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 200

    // RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming
    // packets from/to the peer port on a randomly chosen follower
    // (non-leader) with a randomized time duration (thus isolated). It
    // waits for "delay-ms" until recovery.
    // The expected behavior is that once the packet delay operation is undone,
    // each member must be able to process client requests.
    Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 201

    // DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays
    // outgoing/incoming packets from/to the peer port on a randomly chosen
    // follower (non-leader), and waits until the most up-to-date node (leader)
    // applies the snapshot count of entries since the delay operation.
    // The expected behavior is that the delayed follower gets isolated
    // and falls behind the current active leader, and once the delay operation
    // is undone, the slow follower comes back and catches up, possibly
    // receiving a snapshot from the active leader. As always, after recovery,
    // each member must be able to process client requests.
    Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 202

    // RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays
    // outgoing/incoming packets from/to the peer port on a randomly chosen
    // follower (non-leader) with a randomized time duration, and waits until
    // the most up-to-date node (leader) applies the snapshot count of entries
    // since the delay operation.
    // The expected behavior is that the delayed follower gets isolated
    // and falls behind the current active leader, and once the delay operation
    // is undone, the slow follower comes back and catches up, possibly
    // receiving a snapshot from the active leader. As always, after recovery,
    // each member must be able to process client requests.
    Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 203

    // DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets from/to
    // the peer port on the active leader. And waits for "delay-ms" until
    // recovery.
    // The expected behavior is that the cluster may elect a new leader, and
    // once the packet delay operation is undone, the (old) leader comes back
    // and tries to catch up with the latest changes from the cluster. As
    // always, after recovery, each member must be able to process client
    // requests.
    Case_DELAY_PEER_PORT_TX_RX_LEADER Case = 204

    // RANDOM_DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets
    // from/to the peer port on the active leader with a randomized time
    // duration. And waits for "delay-ms" until recovery.
    // The expected behavior is that the cluster may elect a new leader, and
    // once the packet delay operation is undone, the (old) leader comes back
    // and tries to catch up with the latest changes from the cluster. As
    // always, after recovery, each member must be able to process client
    // requests.
    Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER Case = 205

    // DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays
    // outgoing/incoming packets from/to the peer port on the active leader,
    // and waits until the most up-to-date node (current or new leader) applies
    // the snapshot count of entries since the delay operation.
    // The expected behavior is that the cluster may elect a new leader, and
    // the old leader gets isolated and falls behind the current active leader,
    // and once the delay operation is undone, the slow follower comes back
    // and catches up, likely receiving a snapshot from the active leader.
    // As always, after recovery, each member must be able to process client
    // requests.
    Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 206

    // RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays
    // outgoing/incoming packets from/to the peer port on the active leader,
    // with a randomized time duration. And it waits until the most up-to-date
    // node (current or new leader) applies the snapshot count of entries since
    // the delay operation.
    // The expected behavior is that the cluster may elect a new leader, and
    // the old leader gets isolated and falls behind the current active leader,
    // and once the delay operation is undone, the slow follower comes back
    // and catches up, likely receiving a snapshot from the active leader.
    // As always, after recovery, each member must be able to process client
    // requests.
    Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 207

    // DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets from/to
    // the peer ports on a majority of nodes. And it waits for "delay-ms"
    // until recovery, likely to trigger election timeouts.
    // The expected behavior is that the cluster may elect a new leader, while
    // a quorum of nodes struggle with slow networks, and once the delay
    // operation is undone, nodes come back and the cluster comes back
    // operative. As always, after recovery, each member must be able to
    // process client requests.
    Case_DELAY_PEER_PORT_TX_RX_QUORUM Case = 208

    // RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets
    // from/to the peer ports on a majority of nodes, with randomized
    // time durations. And it waits for "delay-ms" until recovery, likely
    // to trigger election timeouts.
    // The expected behavior is that the cluster may elect a new leader, while
    // a quorum of nodes struggle with slow networks, and once the delay
    // operation is undone, nodes come back and the cluster comes back
    // operative. As always, after recovery, each member must be able to
    // process client requests.
    Case_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM Case = 209

    // DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets from/to the
    // peer ports on all nodes. And it waits for "delay-ms" until recovery,
    // likely to trigger election timeouts.
    // The expected behavior is that the cluster may become totally inoperable,
    // struggling with slow networks across the whole cluster. Once the delay
    // operation is undone, nodes come back and the cluster comes back
    // operative. As always, after recovery, each member must be able to
    // process client requests.
    Case_DELAY_PEER_PORT_TX_RX_ALL Case = 210

    // RANDOM_DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets
    // from/to the peer ports on all nodes, with randomized time durations.
    // And it waits for "delay-ms" until recovery, likely to trigger
    // election timeouts.
    // The expected behavior is that the cluster may become totally inoperable,
    // struggling with slow networks across the whole cluster. Once the delay
    // operation is undone, nodes come back and the cluster comes back
    // operative. As always, after recovery, each member must be able to
    // process client requests.
    Case_RANDOM_DELAY_PEER_PORT_TX_RX_ALL Case = 211

    // NO_FAIL_WITH_STRESS stops injecting failures while testing the
    // consistency and correctness under pressure loads, for the duration of
    // "delay-ms". The goal is to ensure the cluster is still making progress
    // on recovery, and to verify the system does not deadlock following a
    // sequence of failure injections.
    // The expected behavior is that the cluster remains fully operative in
    // healthy condition. As always, after recovery, each member must be able
    // to process client requests.
    Case_NO_FAIL_WITH_STRESS Case = 300

    // NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS neither injects failures nor
    // sends stressing client requests to the cluster, for the duration of
    // "delay-ms". The goal is to ensure the cluster is still making progress
    // on recovery, and to verify the system does not deadlock following a
    // sequence of failure injections.
    // The expected behavior is that the cluster remains fully operative in
    // healthy condition, and client requests during the liveness period
    // succeed without errors.
    // Note: this is how Google Chubby does failure injection testing
    // https://static.googleusercontent.com/media/research.google.com/en//archive/paxos_made_live.pdf.
    Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS Case = 301

    // FAILPOINTS injects failpoints into etcd server runtime, triggering
    // panics in critical code paths.
    Case_FAILPOINTS Case = 400

    // FAILPOINTS_WITH_DISK_IO_LATENCY injects high disk I/O latency failure
    // in raftAfterSave code paths.
    Case_FAILPOINTS_WITH_DISK_IO_LATENCY Case = 401

    // EXTERNAL runs external failure injection scripts.
    Case_EXTERNAL Case = 500
)
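A hedged sketch of mapping between Case values and their names via the Case_name and Case_value maps (assuming the fmt package and the rpcpb import from the earlier sketch; the generated String method resolves names through Case_name):

    c := rpcpb.Case_SIGTERM_LEADER
    fmt.Println(c)                              // SIGTERM_LEADER
    fmt.Println(rpcpb.Case_name[int32(c)])      // SIGTERM_LEADER
    fmt.Println(rpcpb.Case_value["FAILPOINTS"]) // 400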
func (Case) EnumDescriptor ¶
func (Case) EnumDescriptor() ([]byte, []int)
type Etcd ¶
type Etcd struct {
    Name    string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty" yaml:"name"`
    DataDir string `protobuf:"bytes,2,opt,name=DataDir,proto3" json:"DataDir,omitempty" yaml:"data-dir"`
    WALDir  string `protobuf:"bytes,3,opt,name=WALDir,proto3" json:"WALDir,omitempty" yaml:"wal-dir"`

    // HeartbeatIntervalMs is the time (in milliseconds) of a heartbeat interval.
    // Default value is 100, which is 100ms.
    HeartbeatIntervalMs int64 `protobuf:"varint,11,opt,name=HeartbeatIntervalMs,proto3" json:"HeartbeatIntervalMs,omitempty" yaml:"heartbeat-interval"`
    // ElectionTimeoutMs is the time (in milliseconds) for an election to timeout.
    // Default value is 1000, which is 1s.
    ElectionTimeoutMs int64 `protobuf:"varint,12,opt,name=ElectionTimeoutMs,proto3" json:"ElectionTimeoutMs,omitempty" yaml:"election-timeout"`

    ListenClientURLs    []string `protobuf:"bytes,21,rep,name=ListenClientURLs,proto3" json:"ListenClientURLs,omitempty" yaml:"listen-client-urls"`
    AdvertiseClientURLs []string `protobuf:"bytes,22,rep,name=AdvertiseClientURLs,proto3" json:"AdvertiseClientURLs,omitempty" yaml:"advertise-client-urls"`
    ClientAutoTLS       bool     `protobuf:"varint,23,opt,name=ClientAutoTLS,proto3" json:"ClientAutoTLS,omitempty" yaml:"auto-tls"`
    ClientCertAuth      bool     `protobuf:"varint,24,opt,name=ClientCertAuth,proto3" json:"ClientCertAuth,omitempty" yaml:"client-cert-auth"`
    ClientCertFile      string   `protobuf:"bytes,25,opt,name=ClientCertFile,proto3" json:"ClientCertFile,omitempty" yaml:"cert-file"`
    ClientKeyFile       string   `protobuf:"bytes,26,opt,name=ClientKeyFile,proto3" json:"ClientKeyFile,omitempty" yaml:"key-file"`
    ClientTrustedCAFile string   `protobuf:"bytes,27,opt,name=ClientTrustedCAFile,proto3" json:"ClientTrustedCAFile,omitempty" yaml:"trusted-ca-file"`

    ListenPeerURLs     []string `protobuf:"bytes,31,rep,name=ListenPeerURLs,proto3" json:"ListenPeerURLs,omitempty" yaml:"listen-peer-urls"`
    AdvertisePeerURLs  []string `protobuf:"bytes,32,rep,name=AdvertisePeerURLs,proto3" json:"AdvertisePeerURLs,omitempty" yaml:"initial-advertise-peer-urls"`
    PeerAutoTLS        bool     `protobuf:"varint,33,opt,name=PeerAutoTLS,proto3" json:"PeerAutoTLS,omitempty" yaml:"peer-auto-tls"`
    PeerClientCertAuth bool     `protobuf:"varint,34,opt,name=PeerClientCertAuth,proto3" json:"PeerClientCertAuth,omitempty" yaml:"peer-client-cert-auth"`
    PeerCertFile       string   `protobuf:"bytes,35,opt,name=PeerCertFile,proto3" json:"PeerCertFile,omitempty" yaml:"peer-cert-file"`
    PeerKeyFile        string   `protobuf:"bytes,36,opt,name=PeerKeyFile,proto3" json:"PeerKeyFile,omitempty" yaml:"peer-key-file"`
    PeerTrustedCAFile  string   `protobuf:"bytes,37,opt,name=PeerTrustedCAFile,proto3" json:"PeerTrustedCAFile,omitempty" yaml:"peer-trusted-ca-file"`

    InitialCluster      string `protobuf:"bytes,41,opt,name=InitialCluster,proto3" json:"InitialCluster,omitempty" yaml:"initial-cluster"`
    InitialClusterState string `protobuf:"bytes,42,opt,name=InitialClusterState,proto3" json:"InitialClusterState,omitempty" yaml:"initial-cluster-state"`
    InitialClusterToken string `protobuf:"bytes,43,opt,name=InitialClusterToken,proto3" json:"InitialClusterToken,omitempty" yaml:"initial-cluster-token"`

    SnapshotCount     int64 `protobuf:"varint,51,opt,name=SnapshotCount,proto3" json:"SnapshotCount,omitempty" yaml:"snapshot-count"`
    QuotaBackendBytes int64 `protobuf:"varint,52,opt,name=QuotaBackendBytes,proto3" json:"QuotaBackendBytes,omitempty" yaml:"quota-backend-bytes"`

    PreVote             bool `protobuf:"varint,63,opt,name=PreVote,proto3" json:"PreVote,omitempty" yaml:"pre-vote"`
    InitialCorruptCheck bool `protobuf:"varint,64,opt,name=InitialCorruptCheck,proto3" json:"InitialCorruptCheck,omitempty" yaml:"initial-corrupt-check"`

    Logger string `protobuf:"bytes,71,opt,name=Logger,proto3" json:"Logger,omitempty" yaml:"logger"`
    // LogOutputs is the log file to store current etcd server logs.
    LogOutputs []string `protobuf:"bytes,72,rep,name=LogOutputs,proto3" json:"LogOutputs,omitempty" yaml:"log-outputs"`
    LogLevel   string   `protobuf:"bytes,73,opt,name=LogLevel,proto3" json:"LogLevel,omitempty" yaml:"log-level"`

    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
func (*Etcd) Descriptor ¶
func (*Etcd) Descriptor() ([]byte, []int)
func (*Etcd) ProtoMessage ¶
func (*Etcd) ProtoMessage()
func (*Etcd) XXX_DiscardUnknown ¶
func (m *Etcd) XXX_DiscardUnknown()
func (*Etcd) XXX_Unmarshal ¶
func (m *Etcd) XXX_Unmarshal(b []byte) error
type Member ¶
type Member struct {
    // EtcdExec is the executable etcd binary path in agent server.
    EtcdExec string `protobuf:"bytes,1,opt,name=EtcdExec,proto3" json:"EtcdExec,omitempty" yaml:"etcd-exec"`

    // AgentAddr is the agent HTTP server address.
    AgentAddr string `protobuf:"bytes,11,opt,name=AgentAddr,proto3" json:"AgentAddr,omitempty" yaml:"agent-addr"`
    // FailpointHTTPAddr is the agent's failpoints HTTP server address.
    FailpointHTTPAddr string `protobuf:"bytes,12,opt,name=FailpointHTTPAddr,proto3" json:"FailpointHTTPAddr,omitempty" yaml:"failpoint-http-addr"`

    // BaseDir is the base directory where all logs and etcd data are stored.
    BaseDir string `protobuf:"bytes,101,opt,name=BaseDir,proto3" json:"BaseDir,omitempty" yaml:"base-dir"`

    // EtcdClientProxy is true when client traffic needs to be proxied.
    // If true, listen client URL port must be different than advertise client URL port.
    EtcdClientProxy bool `protobuf:"varint,201,opt,name=EtcdClientProxy,proto3" json:"EtcdClientProxy,omitempty" yaml:"etcd-client-proxy"`
    // EtcdPeerProxy is true when peer traffic needs to be proxied.
    // If true, listen peer URL port must be different than advertise peer URL port.
    EtcdPeerProxy bool `protobuf:"varint,202,opt,name=EtcdPeerProxy,proto3" json:"EtcdPeerProxy,omitempty" yaml:"etcd-peer-proxy"`

    // EtcdClientEndpoint is the etcd client endpoint.
    EtcdClientEndpoint string `protobuf:"bytes,301,opt,name=EtcdClientEndpoint,proto3" json:"EtcdClientEndpoint,omitempty" yaml:"etcd-client-endpoint"`
    // Etcd defines etcd binary configuration flags.
    Etcd *Etcd `protobuf:"bytes,302,opt,name=Etcd,proto3" json:"Etcd,omitempty" yaml:"etcd"`
    // EtcdOnSnapshotRestore defines one-time use configuration during etcd
    // snapshot recovery process.
    EtcdOnSnapshotRestore *Etcd `protobuf:"bytes,303,opt,name=EtcdOnSnapshotRestore,proto3" json:"EtcdOnSnapshotRestore,omitempty"`

    // ClientCertData contains cert file contents from this member's etcd server.
    ClientCertData string `protobuf:"bytes,401,opt,name=ClientCertData,proto3" json:"ClientCertData,omitempty" yaml:"client-cert-data"`
    ClientCertPath string `protobuf:"bytes,402,opt,name=ClientCertPath,proto3" json:"ClientCertPath,omitempty" yaml:"client-cert-path"`
    // ClientKeyData contains key file contents from this member's etcd server.
    ClientKeyData string `protobuf:"bytes,403,opt,name=ClientKeyData,proto3" json:"ClientKeyData,omitempty" yaml:"client-key-data"`
    ClientKeyPath string `protobuf:"bytes,404,opt,name=ClientKeyPath,proto3" json:"ClientKeyPath,omitempty" yaml:"client-key-path"`
    // ClientTrustedCAData contains trusted CA file contents from this member's etcd server.
    ClientTrustedCAData string `protobuf:"bytes,405,opt,name=ClientTrustedCAData,proto3" json:"ClientTrustedCAData,omitempty" yaml:"client-trusted-ca-data"`
    ClientTrustedCAPath string `protobuf:"bytes,406,opt,name=ClientTrustedCAPath,proto3" json:"ClientTrustedCAPath,omitempty" yaml:"client-trusted-ca-path"`

    // PeerCertData contains cert file contents from this member's etcd server.
    PeerCertData string `protobuf:"bytes,501,opt,name=PeerCertData,proto3" json:"PeerCertData,omitempty" yaml:"peer-cert-data"`
    PeerCertPath string `protobuf:"bytes,502,opt,name=PeerCertPath,proto3" json:"PeerCertPath,omitempty" yaml:"peer-cert-path"`
    // PeerKeyData contains key file contents from this member's etcd server.
    PeerKeyData string `protobuf:"bytes,503,opt,name=PeerKeyData,proto3" json:"PeerKeyData,omitempty" yaml:"peer-key-data"`
    PeerKeyPath string `protobuf:"bytes,504,opt,name=PeerKeyPath,proto3" json:"PeerKeyPath,omitempty" yaml:"peer-key-path"`
    // PeerTrustedCAData contains trusted CA file contents from this member's etcd server.
    PeerTrustedCAData string `protobuf:"bytes,505,opt,name=PeerTrustedCAData,proto3" json:"PeerTrustedCAData,omitempty" yaml:"peer-trusted-ca-data"`
    PeerTrustedCAPath string `protobuf:"bytes,506,opt,name=PeerTrustedCAPath,proto3" json:"PeerTrustedCAPath,omitempty" yaml:"peer-trusted-ca-path"`

    // SnapshotPath is the snapshot file path to store or restore from.
    SnapshotPath string `protobuf:"bytes,601,opt,name=SnapshotPath,proto3" json:"SnapshotPath,omitempty" yaml:"snapshot-path"`
    // SnapshotInfo contains last SAVE_SNAPSHOT request results.
    SnapshotInfo *SnapshotInfo `protobuf:"bytes,602,opt,name=SnapshotInfo,proto3" json:"SnapshotInfo,omitempty"`

    // Failpoints is the GOFAIL_FAILPOINTS environment variable value to use when starting etcd.
    Failpoints string `protobuf:"bytes,701,opt,name=Failpoints,proto3" json:"Failpoints,omitempty" yaml:"failpoints"`

    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
func (*Member) CheckCompact ¶
func (m *Member) CheckCompact(rev int64) error
CheckCompact ensures that historical data before a given revision has been compacted.
func (*Member) Compact ¶
func (m *Member) Compact(rev int64, timeout time.Duration) error
Compact compacts member storage up to a given revision. It blocks until the compaction is physically done.
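A hedged sketch of compacting up to the member's current revision and then verifying it; compactAndVerify is a hypothetical helper, the import path and timeout value are assumptions:

    import (
        "context"
        "time"

        "go.etcd.io/etcd/functional/rpcpb" // assumed import path
    )

    func compactAndVerify(m *rpcpb.Member) error {
        rev, err := m.Rev(context.Background()) // latest revision from this member
        if err != nil {
            return err
        }
        if err := m.Compact(rev, 30*time.Second); err != nil {
            return err
        }
        return m.CheckCompact(rev) // ensure history before rev is gone
    }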
func (*Member) CreateEtcdClient ¶
func (m *Member) CreateEtcdClient(opts ...grpc.DialOption) (*clientv3.Client, error)
CreateEtcdClient creates a client from the member.
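A minimal sketch, assuming m is a populated *rpcpb.Member whose EtcdClientEndpoint is reachable and that the imports from the earlier sketch are in scope; healthGet is a hypothetical helper:

    func healthGet(m *rpcpb.Member) error {
        cli, err := m.CreateEtcdClient() // TLS settings derive from the member's cert fields
        if err != nil {
            return err
        }
        defer cli.Close()
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()
        _, err = cli.Get(ctx, "health")
        return err
    }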
func (*Member) CreateEtcdClientConfig ¶
func (m *Member) CreateEtcdClientConfig(opts ...grpc.DialOption) (cfg *clientv3.Config, err error)
CreateEtcdClientConfig creates a client configuration from the member.
func (*Member) Descriptor ¶
func (*Member) Descriptor() ([]byte, []int)
func (*Member) DialEtcdGRPCServer ¶
func (m *Member) DialEtcdGRPCServer(opts ...grpc.DialOption) (*grpc.ClientConn, error)
DialEtcdGRPCServer creates a raw gRPC connection to an etcd member.
func (*Member) ElectionTimeout ¶
func (m *Member) ElectionTimeout() time.Duration
ElectionTimeout returns an election timeout duration.
func (*Member) MarshalToSizedBuffer ¶
func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error)
func (*Member) ProtoMessage ¶
func (*Member) ProtoMessage()
func (*Member) RestoreSnapshot ¶
func (m *Member) RestoreSnapshot(lg *zap.Logger) (err error)
RestoreSnapshot restores a cluster from a given snapshot file on disk. It's meant to be requested remotely, so that the local member can load the snapshot file from its local disk.
func (*Member) SaveSnapshot ¶
func (m *Member) SaveSnapshot(lg *zap.Logger) (err error)
SaveSnapshot downloads a snapshot file from this member, locally. It's meant to be requested remotely, so that the local member can store the snapshot file on its local disk.
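A hedged sketch pairing SaveSnapshot with a later RestoreSnapshot; in the tester workflow these correspond to the SAVE_SNAPSHOT and RESTORE_RESTART_FROM_SNAPSHOT operations, and saveThenRestore is a hypothetical helper:

    func saveThenRestore(m *rpcpb.Member) error {
        lg, err := zap.NewProduction() // go.uber.org/zap
        if err != nil {
            return err
        }
        if err := m.SaveSnapshot(lg); err != nil { // writes to m.SnapshotPath
            return err
        }
        return m.RestoreSnapshot(lg) // restores a cluster from that same file
    }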
func (*Member) WriteHealthKey ¶
func (m *Member) WriteHealthKey() error
WriteHealthKey writes a health key to this member.
func (*Member) XXX_DiscardUnknown ¶
func (m *Member) XXX_DiscardUnknown()
func (*Member) XXX_Marshal ¶
func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*Member) XXX_Unmarshal ¶
func (m *Member) XXX_Unmarshal(b []byte) error
type Operation ¶
type Operation int32
const (
    // NOT_STARTED is the agent status before etcd first start.
    Operation_NOT_STARTED Operation = 0

    // INITIAL_START_ETCD is only called to start etcd, the very first time.
    Operation_INITIAL_START_ETCD Operation = 10
    // RESTART_ETCD is sent to restart killed etcd.
    Operation_RESTART_ETCD Operation = 11

    // SIGTERM_ETCD pauses etcd process while keeping data directories
    // and previous etcd configurations.
    Operation_SIGTERM_ETCD Operation = 20
    // SIGQUIT_ETCD_AND_REMOVE_DATA kills etcd process and removes all data
    // directories to simulate destroying the whole machine.
    Operation_SIGQUIT_ETCD_AND_REMOVE_DATA Operation = 21

    // SAVE_SNAPSHOT is sent to trigger local member to download its snapshot
    // onto its local disk with the specified path from tester.
    Operation_SAVE_SNAPSHOT Operation = 30
    // RESTORE_RESTART_FROM_SNAPSHOT is sent to trigger local member to
    // restore a cluster from existing snapshot from disk, and restart
    // an etcd instance from recovered data.
    Operation_RESTORE_RESTART_FROM_SNAPSHOT Operation = 31
    // RESTART_FROM_SNAPSHOT is sent to trigger local member to restart
    // and join an existing cluster that has been recovered from a snapshot.
    // Local member joins this cluster with fresh data.
    Operation_RESTART_FROM_SNAPSHOT Operation = 32

    // SIGQUIT_ETCD_AND_ARCHIVE_DATA is sent when consistency check failed,
    // thus need to archive etcd data directories.
    Operation_SIGQUIT_ETCD_AND_ARCHIVE_DATA Operation = 40
    // SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT destroys etcd process,
    // etcd data, and agent server.
    Operation_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT Operation = 41

    // BLACKHOLE_PEER_PORT_TX_RX drops all outgoing/incoming packets from/to
    // the peer port on target member's peer port.
    Operation_BLACKHOLE_PEER_PORT_TX_RX Operation = 100
    // UNBLACKHOLE_PEER_PORT_TX_RX removes outgoing/incoming packet dropping.
    Operation_UNBLACKHOLE_PEER_PORT_TX_RX Operation = 101

    // DELAY_PEER_PORT_TX_RX delays all outgoing/incoming packets from/to
    // the peer port on target member's peer port.
    Operation_DELAY_PEER_PORT_TX_RX Operation = 200
    // UNDELAY_PEER_PORT_TX_RX removes all outgoing/incoming delays.
    Operation_UNDELAY_PEER_PORT_TX_RX Operation = 201
)
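A hedged sketch of how an Operation travels to an agent inside a Request; restartRequest is a hypothetical helper, and the Member/Tester values are assumed to come from tester configuration:

    func restartRequest(mem *rpcpb.Member, tc *rpcpb.Tester) *rpcpb.Request {
        return &rpcpb.Request{
            Operation: rpcpb.Operation_RESTART_ETCD,
            Member:    mem,
            Tester:    tc,
        }
    }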
func (Operation) EnumDescriptor ¶
func (Operation) EnumDescriptor() ([]byte, []int)
type Request ¶
type Request struct {
    Operation Operation `protobuf:"varint,1,opt,name=Operation,proto3,enum=rpcpb.Operation" json:"Operation,omitempty"`
    // Member contains the same Member object from tester configuration.
    Member *Member `protobuf:"bytes,2,opt,name=Member,proto3" json:"Member,omitempty"`
    // Tester contains tester configuration.
    Tester *Tester `protobuf:"bytes,3,opt,name=Tester,proto3" json:"Tester,omitempty"`

    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
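The gogo/protobuf-generated Marshal and Unmarshal methods round-trip a Request over the wire; a minimal sketch (roundTrip is a hypothetical helper). Response, Member, and the other message types expose the same methods:

    func roundTrip(req *rpcpb.Request) (*rpcpb.Request, error) {
        data, err := req.Marshal() // protobuf wire format
        if err != nil {
            return nil, err
        }
        out := &rpcpb.Request{}
        if err := out.Unmarshal(data); err != nil {
            return nil, err
        }
        return out, nil
    }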
func (*Request) Descriptor ¶
func (*Request) Descriptor() ([]byte, []int)
func (*Request) MarshalToSizedBuffer ¶
func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error)
func (*Request) ProtoMessage ¶
func (*Request) ProtoMessage()
func (*Request) XXX_DiscardUnknown ¶
func (m *Request) XXX_DiscardUnknown()
func (*Request) XXX_Marshal ¶
func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*Request) XXX_Unmarshal ¶
func (m *Request) XXX_Unmarshal(b []byte) error
type Response ¶
type Response struct {
    Success bool   `protobuf:"varint,1,opt,name=Success,proto3" json:"Success,omitempty"`
    Status  string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"`
    // Member contains the same Member object from tester request.
    Member *Member `protobuf:"bytes,3,opt,name=Member,proto3" json:"Member,omitempty"`
    // SnapshotInfo contains SAVE_SNAPSHOT request results.
    SnapshotInfo *SnapshotInfo `protobuf:"bytes,4,opt,name=SnapshotInfo,proto3" json:"SnapshotInfo,omitempty"`

    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
func (*Response) Descriptor ¶
func (*Response) Descriptor() ([]byte, []int)
func (*Response) MarshalToSizedBuffer ¶
func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error)
func (*Response) ProtoMessage ¶
func (*Response) ProtoMessage()
func (*Response) XXX_DiscardUnknown ¶
func (m *Response) XXX_DiscardUnknown()
func (*Response) XXX_Marshal ¶
func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*Response) XXX_Unmarshal ¶
func (m *Response) XXX_Unmarshal(b []byte) error
type SnapshotInfo ¶
type SnapshotInfo struct {
    MemberName        string   `protobuf:"bytes,1,opt,name=MemberName,proto3" json:"MemberName,omitempty"`
    MemberClientURLs  []string `protobuf:"bytes,2,rep,name=MemberClientURLs,proto3" json:"MemberClientURLs,omitempty"`
    SnapshotPath      string   `protobuf:"bytes,3,opt,name=SnapshotPath,proto3" json:"SnapshotPath,omitempty"`
    SnapshotFileSize  string   `protobuf:"bytes,4,opt,name=SnapshotFileSize,proto3" json:"SnapshotFileSize,omitempty"`
    SnapshotTotalSize string   `protobuf:"bytes,5,opt,name=SnapshotTotalSize,proto3" json:"SnapshotTotalSize,omitempty"`
    SnapshotTotalKey  int64    `protobuf:"varint,6,opt,name=SnapshotTotalKey,proto3" json:"SnapshotTotalKey,omitempty"`
    SnapshotHash      int64    `protobuf:"varint,7,opt,name=SnapshotHash,proto3" json:"SnapshotHash,omitempty"`
    SnapshotRevision  int64    `protobuf:"varint,8,opt,name=SnapshotRevision,proto3" json:"SnapshotRevision,omitempty"`
    Took              string   `protobuf:"bytes,9,opt,name=Took,proto3" json:"Took,omitempty"`

    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
SnapshotInfo contains SAVE_SNAPSHOT request results.
func (*SnapshotInfo) Descriptor ¶
func (*SnapshotInfo) Descriptor() ([]byte, []int)
func (*SnapshotInfo) Marshal ¶
func (m *SnapshotInfo) Marshal() (dAtA []byte, err error)
func (*SnapshotInfo) MarshalToSizedBuffer ¶
func (m *SnapshotInfo) MarshalToSizedBuffer(dAtA []byte) (int, error)
func (*SnapshotInfo) ProtoMessage ¶
func (*SnapshotInfo) ProtoMessage()
func (*SnapshotInfo) Reset ¶
func (m *SnapshotInfo) Reset()
func (*SnapshotInfo) Size ¶
func (m *SnapshotInfo) Size() (n int)
func (*SnapshotInfo) String ¶
func (m *SnapshotInfo) String() string
func (*SnapshotInfo) Unmarshal ¶
func (m *SnapshotInfo) Unmarshal(dAtA []byte) error
func (*SnapshotInfo) XXX_DiscardUnknown ¶
func (m *SnapshotInfo) XXX_DiscardUnknown()
func (*SnapshotInfo) XXX_Marshal ¶
func (m *SnapshotInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*SnapshotInfo) XXX_Merge ¶
func (m *SnapshotInfo) XXX_Merge(src proto.Message)
func (*SnapshotInfo) XXX_Size ¶
func (m *SnapshotInfo) XXX_Size() int
func (*SnapshotInfo) XXX_Unmarshal ¶
func (m *SnapshotInfo) XXX_Unmarshal(b []byte) error
type Stresser ¶
type Stresser struct {
    Type   string  `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty" yaml:"type"`
    Weight float64 `protobuf:"fixed64,2,opt,name=Weight,proto3" json:"Weight,omitempty" yaml:"weight"`

    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
func (*Stresser) Descriptor ¶
func (*Stresser) Descriptor() ([]byte, []int)
func (*Stresser) MarshalToSizedBuffer ¶
func (m *Stresser) MarshalToSizedBuffer(dAtA []byte) (int, error)
func (*Stresser) ProtoMessage ¶
func (*Stresser) ProtoMessage()
func (*Stresser) XXX_DiscardUnknown ¶
func (m *Stresser) XXX_DiscardUnknown()
func (*Stresser) XXX_Marshal ¶
func (m *Stresser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*Stresser) XXX_Unmarshal ¶
func (m *Stresser) XXX_Unmarshal(b []byte) error
type StresserType ¶
type StresserType int32
const (
    StresserType_KV_WRITE_SMALL      StresserType = 0
    StresserType_KV_WRITE_LARGE      StresserType = 1
    StresserType_KV_READ_ONE_KEY     StresserType = 2
    StresserType_KV_READ_RANGE       StresserType = 3
    StresserType_KV_DELETE_ONE_KEY   StresserType = 4
    StresserType_KV_DELETE_RANGE     StresserType = 5
    StresserType_KV_TXN_WRITE_DELETE StresserType = 6
    StresserType_LEASE               StresserType = 10
    StresserType_ELECTION_RUNNER     StresserType = 20
    StresserType_WATCH_RUNNER        StresserType = 31
    StresserType_LOCK_RACER_RUNNER   StresserType = 41
    StresserType_LEASE_RUNNER        StresserType = 51
)
func (StresserType) EnumDescriptor ¶
func (StresserType) EnumDescriptor() ([]byte, []int)
func (StresserType) String ¶
func (x StresserType) String() string
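A small sketch of the enum-to-name mapping (assuming fmt and the rpcpb import from the earlier sketches); by convention the string-typed Stresser.Type field carries these same names, though that is an observation rather than an API guarantee:

    st := rpcpb.StresserType_KV_WRITE_SMALL
    fmt.Println(st.String())                       // KV_WRITE_SMALL
    fmt.Println(rpcpb.StresserType_value["LEASE"]) // 10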
type Tester ¶
type Tester struct {
    DataDir string `protobuf:"bytes,1,opt,name=DataDir,proto3" json:"DataDir,omitempty" yaml:"data-dir"`
    Network string `protobuf:"bytes,2,opt,name=Network,proto3" json:"Network,omitempty" yaml:"network"`
    Addr    string `protobuf:"bytes,3,opt,name=Addr,proto3" json:"Addr,omitempty" yaml:"addr"`

    // DelayLatencyMs is the delay latency in milliseconds,
    // to inject into the simulated slow network.
    DelayLatencyMs uint32 `protobuf:"varint,11,opt,name=DelayLatencyMs,proto3" json:"DelayLatencyMs,omitempty" yaml:"delay-latency-ms"`
    // DelayLatencyMsRv is the delay latency random variable in milliseconds.
    DelayLatencyMsRv uint32 `protobuf:"varint,12,opt,name=DelayLatencyMsRv,proto3" json:"DelayLatencyMsRv,omitempty" yaml:"delay-latency-ms-rv"`
    // UpdatedDelayLatencyMs is the updated delay latency in milliseconds,
    // to inject into the simulated slow network. It's the final latency to apply,
    // in case the latency numbers are randomly generated from the given delay latency field.
    UpdatedDelayLatencyMs uint32 `` /* 129-byte string literal not displayed */

    // RoundLimit is the limit of rounds to run failure set (-1 to run without limits).
    RoundLimit int32 `protobuf:"varint,21,opt,name=RoundLimit,proto3" json:"RoundLimit,omitempty" yaml:"round-limit"`
    // If ExitOnCaseFail is true, exit tester on first failure.
    ExitOnCaseFail bool `protobuf:"varint,22,opt,name=ExitOnCaseFail,proto3" json:"ExitOnCaseFail,omitempty" yaml:"exit-on-failure"`
    // EnablePprof is true to enable profiler.
    EnablePprof bool `protobuf:"varint,23,opt,name=EnablePprof,proto3" json:"EnablePprof,omitempty" yaml:"enable-pprof"`

    // CaseDelayMs is the delay duration after failure is injected.
    // Useful when triggering snapshot or no-op failure cases.
    CaseDelayMs uint32 `protobuf:"varint,31,opt,name=CaseDelayMs,proto3" json:"CaseDelayMs,omitempty" yaml:"case-delay-ms"`
    // CaseShuffle is true to randomize failure injecting order.
    CaseShuffle bool `protobuf:"varint,32,opt,name=CaseShuffle,proto3" json:"CaseShuffle,omitempty" yaml:"case-shuffle"`
    // Cases is the selected test cases to schedule.
    // If empty, run all failure cases.
    Cases []string `protobuf:"bytes,33,rep,name=Cases,proto3" json:"Cases,omitempty" yaml:"cases"`
    // FailpointCommands is the list of "gofail" commands
    // (e.g. panic("etcd-tester"),1*sleep(1000)).
    FailpointCommands []string `protobuf:"bytes,34,rep,name=FailpointCommands,proto3" json:"FailpointCommands,omitempty" yaml:"failpoint-commands"`

    // RunnerExecPath is a path of etcd-runner binary.
    RunnerExecPath string `protobuf:"bytes,41,opt,name=RunnerExecPath,proto3" json:"RunnerExecPath,omitempty" yaml:"runner-exec-path"`
    // ExternalExecPath is a path of script for enabling/disabling an external fault injector.
    ExternalExecPath string `protobuf:"bytes,42,opt,name=ExternalExecPath,proto3" json:"ExternalExecPath,omitempty" yaml:"external-exec-path"`

    // Stressers is the list of stresser types:
    // KV, LEASE, ELECTION_RUNNER, WATCH_RUNNER, LOCK_RACER_RUNNER, LEASE_RUNNER.
    Stressers []*Stresser `protobuf:"bytes,101,rep,name=Stressers,proto3" json:"Stressers,omitempty" yaml:"stressers"`
    // Checkers is the list of consistency checker types:
    // KV_HASH, LEASE_EXPIRE, NO_CHECK, RUNNER.
    // Leave empty to skip consistency checks.
    Checkers []string `protobuf:"bytes,102,rep,name=Checkers,proto3" json:"Checkers,omitempty" yaml:"checkers"`

    // StressKeySize is the size of each small key written into etcd.
    StressKeySize int32 `protobuf:"varint,201,opt,name=StressKeySize,proto3" json:"StressKeySize,omitempty" yaml:"stress-key-size"`
    // StressKeySizeLarge is the size of each large key written into etcd.
    StressKeySizeLarge int32 `protobuf:"varint,202,opt,name=StressKeySizeLarge,proto3" json:"StressKeySizeLarge,omitempty" yaml:"stress-key-size-large"`
    // StressKeySuffixRange is the count of key range written into etcd.
    // Stress keys are created with "fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))".
    StressKeySuffixRange int32 `` /* 127-byte string literal not displayed */
    // StressKeySuffixRangeTxn is the count of key range written into etcd txn (max 100).
    // Stress keys are created with "fmt.Sprintf("/k%03d", i)".
    StressKeySuffixRangeTxn int32 `` /* 137-byte string literal not displayed */
    // StressKeyTxnOps is the number of operations per transaction (max 64).
    StressKeyTxnOps int32 `protobuf:"varint,205,opt,name=StressKeyTxnOps,proto3" json:"StressKeyTxnOps,omitempty" yaml:"stress-key-txn-ops"`

    // StressClients is the number of concurrent stressing clients
    // with "one" shared TCP connection.
    StressClients int32 `protobuf:"varint,301,opt,name=StressClients,proto3" json:"StressClients,omitempty" yaml:"stress-clients"`
    // StressQPS is the maximum number of stresser requests per second.
    StressQPS int32 `protobuf:"varint,302,opt,name=StressQPS,proto3" json:"StressQPS,omitempty" yaml:"stress-qps"`

    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
func (*Tester) Descriptor ¶
func (*Tester) Descriptor() ([]byte, []int)
func (*Tester) MarshalToSizedBuffer ¶
func (m *Tester) MarshalToSizedBuffer(dAtA []byte) (int, error)
func (*Tester) ProtoMessage ¶
func (*Tester) ProtoMessage()
func (*Tester) XXX_DiscardUnknown ¶
func (m *Tester) XXX_DiscardUnknown()
func (*Tester) XXX_Marshal ¶
func (m *Tester) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*Tester) XXX_Unmarshal ¶
func (m *Tester) XXX_Unmarshal(b []byte) error
type TransportClient ¶
type TransportClient interface {
Transport(ctx context.Context, opts ...grpc.CallOption) (Transport_TransportClient, error)
}
TransportClient is the client API for Transport service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewTransportClient ¶
func NewTransportClient(cc *grpc.ClientConn) TransportClient
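A hedged sketch of dialing an agent and opening the bidirectional stream; the address, plaintext credentials, and dialTransport helper are assumptions:

    func dialTransport(addr string) (*rpcpb.Response, error) {
        conn, err := grpc.Dial(addr, grpc.WithInsecure()) // plaintext for illustration only
        if err != nil {
            return nil, err
        }
        defer conn.Close()
        stream, err := rpcpb.NewTransportClient(conn).Transport(context.Background())
        if err != nil {
            return nil, err
        }
        req := &rpcpb.Request{Operation: rpcpb.Operation_INITIAL_START_ETCD}
        if err := stream.Send(req); err != nil {
            return nil, err
        }
        return stream.Recv() // agent replies with a Response
    }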
type TransportServer ¶
type TransportServer interface {
Transport(Transport_TransportServer) error
}
TransportServer is the server API for Transport service.
type UnimplementedTransportServer ¶
type UnimplementedTransportServer struct{}
UnimplementedTransportServer can be embedded to have forward compatible implementations.
func (*UnimplementedTransportServer) Transport ¶
func (*UnimplementedTransportServer) Transport(srv Transport_TransportServer) error
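A minimal sketch of embedding the stub and overriding Transport; agentServer and its echo-style loop are illustrative assumptions, not the tester's actual agent logic:

    // agentServer overrides Transport while embedding the stub
    // so future service methods don't break compilation.
    type agentServer struct {
        rpcpb.UnimplementedTransportServer
    }

    func (s *agentServer) Transport(stream rpcpb.Transport_TransportServer) error {
        for {
            req, err := stream.Recv() // next Request from the tester
            if err != nil {
                return err
            }
            resp := &rpcpb.Response{
                Success: true,
                Status:  "handled " + req.Operation.String(),
            }
            if err := stream.Send(resp); err != nil {
                return err
            }
        }
    }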