rpcpb

package
v3.3.27+incompatible
Published: Oct 15, 2021 License: Apache-2.0 Imports: 21 Imported by: 104

Documentation

Overview

Package rpcpb is a generated protocol buffer package.

It is generated from these files:

rpcpb/rpc.proto

It has these top-level messages:

Request
SnapshotInfo
Response
Member
Tester
Etcd

Index

Constants

This section is empty.

Variables

var (
	ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowRpc   = fmt.Errorf("proto: integer overflow")
)
var Case_name = map[int32]string{
	0:   "SIGTERM_ONE_FOLLOWER",
	1:   "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
	2:   "SIGTERM_LEADER",
	3:   "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT",
	4:   "SIGTERM_QUORUM",
	5:   "SIGTERM_ALL",
	10:  "SIGQUIT_AND_REMOVE_ONE_FOLLOWER",
	11:  "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
	12:  "SIGQUIT_AND_REMOVE_LEADER",
	13:  "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT",
	14:  "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH",
	100: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER",
	101: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
	102: "BLACKHOLE_PEER_PORT_TX_RX_LEADER",
	103: "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
	104: "BLACKHOLE_PEER_PORT_TX_RX_QUORUM",
	105: "BLACKHOLE_PEER_PORT_TX_RX_ALL",
	200: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
	201: "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
	202: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
	203: "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
	204: "DELAY_PEER_PORT_TX_RX_LEADER",
	205: "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER",
	206: "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
	207: "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
	208: "DELAY_PEER_PORT_TX_RX_QUORUM",
	209: "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM",
	210: "DELAY_PEER_PORT_TX_RX_ALL",
	211: "RANDOM_DELAY_PEER_PORT_TX_RX_ALL",
	300: "NO_FAIL_WITH_STRESS",
	301: "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS",
	400: "FAILPOINTS",
	500: "EXTERNAL",
}
var Case_value = map[string]int32{
	"SIGTERM_ONE_FOLLOWER":                                               0,
	"SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":                        1,
	"SIGTERM_LEADER":                                                     2,
	"SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT":                              3,
	"SIGTERM_QUORUM":                                                     4,
	"SIGTERM_ALL":                                                        5,
	"SIGQUIT_AND_REMOVE_ONE_FOLLOWER":                                    10,
	"SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":             11,
	"SIGQUIT_AND_REMOVE_LEADER":                                          12,
	"SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT":                   13,
	"SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH": 14,
	"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER":                             100,
	"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":      101,
	"BLACKHOLE_PEER_PORT_TX_RX_LEADER":                                   102,
	"BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":            103,
	"BLACKHOLE_PEER_PORT_TX_RX_QUORUM":                                   104,
	"BLACKHOLE_PEER_PORT_TX_RX_ALL":                                      105,
	"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":                                 200,
	"RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER":                          201,
	"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":          202,
	"RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT":   203,
	"DELAY_PEER_PORT_TX_RX_LEADER":                                       204,
	"RANDOM_DELAY_PEER_PORT_TX_RX_LEADER":                                205,
	"DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":                206,
	"RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT":         207,
	"DELAY_PEER_PORT_TX_RX_QUORUM":                                       208,
	"RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM":                                209,
	"DELAY_PEER_PORT_TX_RX_ALL":                                          210,
	"RANDOM_DELAY_PEER_PORT_TX_RX_ALL":                                   211,
	"NO_FAIL_WITH_STRESS":                                                300,
	"NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS":                                301,
	"FAILPOINTS":                                                         400,
	"EXTERNAL":                                                           500,
}
var Checker_name = map[int32]string{
	0: "KV_HASH",
	1: "LEASE_EXPIRE",
	2: "RUNNER",
	3: "NO_CHECK",
}
var Checker_value = map[string]int32{
	"KV_HASH":      0,
	"LEASE_EXPIRE": 1,
	"RUNNER":       2,
	"NO_CHECK":     3,
}
var Operation_name = map[int32]string{
	0:   "NOT_STARTED",
	10:  "INITIAL_START_ETCD",
	11:  "RESTART_ETCD",
	20:  "SIGTERM_ETCD",
	21:  "SIGQUIT_ETCD_AND_REMOVE_DATA",
	30:  "SAVE_SNAPSHOT",
	31:  "RESTORE_RESTART_FROM_SNAPSHOT",
	32:  "RESTART_FROM_SNAPSHOT",
	40:  "SIGQUIT_ETCD_AND_ARCHIVE_DATA",
	41:  "SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT",
	100: "BLACKHOLE_PEER_PORT_TX_RX",
	101: "UNBLACKHOLE_PEER_PORT_TX_RX",
	200: "DELAY_PEER_PORT_TX_RX",
	201: "UNDELAY_PEER_PORT_TX_RX",
}
var Operation_value = map[string]int32{
	"NOT_STARTED":                                 0,
	"INITIAL_START_ETCD":                          10,
	"RESTART_ETCD":                                11,
	"SIGTERM_ETCD":                                20,
	"SIGQUIT_ETCD_AND_REMOVE_DATA":                21,
	"SAVE_SNAPSHOT":                               30,
	"RESTORE_RESTART_FROM_SNAPSHOT":               31,
	"RESTART_FROM_SNAPSHOT":                       32,
	"SIGQUIT_ETCD_AND_ARCHIVE_DATA":               40,
	"SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT": 41,
	"BLACKHOLE_PEER_PORT_TX_RX":                   100,
	"UNBLACKHOLE_PEER_PORT_TX_RX":                 101,
	"DELAY_PEER_PORT_TX_RX":                       200,
	"UNDELAY_PEER_PORT_TX_RX":                     201,
}
var Stresser_name = map[int32]string{
	0: "KV",
	1: "LEASE",
	2: "ELECTION_RUNNER",
	3: "WATCH_RUNNER",
	4: "LOCK_RACER_RUNNER",
	5: "LEASE_RUNNER",
}
View Source
var Stresser_value = map[string]int32{
	"KV":                0,
	"LEASE":             1,
	"ELECTION_RUNNER":   2,
	"WATCH_RUNNER":      3,
	"LOCK_RACER_RUNNER": 4,
	"LEASE_RUNNER":      5,
}
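
These paired name/value maps invert each other: Case_name maps an enum value to its canonical string, and Case_value maps the string back. A minimal sketch of a hypothetical helper that resolves a case name (e.g. an entry of Tester.Cases) to its enum value:

func caseFromName(name string) (Case, bool) {
	// Case_value reports whether the name is a known case.
	v, ok := Case_value[name]
	return Case(v), ok
}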

Functions

func RegisterTransportServer

func RegisterTransportServer(s *grpc.Server, srv TransportServer)
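
A minimal sketch of serving the Transport stream; the echoing transportServer type and the listen address are hypothetical, not part of this package:

package main

import (
	"net"

	"go.etcd.io/etcd/functional/rpcpb"
	"google.golang.org/grpc"
)

// transportServer is a hypothetical TransportServer that acknowledges
// every Request it receives on the stream.
type transportServer struct{}

func (s *transportServer) Transport(stream rpcpb.Transport_TransportServer) error {
	for {
		req, err := stream.Recv()
		if err != nil {
			return err // io.EOF when the client closes the stream
		}
		resp := &rpcpb.Response{
			Success: true,
			Status:  "received " + req.Operation.String(),
		}
		if err := stream.Send(resp); err != nil {
			return err
		}
	}
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:19027")
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	rpcpb.RegisterTransportServer(srv, &transportServer{})
	if err := srv.Serve(ln); err != nil {
		panic(err)
	}
}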

Types

type Case

type Case int32

Case defines the various system faults and test cases injected into a distributed system, in order to verify correct behavior of etcd servers and clients.

const (
	// SIGTERM_ONE_FOLLOWER stops a randomly chosen follower (non-leader)
	// but does not delete its data directories on disk for next restart.
	// It waits "delay-ms" before recovering this failure.
	// The expected behavior is that the follower comes back online
	// and rejoins the cluster, and then each member continues to process
	// client requests ('Put' requests that require Raft consensus).
	Case_SIGTERM_ONE_FOLLOWER Case = 0
	// SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly chosen
	// follower but does not delete its data directories on disk for next
	// restart. It then waits until the most up-to-date node (the leader)
	// applies the snapshot count of entries since the stop operation.
	// The expected behavior is that the follower comes back online and
	// rejoins the cluster, and then the active leader sends a snapshot
	// to the follower to force it to follow the leader's log.
	// As always, after recovery, each member must be able to process
	// client requests.
	Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 1
	// SIGTERM_LEADER stops the active leader node but does not delete its
	// data directories on disk for next restart. Then it waits "delay-ms"
	// before recovering this failure, in order to trigger election timeouts.
	// The expected behavior is that a new leader gets elected, and the
	// old leader comes back online and rejoins the cluster as a follower.
	// As always, after recovery, each member must be able to process
	// client requests.
	Case_SIGTERM_LEADER Case = 2
	// SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader node
	// but does not delete its data directories on disk for next restart.
	// It then waits until the most up-to-date node (the "new" leader)
	// applies the snapshot count of entries since the stop operation.
	// The expected behavior is that the cluster elects a new leader, and
	// the old leader comes back online and rejoins the cluster as a
	// follower, receiving a snapshot from the new leader to overwrite its
	// store. As always, after recovery, each member must be able to
	// process client requests.
	Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 3
	// SIGTERM_QUORUM stops a majority of nodes, making the whole cluster
	// inoperable, but does not delete data directories on the stopped nodes
	// for next restart. It waits "delay-ms" before recovering this failure.
	// The expected behavior is that the nodes come back online, and the
	// cluster becomes operative again. As always, after recovery, each member
	// must be able to process client requests.
	Case_SIGTERM_QUORUM Case = 4
	// SIGTERM_ALL stops the whole cluster but does not delete data directories
	// on disk for next restart. It waits "delay-ms" before recovering
	// this failure.
	// The expected behavior is that the nodes come back online, and the
	// cluster becomes operative again. As always, after recovery, each member
	// must be able to process client requests.
	Case_SIGTERM_ALL Case = 5
	// SIGQUIT_AND_REMOVE_ONE_FOLLOWER stops a randomly chosen follower
	// (non-leader), deletes its data directories on disk, and removes
	// this member from the cluster (membership reconfiguration). On recovery,
	// the tester adds a new member, and this member joins the existing cluster
	// with fresh data. It waits "delay-ms" before recovering this
	// failure. This simulates destroying one follower machine, where the
	// operator needs to add a new member from a fresh machine.
	// The expected behavior is that a new member joins the existing cluster,
	// and then each member continues to process client requests.
	Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER Case = 10
	// SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly
	// chosen follower, deletes its data directories on disk, and removes
	// this member from the cluster (membership reconfiguration). On recovery,
	// the tester adds a new member, and this member joins the existing cluster
	// with fresh data. On member removal, the cluster waits until the most
	// up-to-date node (the leader) applies the snapshot count of entries since
	// the stop operation. This simulates destroying one follower machine,
	// where the operator needs to add a new member from a fresh machine.
	// The expected behavior is that a new member joins the existing cluster,
	// and receives a snapshot from the active leader. As always, after
	// recovery, each member must be able to process client requests.
	Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 11
	// SIGQUIT_AND_REMOVE_LEADER stops the active leader node, deletes its
	// data directories on disk, and removes this member from the cluster.
	// On recovery, the tester adds a new member, and this member joins the
	// existing cluster with fresh data. It waits "delay-ms" before
	// recovering this failure. This simulates destroying a leader machine,
	// where the operator needs to add a new member from a fresh machine.
	// The expected behavior is that a new member joins the existing cluster,
	// and then each member continues to process client requests.
	Case_SIGQUIT_AND_REMOVE_LEADER Case = 12
	// SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader,
	// deletes its data directories on disk, and removes this member from the
	// cluster (membership reconfiguration). On recovery, the tester adds a new
	// member, and this member joins the existing cluster with fresh data. On
	// member removal, the cluster waits until the most up-to-date node (the
	// new leader) applies the snapshot count of entries since the stop
	// operation. This simulates destroying a leader machine, where the
	// operator needs to add a new member from a fresh machine.
	// The expected behavior is that on member removal, the cluster elects a
	// new leader, and a new member joins the existing cluster and receives a
	// snapshot from the newly elected leader. As always, after recovery, each
	// member must be able to process client requests.
	Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 13
	// SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH first
	// stops a majority of nodes and deletes the data directories on those
	// quorum nodes, making the whole cluster inoperable. Now that the quorum
	// and its data are totally destroyed, the cluster cannot even remove the
	// unavailable nodes (e.g. 2 out of 3 are lost, so no leader can be elected).
	// Let's assume a 3-node cluster of nodes A, B, and C. One day, nodes A and
	// B are destroyed and all their data is gone. The only viable solution is
	// to recover from C's latest snapshot.
	//
	// To simulate:
	//  1. Assume node C is the current leader with most up-to-date data.
	//  2. Download snapshot from node C, before destroying node A and B.
	//  3. Destroy node A and B, and make the whole cluster inoperable.
	//  4. Now node C cannot operate either.
	//  5. SIGTERM node C and remove its data directories.
	//  6. Restore a new seed member from node C's latest snapshot file.
	//  7. Add another member to establish 2-node cluster.
	//  8. Add another member to establish 3-node cluster.
	//  9. Add more if any.
	//
	// The expected behavior is that etcd successfully recovers from such a
	// disastrous situation when only 1 node survives out of a 3-node cluster:
	// new members join the existing cluster, and the previous data from the
	// snapshot is still preserved after the recovery process. As always,
	// after recovery, each member must be able to process client requests.
	Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH Case = 14
	// BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER drops all outgoing/incoming
	// packets from/to the peer port on a randomly chosen follower
	// (non-leader), and waits "delay-ms" until recovery.
	// The expected behavior is that once the drop operation is undone,
	// each member must be able to process client requests.
	Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 100
	// BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT drops
	// all outgoing/incoming packets from/to the peer port on a randomly
	// chosen follower (non-leader), and waits until the most up-to-date
	// node (the leader) applies the snapshot count of entries since the
	// blackhole operation.
	// The expected behavior is that once the packet drop operation is undone,
	// the slow follower tries to catch up, possibly receiving the snapshot
	// from the active leader. As always, after recovery, each member must
	// be able to process client requests.
	Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 101
	// BLACKHOLE_PEER_PORT_TX_RX_LEADER drops all outgoing/incoming packets
	// from/to the peer port on the active leader (isolating it), and waits
	// "delay-ms" until recovery, in order to trigger an election timeout.
	// The expected behavior is that after the election timeout, a new leader
	// gets elected, and once the drop operation is undone, the old leader
	// comes back and rejoins the cluster as a follower. As always, after
	// recovery, each member must be able to process client requests.
	Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER Case = 102
	// BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT drops all
	// outgoing/incoming packets from/to the peer port on the active leader,
	// and waits until the most up-to-date node (the leader) applies the
	// snapshot count of entries since the blackhole operation.
	// The expected behavior is that the cluster elects a new leader, and once
	// the drop operation is undone, the old leader comes back and rejoins
	// the cluster as a follower. The slow follower tries to catch up, likely
	// receiving the snapshot from the new active leader. As always, after
	// recovery, each member must be able to process client requests.
	Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 103
	// BLACKHOLE_PEER_PORT_TX_RX_QUORUM drops all outgoing/incoming packets
	// from/to the peer ports on a majority of nodes, losing the leader and
	// making the cluster inoperable. It waits "delay-ms" until recovery.
	// The expected behavior is that once the packet drop operation is undone,
	// the nodes come back online, and the cluster becomes operative again.
	// As always, after recovery, each member must be able to process client
	// requests.
	Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM Case = 104
	// BLACKHOLE_PEER_PORT_TX_RX_ALL drops all outgoing/incoming packets
	// from/to the peer ports on all nodes, making the cluster totally
	// inoperable. It waits "delay-ms" until recovery.
	// The expected behavior is that once the packet drop operation is undone,
	// the nodes come back online, and the cluster becomes operative again.
	// As always, after recovery, each member must be able to process client
	// requests.
	Case_BLACKHOLE_PEER_PORT_TX_RX_ALL Case = 105
	// DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming packets
	// from/to the peer port on a randomly chosen follower (non-leader).
	// It waits "delay-ms" until recovery.
	// The expected behavior is that once the packet delay operation is undone,
	// the follower comes back and tries to catch up with the latest changes
	// from the cluster. As always, after recovery, each member must be able
	// to process client requests.
	Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 200
	// RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming
	// packets from/to the peer port on a randomly chosen follower
	// (non-leader) with a randomized time duration (thus isolating it).
	// It waits "delay-ms" until recovery.
	// The expected behavior is that once the packet delay operation is undone,
	// each member must be able to process client requests.
	Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 201
	// DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays
	// outgoing/incoming packets from/to the peer port on a randomly chosen
	// follower (non-leader), and waits until the most up-to-date node (the
	// leader) applies the snapshot count of entries since the delay operation.
	// The expected behavior is that the delayed follower gets isolated and
	// falls behind the current active leader, and once the delay operation is
	// undone, the slow follower comes back and catches up, possibly receiving
	// a snapshot from the active leader. As always, after recovery, each
	// member must be able to process client requests.
	Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 202
	// RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays
	// outgoing/incoming packets from/to the peer port on a randomly chosen
	// follower (non-leader) with a randomized time duration, and waits until
	// the most up-to-date node (the leader) applies the snapshot count of
	// entries since the delay operation.
	// The expected behavior is that the delayed follower gets isolated and
	// falls behind the current active leader, and once the delay operation is
	// undone, the slow follower comes back and catches up, possibly receiving
	// a snapshot from the active leader. As always, after recovery, each
	// member must be able to process client requests.
	Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 203
	// DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets from/to
	// the peer port on the active leader. It waits "delay-ms" until
	// recovery.
	// The expected behavior is that the cluster may elect a new leader, and
	// once the packet delay operation is undone, the (old) leader comes back
	// and tries to catch up with the latest changes from the cluster. As
	// always, after recovery, each member must be able to process client
	// requests.
	Case_DELAY_PEER_PORT_TX_RX_LEADER Case = 204
	// RANDOM_DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets
	// from/to the peer port on the active leader with a randomized time
	// duration. It waits "delay-ms" until recovery.
	// The expected behavior is that the cluster may elect a new leader, and
	// once the packet delay operation is undone, the (old) leader comes back
	// and tries to catch up with the latest changes from the cluster. As
	// always, after recovery, each member must be able to process client
	// requests.
	Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER Case = 205
	// DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays
	// outgoing/incoming packets from/to the peer port on the active leader,
	// and waits until the most up-to-date node (the current or new leader)
	// applies the snapshot count of entries since the delay operation.
	// The expected behavior is that the cluster may elect a new leader, and
	// the old leader gets isolated and falls behind the current active leader;
	// once the delay operation is undone, the slow follower comes back
	// and catches up, likely receiving a snapshot from the active leader.
	// As always, after recovery, each member must be able to process client
	// requests.
	Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 206
	// RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays
	// outgoing/incoming packets from/to the peer port on the active leader
	// with a randomized time duration, and waits until the most up-to-date
	// node (the current or new leader) applies the snapshot count of entries
	// since the delay operation.
	// The expected behavior is that the cluster may elect a new leader, and
	// the old leader gets isolated and falls behind the current active leader;
	// once the delay operation is undone, the slow follower comes back
	// and catches up, likely receiving a snapshot from the active leader.
	// As always, after recovery, each member must be able to process client
	// requests.
	Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 207
	// DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets from/to
	// the peer ports on a majority of nodes. It waits "delay-ms" until
	// recovery, likely triggering election timeouts.
	// The expected behavior is that the cluster may elect a new leader while
	// the quorum of nodes struggles with the slow network, and once the delay
	// operation is undone, the nodes come back and the cluster becomes
	// operative again. As always, after recovery, each member must be able
	// to process client requests.
	Case_DELAY_PEER_PORT_TX_RX_QUORUM Case = 208
	// RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets
	// from/to the peer ports on a majority of nodes, with randomized
	// time durations. It waits "delay-ms" until recovery, likely
	// triggering election timeouts.
	// The expected behavior is that the cluster may elect a new leader while
	// the quorum of nodes struggles with the slow network, and once the delay
	// operation is undone, the nodes come back and the cluster becomes
	// operative again. As always, after recovery, each member must be able
	// to process client requests.
	Case_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM Case = 209
	// DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets from/to the
	// peer ports on all nodes. It waits "delay-ms" until recovery,
	// likely triggering election timeouts.
	// The expected behavior is that the cluster may become totally inoperable,
	// struggling with slow networks across the whole cluster. Once the delay
	// operation is undone, the nodes come back and the cluster becomes
	// operative again. As always, after recovery, each member must be able
	// to process client requests.
	Case_DELAY_PEER_PORT_TX_RX_ALL Case = 210
	// RANDOM_DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets
	// from/to the peer ports on all nodes, with randomized time durations.
	// It waits "delay-ms" until recovery, likely triggering
	// election timeouts.
	// The expected behavior is that the cluster may become totally inoperable,
	// struggling with slow networks across the whole cluster. Once the delay
	// operation is undone, the nodes come back and the cluster becomes
	// operative again. As always, after recovery, each member must be able
	// to process client requests.
	Case_RANDOM_DELAY_PEER_PORT_TX_RX_ALL Case = 211
	// NO_FAIL_WITH_STRESS stops injecting failures while testing
	// consistency and correctness under pressure loads, for the duration of
	// "delay-ms". The goal is to ensure the cluster is still making progress
	// on recovery, and to verify that the system does not deadlock following
	// a sequence of failure injections.
	// The expected behavior is that the cluster remains fully operative in a
	// healthy condition. As always, after recovery, each member must be able
	// to process client requests.
	Case_NO_FAIL_WITH_STRESS Case = 300
	// NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS neither injects failures nor
	// sends stressing client requests to the cluster, for the duration of
	// "delay-ms". The goal is to ensure the cluster is still making progress
	// on recovery, and to verify that the system does not deadlock following
	// a sequence of failure injections.
	// The expected behavior is that the cluster remains fully operative in a
	// healthy condition, and client requests during the liveness period
	// succeed without errors.
	// Note: this is how Google Chubby does failure injection testing; see
	// https://static.googleusercontent.com/media/research.google.com/en//archive/paxos_made_live.pdf.
	Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS Case = 301
	// FAILPOINTS injects failpoints into the etcd server runtime, triggering
	// panics in critical code paths.
	Case_FAILPOINTS Case = 400
	// EXTERNAL runs external failure injection scripts.
	Case_EXTERNAL Case = 500
)

func (Case) EnumDescriptor

func (Case) EnumDescriptor() ([]byte, []int)

func (Case) String

func (x Case) String() string

type Checker

type Checker int32
const (
	Checker_KV_HASH      Checker = 0
	Checker_LEASE_EXPIRE Checker = 1
	Checker_RUNNER       Checker = 2
	Checker_NO_CHECK     Checker = 3
)

func (Checker) EnumDescriptor

func (Checker) EnumDescriptor() ([]byte, []int)

func (Checker) String

func (x Checker) String() string

type Etcd

type Etcd struct {
	Name    string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty" yaml:"name"`
	DataDir string `protobuf:"bytes,2,opt,name=DataDir,proto3" json:"DataDir,omitempty" yaml:"data-dir"`
	WALDir  string `protobuf:"bytes,3,opt,name=WALDir,proto3" json:"WALDir,omitempty" yaml:"wal-dir"`
	// HeartbeatIntervalMs is the time (in milliseconds) of a heartbeat interval.
	// Default value is 100, which is 100ms.
	HeartbeatIntervalMs int64 `protobuf:"varint,11,opt,name=HeartbeatIntervalMs,proto3" json:"HeartbeatIntervalMs,omitempty" yaml:"heartbeat-interval"`
	// ElectionTimeoutMs is the time (in milliseconds) for an election to timeout.
	// Default value is 1000, which is 1s.
	ElectionTimeoutMs   int64    `protobuf:"varint,12,opt,name=ElectionTimeoutMs,proto3" json:"ElectionTimeoutMs,omitempty" yaml:"election-timeout"`
	ListenClientURLs    []string `protobuf:"bytes,21,rep,name=ListenClientURLs" json:"ListenClientURLs,omitempty" yaml:"listen-client-urls"`
	AdvertiseClientURLs []string `protobuf:"bytes,22,rep,name=AdvertiseClientURLs" json:"AdvertiseClientURLs,omitempty" yaml:"advertise-client-urls"`
	ClientAutoTLS       bool     `protobuf:"varint,23,opt,name=ClientAutoTLS,proto3" json:"ClientAutoTLS,omitempty" yaml:"auto-tls"`
	ClientCertAuth      bool     `protobuf:"varint,24,opt,name=ClientCertAuth,proto3" json:"ClientCertAuth,omitempty" yaml:"client-cert-auth"`
	ClientCertFile      string   `protobuf:"bytes,25,opt,name=ClientCertFile,proto3" json:"ClientCertFile,omitempty" yaml:"cert-file"`
	ClientKeyFile       string   `protobuf:"bytes,26,opt,name=ClientKeyFile,proto3" json:"ClientKeyFile,omitempty" yaml:"key-file"`
	ClientTrustedCAFile string   `protobuf:"bytes,27,opt,name=ClientTrustedCAFile,proto3" json:"ClientTrustedCAFile,omitempty" yaml:"trusted-ca-file"`
	ListenPeerURLs      []string `protobuf:"bytes,31,rep,name=ListenPeerURLs" json:"ListenPeerURLs,omitempty" yaml:"listen-peer-urls"`
	AdvertisePeerURLs   []string `protobuf:"bytes,32,rep,name=AdvertisePeerURLs" json:"AdvertisePeerURLs,omitempty" yaml:"initial-advertise-peer-urls"`
	PeerAutoTLS         bool     `protobuf:"varint,33,opt,name=PeerAutoTLS,proto3" json:"PeerAutoTLS,omitempty" yaml:"peer-auto-tls"`
	PeerClientCertAuth  bool     `protobuf:"varint,34,opt,name=PeerClientCertAuth,proto3" json:"PeerClientCertAuth,omitempty" yaml:"peer-client-cert-auth"`
	PeerCertFile        string   `protobuf:"bytes,35,opt,name=PeerCertFile,proto3" json:"PeerCertFile,omitempty" yaml:"peer-cert-file"`
	PeerKeyFile         string   `protobuf:"bytes,36,opt,name=PeerKeyFile,proto3" json:"PeerKeyFile,omitempty" yaml:"peer-key-file"`
	PeerTrustedCAFile   string   `protobuf:"bytes,37,opt,name=PeerTrustedCAFile,proto3" json:"PeerTrustedCAFile,omitempty" yaml:"peer-trusted-ca-file"`
	InitialCluster      string   `protobuf:"bytes,41,opt,name=InitialCluster,proto3" json:"InitialCluster,omitempty" yaml:"initial-cluster"`
	InitialClusterState string   `protobuf:"bytes,42,opt,name=InitialClusterState,proto3" json:"InitialClusterState,omitempty" yaml:"initial-cluster-state"`
	InitialClusterToken string   `protobuf:"bytes,43,opt,name=InitialClusterToken,proto3" json:"InitialClusterToken,omitempty" yaml:"initial-cluster-token"`
	SnapshotCount       int64    `protobuf:"varint,51,opt,name=SnapshotCount,proto3" json:"SnapshotCount,omitempty" yaml:"snapshot-count"`
	QuotaBackendBytes   int64    `protobuf:"varint,52,opt,name=QuotaBackendBytes,proto3" json:"QuotaBackendBytes,omitempty" yaml:"quota-backend-bytes"`
	PreVote             bool     `protobuf:"varint,63,opt,name=PreVote,proto3" json:"PreVote,omitempty" yaml:"pre-vote"`
	InitialCorruptCheck bool     `protobuf:"varint,64,opt,name=InitialCorruptCheck,proto3" json:"InitialCorruptCheck,omitempty" yaml:"initial-corrupt-check"`
}

func (*Etcd) Descriptor

func (*Etcd) Descriptor() ([]byte, []int)

func (*Etcd) Flags

func (cfg *Etcd) Flags() (fs []string)

Flags returns the etcd flags as a string slice.
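
For example, a sketch of turning an Etcd configuration into command-line arguments for the etcd binary; the field values and binary path are illustrative only:

package main

import (
	"os/exec"

	"go.etcd.io/etcd/functional/rpcpb"
)

func main() {
	cfg := &rpcpb.Etcd{
		Name:                "s1",
		DataDir:             "/tmp/etcd-agent-data/etcd.data",
		ListenClientURLs:    []string{"http://127.0.0.1:1379"},
		AdvertiseClientURLs: []string{"http://127.0.0.1:1379"},
		ListenPeerURLs:      []string{"http://127.0.0.1:1380"},
		AdvertisePeerURLs:   []string{"http://127.0.0.1:1380"},
		InitialClusterState: "new",
	}
	// Each flag is derived from the corresponding field (e.g. --name, --data-dir).
	cmd := exec.Command("/usr/local/bin/etcd", cfg.Flags()...)
	_ = cmd // a real agent would call cmd.Start() and track the process
}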

func (*Etcd) Marshal

func (m *Etcd) Marshal() (dAtA []byte, err error)

func (*Etcd) MarshalTo

func (m *Etcd) MarshalTo(dAtA []byte) (int, error)

func (*Etcd) ProtoMessage

func (*Etcd) ProtoMessage()

func (*Etcd) Reset

func (m *Etcd) Reset()

func (*Etcd) Size

func (m *Etcd) Size() (n int)

func (*Etcd) String

func (m *Etcd) String() string

func (*Etcd) Unmarshal

func (m *Etcd) Unmarshal(dAtA []byte) error

type Member

type Member struct {
	// EtcdExecPath is the path to the etcd binary executable on the agent server.
	EtcdExecPath string `protobuf:"bytes,1,opt,name=EtcdExecPath,proto3" json:"EtcdExecPath,omitempty" yaml:"etcd-exec-path"`
	// AgentAddr is the agent HTTP server address.
	AgentAddr string `protobuf:"bytes,11,opt,name=AgentAddr,proto3" json:"AgentAddr,omitempty" yaml:"agent-addr"`
	// FailpointHTTPAddr is the agent's failpoints HTTP server address.
	FailpointHTTPAddr string `protobuf:"bytes,12,opt,name=FailpointHTTPAddr,proto3" json:"FailpointHTTPAddr,omitempty" yaml:"failpoint-http-addr"`
	// BaseDir is the base directory where all logs and etcd data are stored.
	BaseDir string `protobuf:"bytes,101,opt,name=BaseDir,proto3" json:"BaseDir,omitempty" yaml:"base-dir"`
	// EtcdLogPath is the log file to store current etcd server logs.
	EtcdLogPath string `protobuf:"bytes,102,opt,name=EtcdLogPath,proto3" json:"EtcdLogPath,omitempty" yaml:"etcd-log-path"`
	// EtcdClientProxy is true when client traffic needs to be proxied.
	// If true, the listen client URL port must be different from the advertise client URL port.
	EtcdClientProxy bool `protobuf:"varint,201,opt,name=EtcdClientProxy,proto3" json:"EtcdClientProxy,omitempty" yaml:"etcd-client-proxy"`
	// EtcdPeerProxy is true when peer traffic needs to be proxied.
	// If true, the listen peer URL port must be different from the advertise peer URL port.
	EtcdPeerProxy bool `protobuf:"varint,202,opt,name=EtcdPeerProxy,proto3" json:"EtcdPeerProxy,omitempty" yaml:"etcd-peer-proxy"`
	// EtcdClientEndpoint is the etcd client endpoint.
	EtcdClientEndpoint string `protobuf:"bytes,301,opt,name=EtcdClientEndpoint,proto3" json:"EtcdClientEndpoint,omitempty" yaml:"etcd-client-endpoint"`
	// Etcd defines etcd binary configuration flags.
	Etcd *Etcd `protobuf:"bytes,302,opt,name=Etcd" json:"Etcd,omitempty" yaml:"etcd"`
	// EtcdOnSnapshotRestore defines one-time use configuration during etcd
	// snapshot recovery process.
	EtcdOnSnapshotRestore *Etcd `protobuf:"bytes,303,opt,name=EtcdOnSnapshotRestore" json:"EtcdOnSnapshotRestore,omitempty"`
	// ClientCertData contains cert file contents from this member's etcd server.
	ClientCertData string `protobuf:"bytes,401,opt,name=ClientCertData,proto3" json:"ClientCertData,omitempty" yaml:"client-cert-data"`
	ClientCertPath string `protobuf:"bytes,402,opt,name=ClientCertPath,proto3" json:"ClientCertPath,omitempty" yaml:"client-cert-path"`
	// ClientKeyData contains key file contents from this member's etcd server.
	ClientKeyData string `protobuf:"bytes,403,opt,name=ClientKeyData,proto3" json:"ClientKeyData,omitempty" yaml:"client-key-data"`
	ClientKeyPath string `protobuf:"bytes,404,opt,name=ClientKeyPath,proto3" json:"ClientKeyPath,omitempty" yaml:"client-key-path"`
	// ClientTrustedCAData contains trusted CA file contents from this member's etcd server.
	ClientTrustedCAData string `protobuf:"bytes,405,opt,name=ClientTrustedCAData,proto3" json:"ClientTrustedCAData,omitempty" yaml:"client-trusted-ca-data"`
	ClientTrustedCAPath string `protobuf:"bytes,406,opt,name=ClientTrustedCAPath,proto3" json:"ClientTrustedCAPath,omitempty" yaml:"client-trusted-ca-path"`
	// PeerCertData contains cert file contents from this member's etcd server.
	PeerCertData string `protobuf:"bytes,501,opt,name=PeerCertData,proto3" json:"PeerCertData,omitempty" yaml:"peer-cert-data"`
	PeerCertPath string `protobuf:"bytes,502,opt,name=PeerCertPath,proto3" json:"PeerCertPath,omitempty" yaml:"peer-cert-path"`
	// PeerKeyData contains key file contents from this member's etcd server.
	PeerKeyData string `protobuf:"bytes,503,opt,name=PeerKeyData,proto3" json:"PeerKeyData,omitempty" yaml:"peer-key-data"`
	PeerKeyPath string `protobuf:"bytes,504,opt,name=PeerKeyPath,proto3" json:"PeerKeyPath,omitempty" yaml:"peer-key-path"`
	// PeerTrustedCAData contains trusted CA file contents from this member's etcd server.
	PeerTrustedCAData string `protobuf:"bytes,505,opt,name=PeerTrustedCAData,proto3" json:"PeerTrustedCAData,omitempty" yaml:"peer-trusted-ca-data"`
	PeerTrustedCAPath string `protobuf:"bytes,506,opt,name=PeerTrustedCAPath,proto3" json:"PeerTrustedCAPath,omitempty" yaml:"peer-trusted-ca-path"`
	// SnapshotPath is the snapshot file path to store or restore from.
	SnapshotPath string `protobuf:"bytes,601,opt,name=SnapshotPath,proto3" json:"SnapshotPath,omitempty" yaml:"snapshot-path"`
	// SnapshotInfo contains the results of the last SAVE_SNAPSHOT request.
	SnapshotInfo *SnapshotInfo `protobuf:"bytes,602,opt,name=SnapshotInfo" json:"SnapshotInfo,omitempty"`
}

func (*Member) CheckCompact

func (m *Member) CheckCompact(rev int64) error

CheckCompact ensures that historical data before the given revision has been compacted.

func (*Member) Compact

func (m *Member) Compact(rev int64, timeout time.Duration) error

Compact compacts the member's storage up to the given revision. It blocks until the compaction is physically done.

func (*Member) CreateEtcdClient

func (m *Member) CreateEtcdClient(opts ...grpc.DialOption) (*clientv3.Client, error)

CreateEtcdClient creates a client from the member.
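
A sketch of dialing a member and fetching its current revision; the client endpoint is hypothetical:

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/functional/rpcpb"
)

func main() {
	m := &rpcpb.Member{EtcdClientEndpoint: "127.0.0.1:2379"}
	cli, err := m.CreateEtcdClient()
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	rev, err := m.Rev(ctx) // current revision on this member
	cancel()
	if err != nil {
		panic(err)
	}
	fmt.Println("revision:", rev)
}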

func (*Member) CreateEtcdClientConfig

func (m *Member) CreateEtcdClientConfig(opts ...grpc.DialOption) (cfg *clientv3.Config, err error)

CreateEtcdClientConfig creates a client configuration from the member.

func (*Member) Defrag

func (m *Member) Defrag() error

Defrag runs defragmentation on this member.

func (*Member) Descriptor

func (*Member) Descriptor() ([]byte, []int)

func (*Member) DialEtcdGRPCServer

func (m *Member) DialEtcdGRPCServer(opts ...grpc.DialOption) (*grpc.ClientConn, error)

DialEtcdGRPCServer creates a raw gRPC connection to an etcd member.

func (*Member) ElectionTimeout

func (m *Member) ElectionTimeout() time.Duration

ElectionTimeout returns an election timeout duration.

func (*Member) IsLeader

func (m *Member) IsLeader() (bool, error)

IsLeader returns true if this member is the current cluster leader.

func (*Member) Marshal

func (m *Member) Marshal() (dAtA []byte, err error)

func (*Member) MarshalTo

func (m *Member) MarshalTo(dAtA []byte) (int, error)

func (*Member) ProtoMessage

func (*Member) ProtoMessage()

func (*Member) Reset

func (m *Member) Reset()

func (*Member) RestoreSnapshot

func (m *Member) RestoreSnapshot(lg *zap.Logger) (err error)

RestoreSnapshot restores a cluster from a given snapshot file on disk. It's meant to be requested remotely, so that the local member can load the snapshot file from its local disk.

func (*Member) Rev

func (m *Member) Rev(ctx context.Context) (int64, error)

Rev fetches the current revision on this member.

func (*Member) RevHash

func (m *Member) RevHash() (int64, int64, error)

RevHash fetches the current revision and hash on this member.

func (*Member) SaveSnapshot

func (m *Member) SaveSnapshot(lg *zap.Logger) (err error)

SaveSnapshot downloads a snapshot file from this member to local disk. It's meant to be requested remotely, so that the local member can store the snapshot file on its local disk.

func (*Member) Size

func (m *Member) Size() (n int)

func (*Member) String

func (m *Member) String() string

func (*Member) Unmarshal

func (m *Member) Unmarshal(dAtA []byte) error

func (*Member) WriteHealthKey

func (m *Member) WriteHealthKey() error

WriteHealthKey writes a health key to this member.

type Operation

type Operation int32
const (
	// NOT_STARTED is the agent status before the first etcd start.
	Operation_NOT_STARTED Operation = 0
	// INITIAL_START_ETCD is sent to start etcd for the very first time.
	Operation_INITIAL_START_ETCD Operation = 10
	// RESTART_ETCD is sent to restart a killed etcd process.
	Operation_RESTART_ETCD Operation = 11
	// SIGTERM_ETCD pauses the etcd process while keeping its data directories
	// and previous etcd configuration.
	Operation_SIGTERM_ETCD Operation = 20
	// SIGQUIT_ETCD_AND_REMOVE_DATA kills the etcd process and removes all of
	// its data directories, to simulate destroying the whole machine.
	Operation_SIGQUIT_ETCD_AND_REMOVE_DATA Operation = 21
	// SAVE_SNAPSHOT is sent to trigger the local member to download its
	// snapshot onto its local disk, at the path specified by the tester.
	Operation_SAVE_SNAPSHOT Operation = 30
	// RESTORE_RESTART_FROM_SNAPSHOT is sent to trigger the local member to
	// restore a cluster from an existing snapshot on disk, and restart
	// an etcd instance from the recovered data.
	Operation_RESTORE_RESTART_FROM_SNAPSHOT Operation = 31
	// RESTART_FROM_SNAPSHOT is sent to trigger the local member to restart
	// and join an existing cluster that has been recovered from a snapshot.
	// The local member joins this cluster with fresh data.
	Operation_RESTART_FROM_SNAPSHOT Operation = 32
	// SIGQUIT_ETCD_AND_ARCHIVE_DATA is sent when the consistency check fails,
	// in order to archive the etcd data directories.
	Operation_SIGQUIT_ETCD_AND_ARCHIVE_DATA Operation = 40
	// SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT destroys the etcd process,
	// the etcd data, and the agent server.
	Operation_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT Operation = 41
	// BLACKHOLE_PEER_PORT_TX_RX drops all outgoing/incoming packets on the
	// target member's peer port.
	Operation_BLACKHOLE_PEER_PORT_TX_RX Operation = 100
	// UNBLACKHOLE_PEER_PORT_TX_RX removes the outgoing/incoming packet dropping.
	Operation_UNBLACKHOLE_PEER_PORT_TX_RX Operation = 101
	// DELAY_PEER_PORT_TX_RX delays all outgoing/incoming packets on the
	// target member's peer port.
	Operation_DELAY_PEER_PORT_TX_RX Operation = 200
	// UNDELAY_PEER_PORT_TX_RX removes all outgoing/incoming delays.
	Operation_UNDELAY_PEER_PORT_TX_RX Operation = 201
)

func (Operation) EnumDescriptor

func (Operation) EnumDescriptor() ([]byte, []int)

func (Operation) String

func (x Operation) String() string

type Request

type Request struct {
	Operation Operation `protobuf:"varint,1,opt,name=Operation,proto3,enum=rpcpb.Operation" json:"Operation,omitempty"`
	// Member contains the same Member object from tester configuration.
	Member *Member `protobuf:"bytes,2,opt,name=Member" json:"Member,omitempty"`
	// Tester contains tester configuration.
	Tester *Tester `protobuf:"bytes,3,opt,name=Tester" json:"Tester,omitempty"`
}
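
A sketch of building a tester-to-agent request and round-tripping it through the generated wire format; the field values are arbitrary:

package main

import "go.etcd.io/etcd/functional/rpcpb"

func main() {
	req := &rpcpb.Request{
		Operation: rpcpb.Operation_INITIAL_START_ETCD,
		Member:    &rpcpb.Member{EtcdClientEndpoint: "127.0.0.1:2379"},
		Tester:    &rpcpb.Tester{RoundLimit: 1},
	}
	b, err := req.Marshal() // protobuf-encoded bytes
	if err != nil {
		panic(err)
	}
	var decoded rpcpb.Request
	if err := decoded.Unmarshal(b); err != nil {
		panic(err)
	}
	_ = decoded.Operation // Operation_INITIAL_START_ETCD
}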

func (*Request) Descriptor

func (*Request) Descriptor() ([]byte, []int)

func (*Request) Marshal

func (m *Request) Marshal() (dAtA []byte, err error)

func (*Request) MarshalTo

func (m *Request) MarshalTo(dAtA []byte) (int, error)

func (*Request) ProtoMessage

func (*Request) ProtoMessage()

func (*Request) Reset

func (m *Request) Reset()

func (*Request) Size

func (m *Request) Size() (n int)

func (*Request) String

func (m *Request) String() string

func (*Request) Unmarshal

func (m *Request) Unmarshal(dAtA []byte) error

type Response

type Response struct {
	Success bool   `protobuf:"varint,1,opt,name=Success,proto3" json:"Success,omitempty"`
	Status  string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"`
	// Member contains the same Member object from tester request.
	Member *Member `protobuf:"bytes,3,opt,name=Member" json:"Member,omitempty"`
	// SnapshotInfo contains SAVE_SNAPSHOT request results.
	SnapshotInfo *SnapshotInfo `protobuf:"bytes,4,opt,name=SnapshotInfo" json:"SnapshotInfo,omitempty"`
}

func (*Response) Descriptor

func (*Response) Descriptor() ([]byte, []int)

func (*Response) Marshal

func (m *Response) Marshal() (dAtA []byte, err error)

func (*Response) MarshalTo

func (m *Response) MarshalTo(dAtA []byte) (int, error)

func (*Response) ProtoMessage

func (*Response) ProtoMessage()

func (*Response) Reset

func (m *Response) Reset()

func (*Response) Size

func (m *Response) Size() (n int)

func (*Response) String

func (m *Response) String() string

func (*Response) Unmarshal

func (m *Response) Unmarshal(dAtA []byte) error

type SnapshotInfo

type SnapshotInfo struct {
	MemberName        string   `protobuf:"bytes,1,opt,name=MemberName,proto3" json:"MemberName,omitempty"`
	MemberClientURLs  []string `protobuf:"bytes,2,rep,name=MemberClientURLs" json:"MemberClientURLs,omitempty"`
	SnapshotPath      string   `protobuf:"bytes,3,opt,name=SnapshotPath,proto3" json:"SnapshotPath,omitempty"`
	SnapshotFileSize  string   `protobuf:"bytes,4,opt,name=SnapshotFileSize,proto3" json:"SnapshotFileSize,omitempty"`
	SnapshotTotalSize string   `protobuf:"bytes,5,opt,name=SnapshotTotalSize,proto3" json:"SnapshotTotalSize,omitempty"`
	SnapshotTotalKey  int64    `protobuf:"varint,6,opt,name=SnapshotTotalKey,proto3" json:"SnapshotTotalKey,omitempty"`
	SnapshotHash      int64    `protobuf:"varint,7,opt,name=SnapshotHash,proto3" json:"SnapshotHash,omitempty"`
	SnapshotRevision  int64    `protobuf:"varint,8,opt,name=SnapshotRevision,proto3" json:"SnapshotRevision,omitempty"`
	Took              string   `protobuf:"bytes,9,opt,name=Took,proto3" json:"Took,omitempty"`
}

SnapshotInfo contains SAVE_SNAPSHOT request results.

func (*SnapshotInfo) Descriptor

func (*SnapshotInfo) Descriptor() ([]byte, []int)

func (*SnapshotInfo) Marshal

func (m *SnapshotInfo) Marshal() (dAtA []byte, err error)

func (*SnapshotInfo) MarshalTo

func (m *SnapshotInfo) MarshalTo(dAtA []byte) (int, error)

func (*SnapshotInfo) ProtoMessage

func (*SnapshotInfo) ProtoMessage()

func (*SnapshotInfo) Reset

func (m *SnapshotInfo) Reset()

func (*SnapshotInfo) Size

func (m *SnapshotInfo) Size() (n int)

func (*SnapshotInfo) String

func (m *SnapshotInfo) String() string

func (*SnapshotInfo) Unmarshal

func (m *SnapshotInfo) Unmarshal(dAtA []byte) error

type Stresser

type Stresser int32
const (
	Stresser_KV                Stresser = 0
	Stresser_LEASE             Stresser = 1
	Stresser_ELECTION_RUNNER   Stresser = 2
	Stresser_WATCH_RUNNER      Stresser = 3
	Stresser_LOCK_RACER_RUNNER Stresser = 4
	Stresser_LEASE_RUNNER      Stresser = 5
)

func (Stresser) EnumDescriptor

func (Stresser) EnumDescriptor() ([]byte, []int)

func (Stresser) String

func (x Stresser) String() string

type Tester

type Tester struct {
	DataDir string `protobuf:"bytes,1,opt,name=DataDir,proto3" json:"DataDir,omitempty" yaml:"data-dir"`
	Network string `protobuf:"bytes,2,opt,name=Network,proto3" json:"Network,omitempty" yaml:"network"`
	Addr    string `protobuf:"bytes,3,opt,name=Addr,proto3" json:"Addr,omitempty" yaml:"addr"`
	// DelayLatencyMs is the delay latency in milliseconds,
	// to inject into the simulated slow network.
	DelayLatencyMs uint32 `protobuf:"varint,11,opt,name=DelayLatencyMs,proto3" json:"DelayLatencyMs,omitempty" yaml:"delay-latency-ms"`
	// DelayLatencyMsRv is the delay latency random variable in milliseconds.
	DelayLatencyMsRv uint32 `protobuf:"varint,12,opt,name=DelayLatencyMsRv,proto3" json:"DelayLatencyMsRv,omitempty" yaml:"delay-latency-ms-rv"`
	// UpdatedDelayLatencyMs is the updated delay latency in milliseconds,
	// to inject into the simulated slow network. It's the final latency to
	// apply, in case the latency numbers are randomly generated from the
	// given delay latency fields.
	UpdatedDelayLatencyMs uint32 `` /* 129-byte string literal not displayed */
	// RoundLimit is the limit of rounds to run the failure set (-1 to run without limit).
	RoundLimit int32 `protobuf:"varint,21,opt,name=RoundLimit,proto3" json:"RoundLimit,omitempty" yaml:"round-limit"`
	// If ExitOnCaseFail is true, the tester exits on the first case failure.
	ExitOnCaseFail bool `protobuf:"varint,22,opt,name=ExitOnCaseFail,proto3" json:"ExitOnCaseFail,omitempty" yaml:"exit-on-failure"`
	// EnablePprof is true to enable the pprof profiler.
	EnablePprof bool `protobuf:"varint,23,opt,name=EnablePprof,proto3" json:"EnablePprof,omitempty" yaml:"enable-pprof"`
	// CaseDelayMs is the delay duration in milliseconds after a failure is injected.
	// Useful when triggering snapshot or no-op failure cases.
	CaseDelayMs uint32 `protobuf:"varint,31,opt,name=CaseDelayMs,proto3" json:"CaseDelayMs,omitempty" yaml:"case-delay-ms"`
	// CaseShuffle is true to randomize failure injecting order.
	CaseShuffle bool `protobuf:"varint,32,opt,name=CaseShuffle,proto3" json:"CaseShuffle,omitempty" yaml:"case-shuffle"`
	// Cases is the list of test cases to schedule.
	// If empty, all failure cases run.
	Cases []string `protobuf:"bytes,33,rep,name=Cases" json:"Cases,omitempty" yaml:"cases"`
	// FailpointCommands is the list of "gofail" commands
	// (e.g. panic("etcd-tester"), 1*sleep(1000)).
	FailpointCommands []string `protobuf:"bytes,34,rep,name=FailpointCommands" json:"FailpointCommands,omitempty" yaml:"failpoint-commands"`
	// RunnerExecPath is the path to the etcd-runner binary.
	RunnerExecPath string `protobuf:"bytes,41,opt,name=RunnerExecPath,proto3" json:"RunnerExecPath,omitempty" yaml:"runner-exec-path"`
	// ExternalExecPath is the path to a script for enabling/disabling an external fault injector.
	ExternalExecPath string `protobuf:"bytes,42,opt,name=ExternalExecPath,proto3" json:"ExternalExecPath,omitempty" yaml:"external-exec-path"`
	// Stressers is the list of stresser types:
	// KV, LEASE, ELECTION_RUNNER, WATCH_RUNNER, LOCK_RACER_RUNNER, LEASE_RUNNER.
	Stressers []string `protobuf:"bytes,101,rep,name=Stressers" json:"Stressers,omitempty" yaml:"stressers"`
	// Checkers is the list of consistency checker types:
	// KV_HASH, LEASE_EXPIRE, NO_CHECK, RUNNER.
	// Leave empty to skip consistency checks.
	Checkers []string `protobuf:"bytes,102,rep,name=Checkers" json:"Checkers,omitempty" yaml:"checkers"`
	// StressKeySize is the size of each small key written into etcd.
	StressKeySize int32 `protobuf:"varint,201,opt,name=StressKeySize,proto3" json:"StressKeySize,omitempty" yaml:"stress-key-size"`
	// StressKeySizeLarge is the size of each large key written into etcd.
	StressKeySizeLarge int32 `protobuf:"varint,202,opt,name=StressKeySizeLarge,proto3" json:"StressKeySizeLarge,omitempty" yaml:"stress-key-size-large"`
	// StressKeySuffixRange is the count of the key range written into etcd.
	// Stress keys are created with fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange)).
	StressKeySuffixRange int32 `` /* 127-byte string literal not displayed */
	// StressKeySuffixRangeTxn is the count of the key range written into etcd txns (max 100).
	// Stress keys are created with fmt.Sprintf("/k%03d", i).
	StressKeySuffixRangeTxn int32 `` /* 137-byte string literal not displayed */
	// StressKeyTxnOps is the number of operations per transaction (max 64).
	StressKeyTxnOps int32 `protobuf:"varint,205,opt,name=StressKeyTxnOps,proto3" json:"StressKeyTxnOps,omitempty" yaml:"stress-key-txn-ops"`
	// StressClients is the number of concurrent stressing clients
	// with one shared TCP connection.
	StressClients int32 `protobuf:"varint,301,opt,name=StressClients,proto3" json:"StressClients,omitempty" yaml:"stress-clients"`
	// StressQPS is the maximum number of stresser requests per second.
	StressQPS int32 `protobuf:"varint,302,opt,name=StressQPS,proto3" json:"StressQPS,omitempty" yaml:"stress-qps"`
}
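
Since every field carries a yaml tag, a Tester configuration can be loaded from YAML. A minimal sketch, assuming gopkg.in/yaml.v2 and an arbitrary subset of fields:

package main

import (
	"fmt"

	"go.etcd.io/etcd/functional/rpcpb"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	data := []byte(`
data-dir: /tmp/etcd-tester-data
round-limit: 1
cases:
- SIGTERM_ONE_FOLLOWER
- SIGTERM_LEADER
stressers:
- KV
checkers:
- KV_HASH
`)
	var ts rpcpb.Tester
	if err := yaml.Unmarshal(data, &ts); err != nil {
		panic(err)
	}
	fmt.Println(ts.Cases, ts.Stressers, ts.RoundLimit)
}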

func (*Tester) Descriptor

func (*Tester) Descriptor() ([]byte, []int)

func (*Tester) Marshal

func (m *Tester) Marshal() (dAtA []byte, err error)

func (*Tester) MarshalTo

func (m *Tester) MarshalTo(dAtA []byte) (int, error)

func (*Tester) ProtoMessage

func (*Tester) ProtoMessage()

func (*Tester) Reset

func (m *Tester) Reset()

func (*Tester) Size

func (m *Tester) Size() (n int)

func (*Tester) String

func (m *Tester) String() string

func (*Tester) Unmarshal

func (m *Tester) Unmarshal(dAtA []byte) error

type TransportClient

type TransportClient interface {
	Transport(ctx context.Context, opts ...grpc.CallOption) (Transport_TransportClient, error)
}

func NewTransportClient

func NewTransportClient(cc *grpc.ClientConn) TransportClient

type TransportServer

type TransportServer interface {
	Transport(Transport_TransportServer) error
}

type Transport_TransportClient

type Transport_TransportClient interface {
	Send(*Request) error
	Recv() (*Response, error)
	grpc.ClientStream
}

type Transport_TransportServer

type Transport_TransportServer interface {
	Send(*Response) error
	Recv() (*Request, error)
	grpc.ServerStream
}
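
A sketch of the tester-side (client) use of the bidirectional stream; the agent address is hypothetical:

package main

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/functional/rpcpb"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:19027", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	stream, err := rpcpb.NewTransportClient(conn).Transport(context.Background())
	if err != nil {
		panic(err)
	}
	// Send one operation and wait for the agent's acknowledgment.
	if err := stream.Send(&rpcpb.Request{Operation: rpcpb.Operation_RESTART_ETCD}); err != nil {
		panic(err)
	}
	resp, err := stream.Recv()
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Success, resp.Status)
}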
