Documentation
¶
Index ¶
- Constants
- Variables
- func RegisterScenarios() error
- type ConsensusAccountsTx
- type ConsensusTransferTx
- type GetKeyValueTx
- type GetRuntimeIDTx
- type InsertKeyValueTx
- type InsertMsg
- type KVTestClient
- func (cli *KVTestClient) Clone() TestClient
- func (cli *KVTestClient) Init(scenario *Scenario) error
- func (cli *KVTestClient) Start(ctx context.Context, childEnv *env.Env) error
- func (cli *KVTestClient) Stop() error
- func (cli *KVTestClient) Wait() error
- func (cli *KVTestClient) WithScenario(scenario TestClientScenario) *KVTestClient
- func (cli *KVTestClient) WithSeed(seed string) *KVTestClient
- type KeyValueQuery
- type RemoveKeyValueTx
- type Scenario
- func (sc *Scenario) Clone() scenario.Scenario
- func (sc *Scenario) Fixture() (*oasis.NetworkFixture, error)
- func (sc *Scenario) PreInit(childEnv *env.Env) error
- func (sc *Scenario) ResolveRuntimeBinaries(runtimeBinary string) map[node.TEEHardware]string
- func (sc *Scenario) ResolveRuntimeBinary(runtimeBinary string, tee node.TEEHardware) string
- func (sc *Scenario) ResolveRuntimeUpgradeBinaries(runtimeBinary string) map[node.TEEHardware]string
- func (sc *Scenario) ResolveRuntimeUpgradeBinary(runtimeBinary string, tee node.TEEHardware) string
- func (sc *Scenario) Run(ctx context.Context, childEnv *env.Env) error
- func (sc *Scenario) StartNetworkAndTestClient(ctx context.Context, childEnv *env.Env) error
- func (sc *Scenario) StartNetworkAndWaitForClientSync(ctx context.Context) error
- func (sc *Scenario) WaitTestClientOnly() error
- type TestClient
- type TestClientScenario
- type TrustRootImpl
- func (sc *TrustRootImpl) Clone() scenario.Scenario
- func (sc *TrustRootImpl) Fixture() (*oasis.NetworkFixture, error)
- func (sc *TrustRootImpl) PostRun(ctx context.Context, childEnv *env.Env) error
- func (sc *TrustRootImpl) PreRun(ctx context.Context, childEnv *env.Env) (err error)
- func (sc *TrustRootImpl) Run(ctx context.Context, childEnv *env.Env) (err error)
- type TxnCall
- type TxnOutput
Constants ¶
const ( // LogEventTrustRootChangeNoTrust is the event emitted when a compute // worker or a key manager node fails to initialize the verifier as there // is not enough trust in the new light block. LogEventTrustRootChangeNoTrust = "consensus/tendermint/verifier/chain_context/no_trust" // LogEventTrustRootChangeFailed is the event emitted when a compute // worker or a key manager node fails to initialize the verifier as // the new light block is invalid, e.g. has lower height than the last // known trusted block. LogEventTrustRootChangeFailed = "consensus/tendermint/verifier/chain_context/failed" )
Variables ¶
var ( // ByzantineExecutorHonest is the byzantine executor honest scenario. ByzantineExecutorHonest scenario.Scenario = newByzantineImpl( "executor-honest", "executor", nil, oasis.ByzantineDefaultIdentitySeed, false, nil, nil, scheduler.ForceElectCommitteeRole{ Kind: scheduler.KindComputeExecutor, Roles: []scheduler.Role{scheduler.RoleWorker}, }, ) // ByzantineExecutorSchedulerHonest is the byzantine executor scheduler honest scenario. ByzantineExecutorSchedulerHonest scenario.Scenario = newByzantineImpl( "executor-scheduler-honest", "executor", nil, oasis.ByzantineSlot1IdentitySeed, false, nil, []oasis.Argument{ {Name: byzantine.CfgSchedulerRoleExpected}, }, scheduler.ForceElectCommitteeRole{ Kind: scheduler.KindComputeExecutor, Roles: []scheduler.Role{scheduler.RoleWorker}, IsScheduler: true, }, ) // ByzantineExecutorWrong is the byzantine executor wrong scenario. ByzantineExecutorWrong scenario.Scenario = newByzantineImpl( "executor-wrong", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineDefaultIdentitySeed, false, map[staking.SlashReason]uint64{ staking.SlashRuntimeIncorrectResults: 1, staking.SlashRuntimeLiveness: 1, }, []oasis.Argument{ {Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorWrong.String()}}, }, scheduler.ForceElectCommitteeRole{ Kind: scheduler.KindComputeExecutor, Roles: []scheduler.Role{scheduler.RoleWorker}, }, ) // ByzantineExecutorSchedulerWrong is the byzantine executor wrong scheduler scenario. 
ByzantineExecutorSchedulerWrong scenario.Scenario = newByzantineImpl( "executor-scheduler-wrong", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertRoundFailures(), oasis.LogAssertTimeouts(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineSlot1IdentitySeed, false, map[staking.SlashReason]uint64{ staking.SlashRuntimeLiveness: 1, }, []oasis.Argument{ {Name: byzantine.CfgSchedulerRoleExpected}, {Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorWrong.String()}}, }, scheduler.ForceElectCommitteeRole{ Kind: scheduler.KindComputeExecutor, Roles: []scheduler.Role{scheduler.RoleWorker}, IsScheduler: true, }, ) // ByzantineExecutorSchedulerBogus is the byzantine executor scheduler with bogus txs scenario. ByzantineExecutorSchedulerBogus scenario.Scenario = newByzantineImpl( "executor-scheduler-bogus", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertRoundFailures(), oasis.LogAssertTimeouts(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineSlot1IdentitySeed, false, map[staking.SlashReason]uint64{ staking.SlashRuntimeLiveness: 1, }, []oasis.Argument{ {Name: byzantine.CfgSchedulerRoleExpected}, {Name: byzantine.CfgExecutorProposeBogusTx}, }, scheduler.ForceElectCommitteeRole{ Kind: scheduler.KindComputeExecutor, Roles: []scheduler.Role{scheduler.RoleWorker}, IsScheduler: true, }, ) // ByzantineExecutorStraggler is the byzantine executor straggler scenario. 
ByzantineExecutorStraggler scenario.Scenario = newByzantineImpl( "executor-straggler", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineDefaultIdentitySeed, false, map[staking.SlashReason]uint64{ staking.SlashRuntimeLiveness: 1, }, []oasis.Argument{ {Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorStraggler.String()}}, }, scheduler.ForceElectCommitteeRole{ Kind: scheduler.KindComputeExecutor, Roles: []scheduler.Role{scheduler.RoleWorker}, }, ) // ByzantineExecutorStragglerBackup is the byzantine executor straggler scenario where the // byzantine node is both primary and backup. ByzantineExecutorStragglerBackup scenario.Scenario = newByzantineImpl( "executor-straggler-backup", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertNoExecutionDiscrepancyDetected(), }, oasis.ByzantineDefaultIdentitySeed, false, map[staking.SlashReason]uint64{ staking.SlashRuntimeLiveness: 0, }, []oasis.Argument{ {Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorStraggler.String()}}, }, scheduler.ForceElectCommitteeRole{ Kind: scheduler.KindComputeExecutor, Roles: []scheduler.Role{scheduler.RoleWorker, scheduler.RoleBackupWorker}, }, withCustomRuntimeConfig(func(rt *oasis.RuntimeFixture) { rt.Executor.AllowedStragglers = 1 rt.Executor.GroupBackupSize = 3 }), ) // ByzantineExecutorSchedulerStraggler is the byzantine executor scheduler straggler scenario. 
ByzantineExecutorSchedulerStraggler scenario.Scenario = newByzantineImpl( "executor-scheduler-straggler", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertRoundFailures(), oasis.LogAssertTimeouts(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineSlot1IdentitySeed, false, map[staking.SlashReason]uint64{ staking.SlashRuntimeLiveness: 1, }, []oasis.Argument{ {Name: byzantine.CfgSchedulerRoleExpected}, {Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorStraggler.String()}}, }, scheduler.ForceElectCommitteeRole{ Kind: scheduler.KindComputeExecutor, Roles: []scheduler.Role{scheduler.RoleWorker}, IsScheduler: true, }, ) // ByzantineExecutorFailureIndicating is the byzantine executor that submits failure indicating // commitments scenario. ByzantineExecutorFailureIndicating scenario.Scenario = newByzantineImpl( "executor-failure-indicating", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineDefaultIdentitySeed, false, map[staking.SlashReason]uint64{ staking.SlashRuntimeLiveness: 1, }, []oasis.Argument{ {Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorFailureIndicating.String()}}, }, scheduler.ForceElectCommitteeRole{ Kind: scheduler.KindComputeExecutor, Roles: []scheduler.Role{scheduler.RoleWorker}, }, ) // ByzantineExecutorSchedulerFailureIndicating is the byzantine executor scheduler failure indicating scenario. 
ByzantineExecutorSchedulerFailureIndicating scenario.Scenario = newByzantineImpl( "executor-scheduler-failure-indicating", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertRoundFailures(), oasis.LogAssertTimeouts(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineSlot1IdentitySeed, false, map[staking.SlashReason]uint64{ staking.SlashRuntimeLiveness: 1, }, []oasis.Argument{ {Name: byzantine.CfgSchedulerRoleExpected}, {Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorFailureIndicating.String()}}, }, scheduler.ForceElectCommitteeRole{ Kind: scheduler.KindComputeExecutor, Roles: []scheduler.Role{scheduler.RoleWorker}, IsScheduler: true, }, ) // ByzantineExecutorCorruptGetDiff is the byzantine executor node scenario that corrupts GetDiff // responses. ByzantineExecutorCorruptGetDiff scenario.Scenario = newByzantineImpl( "executor-corrupt-getdiff", "executor", nil, oasis.ByzantineDefaultIdentitySeed, false, nil, []oasis.Argument{ {Name: byzantine.CfgCorruptGetDiff}, }, scheduler.ForceElectCommitteeRole{ Kind: scheduler.KindComputeExecutor, Roles: []scheduler.Role{scheduler.RoleWorker}, }, ) )
var ( // DumpRestore is the dump and restore scenario. DumpRestore scenario.Scenario = newDumpRestoreImpl("dump-restore", nil) // DumpRestoreRuntimeRoundAdvance is the scenario where additional rounds are simulated after // the runtime stopped in the old network (so storage node state is behind). DumpRestoreRuntimeRoundAdvance scenario.Scenario = newDumpRestoreImpl( "dump-restore/runtime-round-advance", func(doc *genesis.Document) { for _, st := range doc.RootHash.RuntimeStates { st.Round += 10 } }, ) )
var ( // GovernanceConsensusUpgrade is the governance consensus upgrade scenario. GovernanceConsensusUpgrade scenario.Scenario = newGovernanceConsensusUpgradeImpl(true, false) // GovernanceConsensusFailUpgrade is the governance consensus upgrade scenario // where node should fail the upgrade. GovernanceConsensusFailUpgrade scenario.Scenario = newGovernanceConsensusUpgradeImpl(false, false) // GovernanceConsensusCancelUpgrade is the governance consensus upgrade scenario // where the pending upgrade is canceled. GovernanceConsensusCancelUpgrade scenario.Scenario = newGovernanceConsensusUpgradeImpl(true, true) )
var ( // HaltRestore is the halt and restore scenario. HaltRestore scenario.Scenario = newHaltRestoreImpl(false) // HaltRestoreSuspended is the halt and restore scenario with a suspended runtime. HaltRestoreSuspended scenario.Scenario = newHaltRestoreImpl(true) )
var ( // ParamsDummyScenario is a dummy instance of runtimeImpl used to register global e2e/runtime flags. ParamsDummyScenario = NewScenario("", nil) // Runtime is the basic network + client test case with runtime support. Runtime scenario.Scenario = NewScenario( "runtime", NewKVTestClient().WithScenario(SimpleKeyValueScenario), ) // RuntimeEncryption is the basic network + client with encryption test case. RuntimeEncryption scenario.Scenario = NewScenario( "runtime-encryption", NewKVTestClient().WithScenario(InsertRemoveKeyValueEncScenario), ) // DefaultRuntimeLogWatcherHandlerFactories is a list of default log watcher // handler factories for the basic scenario. DefaultRuntimeLogWatcherHandlerFactories = []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertNoExecutionDiscrepancyDetected(), } )
var ( InsertKeyValueScenario = NewTestClientScenario([]interface{}{ InsertKeyValueTx{"my_key", "my_value", "", false}, GetKeyValueTx{"my_key", "my_value", false}, }) InsertKeyValueEncScenario = NewTestClientScenario([]interface{}{ InsertKeyValueTx{"my_key", "my_value", "", true}, GetKeyValueTx{"my_key", "my_value", true}, }) RemoveKeyValueScenario = NewTestClientScenario([]interface{}{ GetKeyValueTx{"my_key", "my_value", false}, }) RemoveKeyValueEncScenario = NewTestClientScenario([]interface{}{ GetKeyValueTx{"my_key", "my_value", true}, }) InsertTransferKeyValueScenario = NewTestClientScenario([]interface{}{ InsertKeyValueTx{"my_key", "my_value", "", false}, GetKeyValueTx{"my_key", "my_value", false}, ConsensusTransferTx{}, }) InsertRemoveKeyValueEncScenario = NewTestClientScenario([]interface{}{ InsertKeyValueTx{"my_key", "my_value", "", true}, GetKeyValueTx{"my_key", "my_value", true}, RemoveKeyValueTx{"my_key", "my_value", true}, GetKeyValueTx{"my_key", "", true}, }) InsertRemoveKeyValueEncScenarioV2 = NewTestClientScenario([]interface{}{ InsertKeyValueTx{"my_key2", "my_value2", "", true}, GetKeyValueTx{"my_key2", "my_value2", true}, RemoveKeyValueTx{"my_key2", "my_value2", true}, GetKeyValueTx{"my_key2", "", true}, }) SimpleKeyValueScenario = newSimpleKeyValueScenario(false, false) SimpleKeyValueEncScenario = newSimpleKeyValueScenario(false, true) SimpleKeyValueScenarioRepeated = newSimpleKeyValueScenario(true, false) )
var ( // TrustRootChangeTest is a happy path scenario which tests if trust // can be transferred to a new light block when consensus chain context // changes, e.g. on dump-restore network upgrades. TrustRootChangeTest scenario.Scenario = newTrustRootChangeImpl( "change", NewKVTestClient().WithScenario(InsertKeyValueEncScenario), true, ) // TrustRootChangeFailsTest is an unhappy path scenario which tests // that trust is never transferred to untrusted or invalid light blocks when // consensus chain context changes. TrustRootChangeFailsTest scenario.Scenario = newTrustRootChangeImpl( "change-fails", NewKVTestClient().WithScenario(SimpleKeyValueEncScenario), false, ) )
var ArchiveAPI scenario.Scenario = &archiveAPI{ Scenario: *NewScenario( "archive-api", NewKVTestClient().WithScenario(InsertTransferKeyValueScenario), ), }
ArchiveAPI is the scenario where archive node control, consensus and runtime APIs are tested.
var GasFeesRuntimes scenario.Scenario = &gasFeesRuntimesImpl{ Scenario: *NewScenario("gas-fees/runtimes", nil), }
GasFeesRuntimes is the runtime gas fees scenario.
var HaltRestoreNonMock scenario.Scenario = newHaltRestoreNonMockImpl()
HaltRestoreNonMock is the halt and restore scenario that uses the real beacon backend.
var HistoryReindex scenario.Scenario = newHistoryReindexImpl()
HistoryReindex is the scenario that triggers roothash history reindexing.
var KeymanagerKeyGeneration scenario.Scenario = newKmKeyGenerationImpl()
KeymanagerKeyGeneration is the keymanager key generation scenario.
It uses encryption and decryption transactions provided by the simple key/value runtime to test whether the key manager client can retrieve private and public ephemeral keys from the key manager and if the latter generates those according to the specifications.
var KeymanagerReplicate scenario.Scenario = newKmReplicateImpl()
KeymanagerReplicate is the keymanager replication scenario.
var KeymanagerRestart scenario.Scenario = newKmRestartImpl()
KeymanagerRestart is the keymanager restart scenario.
var KeymanagerUpgrade scenario.Scenario = newKmUpgradeImpl()
KeymanagerUpgrade is the keymanager upgrade scenario.
var LateStart scenario.Scenario = newLateStartImpl("late-start")
LateStart is the late-start node basic scenario.
var MultipleRuntimes = func() scenario.Scenario { sc := &multipleRuntimesImpl{ Scenario: *NewScenario("multiple-runtimes", nil), } sc.Flags.Int(cfgNumComputeRuntimes, 2, "number of compute runtimes per worker") sc.Flags.Int(cfgNumComputeRuntimeTxns, 2, "number of transactions to perform") sc.Flags.Int(cfgNumComputeWorkers, 2, "number of workers to initiate") sc.Flags.Uint16(cfgExecutorGroupSize, 2, "number of executor workers in committee") return sc }()
MultipleRuntimes is a scenario which tests running multiple runtimes on one node.
var NodeShutdown scenario.Scenario = newNodeShutdownImpl()
NodeShutdown is the node shutdown scenario.
var OffsetRestart scenario.Scenario = newOffsetRestartImpl()
OffsetRestart is the offset restart scenario.
var RuntimeDynamic scenario.Scenario = newRuntimeDynamicImpl()
RuntimeDynamic is the dynamic runtime registration scenario.
var RuntimeGovernance = func() scenario.Scenario { sc := &runtimeGovernanceImpl{ Scenario: *NewScenario("runtime-governance", nil), } return sc }()
RuntimeGovernance is a scenario which tests runtime governance.
Two runtimes with the runtime governance model are created at genesis time. We submit an update_runtime runtime transaction with a slightly modified runtime descriptor to the first runtime. This transaction triggers the runtime to emit an update_runtime message, which in turn causes the runtime to be re-registered with the updated descriptor specified in the message. After an epoch transition, we fetch the runtime descriptor from the registry and check if the modification took place or not.
Additionally, we test that a runtime cannot update another runtime by passing a modified other runtime's descriptor to the update_runtime call of another runtime.
var ( // RuntimeMessage is the runtime message scenario. RuntimeMessage scenario.Scenario = newRuntimeMessage() )
var RuntimePrune scenario.Scenario = newRuntimePruneImpl()
RuntimePrune is the runtime prune scenario.
var RuntimeUpgrade scenario.Scenario = newRuntimeUpgradeImpl()
RuntimeUpgrade is the runtime upgrade scenario.
var ( // Sentry is the Sentry node basic scenario. Sentry scenario.Scenario = newSentryImpl() )
var StorageEarlyStateSync scenario.Scenario = newStorageEarlyStateSyncImpl()
StorageEarlyStateSync is the scenario where a runtime is registered first and is not yet operational, then a while later an executor node uses consensus layer state sync to catch up but the runtime has already advanced some epoch transition rounds and is no longer at genesis.
var StorageSync scenario.Scenario = newStorageSyncImpl()
StorageSync is the storage sync scenario.
var StorageSyncFromRegistered scenario.Scenario = newStorageSyncFromRegisteredImpl()
StorageSyncFromRegistered is the storage sync scenario which tests syncing from registered nodes not in committee.
var StorageSyncInconsistent scenario.Scenario = newStorageSyncInconsistentImpl()
StorageSyncInconsistent is the inconsistent storage sync scenario.
var TrustRoot scenario.Scenario = NewTrustRootImpl( "simple", NewKVTestClient().WithScenario(SimpleKeyValueEncScenario), )
TrustRoot is the consensus trust root verification scenario.
var TxSourceMulti scenario.Scenario = &txSourceImpl{ Scenario: *NewScenario("txsource-multi", nil), clientWorkloads: []string{ workload.NameCommission, workload.NameDelegation, workload.NameOversized, workload.NameParallel, workload.NameRegistration, workload.NameRuntime, workload.NameTransfer, workload.NameGovernance, }, allNodeWorkloads: []string{ workload.NameQueries, }, timeLimit: timeLimitLong, nodeRestartInterval: nodeRestartIntervalLong, nodeLongRestartInterval: nodeLongRestartInterval, nodeLongRestartDuration: nodeLongRestartDuration, livenessCheckInterval: livenessCheckInterval, consensusPruneDisabledProbability: 0.1, consensusPruneMinKept: 100, consensusPruneMaxKept: 1000, enableCrashPoints: true, tendermintRecoverCorruptedWAL: true, numValidatorNodes: 4, numKeyManagerNodes: 2, numComputeNodes: 5, numClientNodes: 2, }
TxSourceMulti uses multiple workloads.
var TxSourceMultiShort scenario.Scenario = &txSourceImpl{ Scenario: *NewScenario("txsource-multi-short", nil), clientWorkloads: []string{ workload.NameCommission, workload.NameDelegation, workload.NameOversized, workload.NameParallel, workload.NameRegistration, workload.NameRuntime, workload.NameTransfer, workload.NameGovernance, }, allNodeWorkloads: []string{ workload.NameQueries, }, timeLimit: timeLimitShort, livenessCheckInterval: livenessCheckInterval, consensusPruneDisabledProbability: 0.1, consensusPruneMinKept: 100, consensusPruneMaxKept: 200, numValidatorNodes: 4, numKeyManagerNodes: 2, numComputeNodes: 4, numClientNodes: 2, }
TxSourceMultiShort uses multiple workloads for a short time.
var TxSourceMultiShortSGX scenario.Scenario = &txSourceImpl{ Scenario: *NewScenario("txsource-multi-short-sgx", nil), clientWorkloads: []string{ workload.NameCommission, workload.NameDelegation, workload.NameOversized, workload.NameParallel, workload.NameRegistration, workload.NameRuntime, workload.NameTransfer, workload.NameGovernance, }, allNodeWorkloads: []string{ workload.NameQueries, }, timeLimit: timeLimitShortSGX, livenessCheckInterval: livenessCheckInterval, consensusPruneDisabledProbability: 0.1, consensusPruneMinKept: 100, consensusPruneMaxKept: 200, numValidatorNodes: 2, numKeyManagerNodes: 1, numComputeNodes: 2, numClientNodes: 1, }
TxSourceMultiShortSGX uses multiple workloads for a short time.
Functions ¶
func RegisterScenarios ¶
func RegisterScenarios() error
RegisterScenarios registers all end-to-end scenarios.
Types ¶
type ConsensusAccountsTx ¶ added in v0.2202.9
type ConsensusAccountsTx struct{}
ConsensusAccountsTx tests consensus account query.
type ConsensusTransferTx ¶ added in v0.2202.9
type ConsensusTransferTx struct{}
ConsensusTransferTx submits an empty consensus staking transfer.
type GetKeyValueTx ¶ added in v0.2202.9
GetKeyValueTx retrieves the value stored under the given key from the database, and verifies that the response (current value) contains the expected data.
type GetRuntimeIDTx ¶ added in v0.2202.9
type GetRuntimeIDTx struct{}
GetRuntimeIDTx retrieves the runtime ID.
type InsertKeyValueTx ¶ added in v0.2202.9
InsertKeyValueTx inserts a key/value pair to the database, and verifies that the response (previous value) contains the expected data.
type KVTestClient ¶ added in v0.2202.9
type KVTestClient struct {
// contains filtered or unexported fields
}
KVTestClient is a client that exercises the simple key-value test runtime.
func NewKVTestClient ¶ added in v0.2202.9
func NewKVTestClient() *KVTestClient
func (*KVTestClient) Clone ¶ added in v0.2202.9
func (cli *KVTestClient) Clone() TestClient
func (*KVTestClient) Init ¶ added in v0.2202.9
func (cli *KVTestClient) Init(scenario *Scenario) error
func (*KVTestClient) Stop ¶ added in v0.2202.9
func (cli *KVTestClient) Stop() error
func (*KVTestClient) Wait ¶ added in v0.2202.9
func (cli *KVTestClient) Wait() error
func (*KVTestClient) WithScenario ¶ added in v0.2202.9
func (cli *KVTestClient) WithScenario(scenario TestClientScenario) *KVTestClient
func (*KVTestClient) WithSeed ¶ added in v0.2202.9
func (cli *KVTestClient) WithSeed(seed string) *KVTestClient
type KeyValueQuery ¶ added in v0.2202.10
KeyValueQuery queries the value stored under the given key for the specified round from the database, and verifies that the response (current value) contains the expected data.
type RemoveKeyValueTx ¶ added in v0.2202.9
RemoveKeyValueTx removes the value stored under the given key from the database.
type Scenario ¶ added in v0.2202.9
Scenario is a base class for tests involving oasis-node with runtime.
func NewScenario ¶ added in v0.2202.9
func NewScenario(name string, testClient TestClient) *Scenario
NewScenario creates a new base scenario for oasis-node runtime end-to-end tests.
func (*Scenario) Fixture ¶ added in v0.2202.9
func (sc *Scenario) Fixture() (*oasis.NetworkFixture, error)
func (*Scenario) ResolveRuntimeBinaries ¶ added in v0.2202.12
func (sc *Scenario) ResolveRuntimeBinaries(runtimeBinary string) map[node.TEEHardware]string
ResolveRuntimeBinaries returns the paths to the runtime binaries.
func (*Scenario) ResolveRuntimeBinary ¶ added in v0.2202.12
func (sc *Scenario) ResolveRuntimeBinary(runtimeBinary string, tee node.TEEHardware) string
ResolveRuntimeBinary returns the path to the runtime binary.
func (*Scenario) ResolveRuntimeUpgradeBinaries ¶ added in v0.2202.12
func (sc *Scenario) ResolveRuntimeUpgradeBinaries(runtimeBinary string) map[node.TEEHardware]string
ResolveRuntimeUpgradeBinaries returns the paths to the runtime upgrade binaries.
func (*Scenario) ResolveRuntimeUpgradeBinary ¶ added in v0.2202.12
func (sc *Scenario) ResolveRuntimeUpgradeBinary(runtimeBinary string, tee node.TEEHardware) string
ResolveRuntimeUpgradeBinary returns the path to the runtime upgrade binary.
func (*Scenario) StartNetworkAndTestClient ¶ added in v0.2202.9
StartNetworkAndTestClient starts the network and the runtime test client.
func (*Scenario) StartNetworkAndWaitForClientSync ¶ added in v0.2202.9
StartNetworkAndWaitForClientSync starts the network and waits for the client node to sync.
func (*Scenario) WaitTestClientOnly ¶ added in v0.2202.9
WaitTestClientOnly waits for the runtime test client to finish its work.
type TestClient ¶ added in v0.2103.0
type TestClient interface { Init(*Scenario) error Start(context.Context, *env.Env) error Wait() error // Clone returns a clone of a RuntimeTestClient instance, in a state // that is ready for Init. Clone() TestClient }
TestClient is the interface exposed to implement a runtime test client that executes a pre-determined workload against a given runtime.
type TestClientScenario ¶ added in v0.2202.9
TestClientScenario is a test scenario for a key-value runtime test client.
func NewTestClientScenario ¶ added in v0.2202.9
func NewTestClientScenario(requests []interface{}) TestClientScenario
NewTestClientScenario creates a new test client scenario.
type TrustRootImpl ¶ added in v0.2202.9
type TrustRootImpl struct {
Scenario
}
func NewTrustRootImpl ¶ added in v0.2202.9
func NewTrustRootImpl(name string, testClient TestClient) *TrustRootImpl
func (*TrustRootImpl) Clone ¶ added in v0.2202.9
func (sc *TrustRootImpl) Clone() scenario.Scenario
func (*TrustRootImpl) Fixture ¶ added in v0.2202.9
func (sc *TrustRootImpl) Fixture() (*oasis.NetworkFixture, error)
func (*TrustRootImpl) PostRun ¶ added in v0.2202.10
PostRun re-builds simple key/value and key manager runtimes.
type TxnCall ¶ added in v0.2103.0
type TxnCall struct { // Nonce is a nonce. Nonce uint64 `json:"nonce"` // Method is the called method name. Method string `json:"method"` // Args are the method arguments. Args interface{} `json:"args"` }
TxnCall is a transaction call in the test runtime.
type TxnOutput ¶ added in v0.2103.0
type TxnOutput struct { // Success can be of any type. Success cbor.RawMessage // Error is a string describing the error message. Error *string }
TxnOutput is a transaction call output in the test runtime.
Source Files
¶
- archive_api.go
- byzantine.go
- dump_restore.go
- gas_fees.go
- governance_upgrade.go
- halt_restore.go
- halt_restore_nonmock.go
- history_reindex.go
- keymanager_key_generation.go
- keymanager_replicate.go
- keymanager_restart.go
- keymanager_upgrade.go
- late_start.go
- multiple_runtimes.go
- node_shutdown.go
- offset_restart.go
- runtime.go
- runtime_client.go
- runtime_client_kv.go
- runtime_client_kv_scenario.go
- runtime_dynamic.go
- runtime_governance.go
- runtime_message.go
- runtime_prune.go
- runtime_upgrade.go
- sentry.go
- storage_early_state_sync.go
- storage_sync.go
- storage_sync_from_registered.go
- storage_sync_inconsistent.go
- trust_root.go
- trust_root_change.go
- txsource.go