Documentation ¶
Index ¶
Constants ¶
This section is empty.
Variables ¶
var ( // ByzantineExecutorHonest is the byzantine executor honest scenario. ByzantineExecutorHonest scenario.Scenario = newByzantineImpl( "executor-honest", "executor", nil, oasis.ByzantineDefaultIdentitySeed, false, nil, nil, ) // ByzantineExecutorSchedulerHonest is the byzantine executor scheduler honest scenario. ByzantineExecutorSchedulerHonest scenario.Scenario = newByzantineImpl( "executor-scheduler-honest", "executor", nil, oasis.ByzantineSlot1IdentitySeed, false, nil, []string{ "--" + byzantine.CfgSchedulerRoleExpected, }, ) // ByzantineExecutorWrong is the byzantine executor wrong scenario. ByzantineExecutorWrong scenario.Scenario = newByzantineImpl( "executor-wrong", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineDefaultIdentitySeed, false, map[staking.SlashReason]uint64{ staking.SlashRuntimeIncorrectResults: 1, }, []string{ "--" + byzantine.CfgExecutorMode, byzantine.ModeExecutorWrong.String(), }, ) // ByzantineExecutorSchedulerWrong is the byzantine executor wrong scheduler scenario. ByzantineExecutorSchedulerWrong scenario.Scenario = newByzantineImpl( "executor-scheduler-wrong", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertRoundFailures(), oasis.LogAssertTimeouts(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineSlot1IdentitySeed, false, nil, []string{ "--" + byzantine.CfgSchedulerRoleExpected, "--" + byzantine.CfgExecutorMode, byzantine.ModeExecutorWrong.String(), }, ) // ByzantineExecutorStraggler is the byzantine executor straggler scenario. 
ByzantineExecutorStraggler scenario.Scenario = newByzantineImpl( "executor-straggler", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineDefaultIdentitySeed, false, nil, []string{ "--" + byzantine.CfgExecutorMode, byzantine.ModeExecutorStraggler.String(), }, ) // ByzantineExecutorSchedulerStraggler is the byzantine executor scheduler straggler scenario. ByzantineExecutorSchedulerStraggler scenario.Scenario = newByzantineImpl( "executor-scheduler-straggler", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertRoundFailures(), oasis.LogAssertTimeouts(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineSlot1IdentitySeed, false, nil, []string{ "--" + byzantine.CfgSchedulerRoleExpected, "--" + byzantine.CfgExecutorMode, byzantine.ModeExecutorStraggler.String(), }, ) // ByzantineExecutorFailureIndicating is the byzantine executor that submits failure indicating // commitments scenario. ByzantineExecutorFailureIndicating scenario.Scenario = newByzantineImpl( "executor-failure-indicating", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineDefaultIdentitySeed, false, nil, []string{ "--" + byzantine.CfgExecutorMode, byzantine.ModeExecutorFailureIndicating.String(), }, ) // ByzantineExecutorSchedulerFailureIndicating is the byzantine executor scheduler failure indicating scenario. 
ByzantineExecutorSchedulerFailureIndicating scenario.Scenario = newByzantineImpl( "executor-scheduler-failure-indicating", "executor", []log.WatcherHandlerFactory{ oasis.LogAssertRoundFailures(), oasis.LogAssertTimeouts(), oasis.LogAssertExecutionDiscrepancyDetected(), }, oasis.ByzantineSlot1IdentitySeed, false, nil, []string{ "--" + byzantine.CfgSchedulerRoleExpected, "--" + byzantine.CfgExecutorMode, byzantine.ModeExecutorFailureIndicating.String(), }, ) // ByzantineStorageHonest is the byzantine storage honest scenario. ByzantineStorageHonest scenario.Scenario = newByzantineImpl( "storage-honest", "storage", nil, oasis.ByzantineDefaultIdentitySeed, false, nil, nil, ) // ByzantineStorageFailApply is the byzantine storage scenario where storage node fails // first 5 Apply requests. ByzantineStorageFailApply scenario.Scenario = newByzantineImpl( "storage-fail-apply", "storage", nil, oasis.ByzantineDefaultIdentitySeed, false, nil, []string{ "--" + byzantine.CfgNumStorageFailApply, strconv.Itoa(5), }, ) // ByzantineStorageFailApplyBatch is the byzantine storage scenario where storage node fails // first 3 ApplyBatch requests. ByzantineStorageFailApplyBatch scenario.Scenario = newByzantineImpl( "storage-fail-applybatch", "storage", []log.WatcherHandlerFactory{ oasis.LogAssertExecutionDiscrepancyDetected(), oasis.LogAssertDiscrepancyMajorityFailure(), oasis.LogAssertRoundFailures(), }, oasis.ByzantineDefaultIdentitySeed, false, nil, []string{ "--" + byzantine.CfgNumStorageFailApplyBatch, strconv.Itoa(3), }, ) // ByzantineStorageFailRead is the byzantine storage node scenario that fails all read requests. ByzantineStorageFailRead scenario.Scenario = newByzantineImpl( "storage-fail-read", "storage", nil, oasis.ByzantineDefaultIdentitySeed, true, nil, []string{ "--" + byzantine.CfgFailReadRequests, }, ) // ByzantineStorageCorruptGetDiff is the byzantine storage node scenario that corrupts GetDiff // responses. 
ByzantineStorageCorruptGetDiff scenario.Scenario = newByzantineImpl( "storage-corrupt-getdiff", "storage", nil, oasis.ByzantineDefaultIdentitySeed, false, nil, []string{ "--" + byzantine.CfgCorruptGetDiff, }, ) )
var ( // DumpRestore is the dump and restore scenario. DumpRestore scenario.Scenario = newDumpRestoreImpl("dump-restore", nil) // DumpRestoreRuntimeRoundAdvance is the scenario where additional rounds are simulated after // the runtime stopped in the old network (so storage node state is behind). DumpRestoreRuntimeRoundAdvance scenario.Scenario = newDumpRestoreImpl( "dump-restore/runtime-round-advance", func(doc *genesis.Document) { for _, st := range doc.RootHash.RuntimeStates { st.Round += 10 } }, ) )
var ( // GovernanceConsensusUpgrade is the governance consensus upgrade scenario. GovernanceConsensusUpgrade scenario.Scenario = newGovernanceConsensusUpgradeImpl(true, false) // GovernanceConsensusFailUpgrade is the governance consensus upgrade scenario // where node should fail the upgrade. GovernanceConsensusFailUpgrade scenario.Scenario = newGovernanceConsensusUpgradeImpl(false, false) // GovernanceConsensusCancelUpgrade is the governance consensus upgrade scenario // where the pending upgrade is canceled. GovernanceConsensusCancelUpgrade scenario.Scenario = newGovernanceConsensusUpgradeImpl(true, true) )
var ( // HaltRestore is the halt and restore scenario. HaltRestore scenario.Scenario = newHaltRestoreImpl(false) // HaltRestoreSuspended is the halt and restore scenario with a suspended runtime. HaltRestoreSuspended scenario.Scenario = newHaltRestoreImpl(true) )
var ( // RuntimeParamsDummy is a dummy instance of runtimeImpl used to register global e2e/runtime flags. RuntimeParamsDummy *runtimeImpl = newRuntimeImpl("", "", []string{}) // Runtime is the basic network + client test case with runtime support. Runtime scenario.Scenario = newRuntimeImpl("runtime", "simple-keyvalue-client", nil) // RuntimeEncryption is the basic network + client with encryption test case. RuntimeEncryption scenario.Scenario = newRuntimeImpl("runtime-encryption", "simple-keyvalue-enc-client", nil) // DefaultRuntimeLogWatcherHandlerFactories is a list of default log watcher // handler factories for the basic scenario. DefaultRuntimeLogWatcherHandlerFactories = []log.WatcherHandlerFactory{ oasis.LogAssertNoTimeouts(), oasis.LogAssertNoRoundFailures(), oasis.LogAssertNoExecutionDiscrepancyDetected(), } )
var ( // Sentry is the Sentry node basic scenario. Sentry scenario.Scenario = newSentryImpl("sentry", "simple-keyvalue-client", nil) // SentryEncryption is the Sentry node basic encryption scenario. SentryEncryption scenario.Scenario = newSentryImpl("sentry-encryption", "simple-keyvalue-enc-client", nil) )
var ClientExpire scenario.Scenario = newClientExpireImpl("client-expire", "simple-keyvalue-client", nil)
ClientExpire is the ClientExpire node scenario.
var GasFeesRuntimes scenario.Scenario = &gasFeesRuntimesImpl{ runtimeImpl: *newRuntimeImpl("gas-fees/runtimes", "", nil), }
GasFeesRuntimes is the runtime gas fees scenario.
var HistoryReindex scenario.Scenario = newHistoryReindexImpl()
HistoryReindex is the scenario that triggers roothash history reindexing.
var KeymanagerReplicate scenario.Scenario = newKmReplicateImpl()
KeymanagerReplicate is the keymanager replication scenario.
var KeymanagerRestart scenario.Scenario = newKmRestartImpl()
KeymanagerRestart is the keymanager restart scenario.
var KeymanagerUpgrade scenario.Scenario = newKmUpgradeImpl()
KeymanagerUpgrade is the keymanager upgrade scenario.
var LateStart scenario.Scenario = newLateStartImpl("late-start", "simple-keyvalue-client", nil)
LateStart is the LateStart node basic scenario.
var MultipleRuntimes = func() scenario.Scenario { sc := &multipleRuntimesImpl{ runtimeImpl: *newRuntimeImpl("multiple-runtimes", "simple-keyvalue-client", nil), } sc.Flags.Int(cfgNumComputeRuntimes, 2, "number of compute runtimes per worker") sc.Flags.Int(cfgNumComputeRuntimeTxns, 2, "number of transactions to perform") sc.Flags.Int(cfgNumComputeWorkers, 2, "number of workers to initiate") sc.Flags.Uint16(cfgExecutorGroupSize, 2, "number of executor workers in committee") return sc }()
MultipleRuntimes is a scenario which tests running multiple runtimes on one node.
var NodeShutdown scenario.Scenario = newNodeShutdownImpl()
NodeShutdown is the node shutdown scenario.
var RuntimeDynamic scenario.Scenario = newRuntimeDynamicImpl()
RuntimeDynamic is the dynamic runtime registration scenario.
var RuntimeGovernance = func() scenario.Scenario { sc := &runtimeGovernanceImpl{ runtimeImpl: *newRuntimeImpl("runtime-governance", "simple-keyvalue-client", nil), } return sc }()
RuntimeGovernance is a scenario which tests runtime governance.
Two runtimes with the runtime governance model are created at genesis time. We submit an update_runtime runtime transaction with a slightly modified runtime descriptor to the first runtime. This transaction triggers the runtime to emit an update_runtime message, which in turn causes the runtime to be re-registered with the updated descriptor specified in the message. After an epoch transition, we fetch the runtime descriptor from the registry and check if the modification took place or not.
Additionally, we test that a runtime cannot update another runtime by passing a modified other runtime's descriptor to the update_runtime call of another runtime.
var ( // RuntimeMessage is the runtime message scenario. RuntimeMessage scenario.Scenario = newRuntimeMessage() )
var RuntimePrune scenario.Scenario = newRuntimePruneImpl()
RuntimePrune is the runtime prune scenario.
var RuntimeUpgrade scenario.Scenario = newRuntimeUpgradeImpl()
RuntimeUpgrade is the runtime upgrade scenario.
var StorageSync scenario.Scenario = newStorageSyncImpl()
StorageSync is the storage sync scenario.
var StorageSyncFromRegistered scenario.Scenario = newStorageSyncFromRegisteredImpl()
StorageSyncFromRegistered is the storage sync scenario which tests syncing from registered nodes not in committee.
var TxSourceMulti scenario.Scenario = &txSourceImpl{ runtimeImpl: *newRuntimeImpl("txsource-multi", "", nil), clientWorkloads: []string{ workload.NameCommission, workload.NameDelegation, workload.NameOversized, workload.NameParallel, workload.NameRegistration, workload.NameRuntime, workload.NameTransfer, workload.NameGovernance, }, allNodeWorkloads: []string{ workload.NameQueries, }, timeLimit: timeLimitLong, nodeRestartInterval: nodeRestartIntervalLong, nodeLongRestartInterval: nodeLongRestartInterval, nodeLongRestartDuration: nodeLongRestartDuration, livenessCheckInterval: livenessCheckInterval, consensusPruneDisabledProbability: 0.1, consensusPruneMinKept: 100, consensusPruneMaxKept: 1000, enableCrashPoints: true, tendermintRecoverCorruptedWAL: true, numValidatorNodes: 4, numKeyManagerNodes: 2, numStorageNodes: 4, numComputeNodes: 5, numClientNodes: 2, }
TxSourceMulti uses multiple workloads.
var TxSourceMultiShort scenario.Scenario = &txSourceImpl{ runtimeImpl: *newRuntimeImpl("txsource-multi-short", "", nil), clientWorkloads: []string{ workload.NameCommission, workload.NameDelegation, workload.NameOversized, workload.NameParallel, workload.NameRegistration, workload.NameRuntime, workload.NameTransfer, workload.NameGovernance, }, allNodeWorkloads: []string{ workload.NameQueries, }, timeLimit: timeLimitShort, livenessCheckInterval: livenessCheckInterval, consensusPruneDisabledProbability: 0.1, consensusPruneMinKept: 100, consensusPruneMaxKept: 200, numValidatorNodes: 4, numKeyManagerNodes: 2, numStorageNodes: 2, numComputeNodes: 4, numClientNodes: 2, }
TxSourceMultiShort uses multiple workloads for a short time.
var TxSourceMultiShortSGX scenario.Scenario = &txSourceImpl{ runtimeImpl: *newRuntimeImpl("txsource-multi-short-sgx", "", nil), clientWorkloads: []string{ workload.NameCommission, workload.NameDelegation, workload.NameOversized, workload.NameParallel, workload.NameRegistration, workload.NameRuntime, workload.NameTransfer, workload.NameGovernance, }, allNodeWorkloads: []string{ workload.NameQueries, }, timeLimit: timeLimitShortSGX, livenessCheckInterval: livenessCheckInterval, consensusPruneDisabledProbability: 0.1, consensusPruneMinKept: 100, consensusPruneMaxKept: 200, numValidatorNodes: 3, numKeyManagerNodes: 1, numStorageNodes: 2, numComputeNodes: 4, numClientNodes: 1, }
TxSourceMultiShortSGX uses multiple workloads for a short time.
Functions ¶
func RegisterScenarios ¶
func RegisterScenarios() error
RegisterScenarios registers all end-to-end scenarios.
Types ¶
This section is empty.
Source Files ¶
- byzantine.go
- client_expire.go
- dump_restore.go
- gas_fees.go
- governance_upgrade.go
- halt_restore.go
- history_reindex.go
- keymanager_replicate.go
- keymanager_restart.go
- keymanager_upgrade.go
- late_start.go
- multiple_runtimes.go
- node_shutdown.go
- runtime.go
- runtime_dynamic.go
- runtime_governance.go
- runtime_message.go
- runtime_prune.go
- runtime_upgrade.go
- sentry.go
- storage_sync.go
- storage_sync_from_registered.go
- txsource.go