Package runtime (v0.2202.13)

Published: Apr 30, 2024. License: Apache-2.0.


Constants

const (
	// LogEventTrustRootChangeNoTrust is the event emitted when a compute
	// worker or a key manager node fails to initialize the verifier as there
	// is not enough trust in the new light block.
	LogEventTrustRootChangeNoTrust = "consensus/tendermint/verifier/chain_context/no_trust"

	// LogEventTrustRootChangeFailed is the event emitted when a compute
	// worker or a key manager node fails to initialize the verifier as
	// the new light block is invalid, e.g. has lower height than the last
	// known trusted block.
	LogEventTrustRootChangeFailed = "consensus/tendermint/verifier/chain_context/failed"
)
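
Scenarios assert on these events through log watcher handler factories. A minimal sketch of wiring one up, assuming an oasis.LogAssertEvent(event, message) factory exists alongside the LogAssert* helpers used throughout this page (that specific helper is an assumption, not confirmed by this page):

// Hypothetical wiring: require that the no-trust event appears in node logs.
// oasis.LogAssertEvent is an assumed helper; the constant is from this package.
watcherFactories := []log.WatcherHandlerFactory{
	oasis.LogAssertEvent(LogEventTrustRootChangeNoTrust, "expected trust root change to be rejected"),
}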

Variables

var (
	// ByzantineExecutorHonest is the byzantine executor honest scenario.
	ByzantineExecutorHonest scenario.Scenario = newByzantineImpl(
		"executor-honest",
		"executor",
		nil,
		oasis.ByzantineDefaultIdentitySeed,
		false,
		nil,
		nil,
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
		},
	)
	// ByzantineExecutorSchedulerHonest is the byzantine executor scheduler honest scenario.
	ByzantineExecutorSchedulerHonest scenario.Scenario = newByzantineImpl(
		"executor-scheduler-honest",
		"executor",
		nil,
		oasis.ByzantineSlot1IdentitySeed,
		false,
		nil,
		[]oasis.Argument{
			{Name: byzantine.CfgSchedulerRoleExpected},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:        scheduler.KindComputeExecutor,
			Roles:       []scheduler.Role{scheduler.RoleWorker},
			IsScheduler: true,
		},
	)
	// ByzantineExecutorWrong is the byzantine executor wrong scenario.
	ByzantineExecutorWrong scenario.Scenario = newByzantineImpl(
		"executor-wrong",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertNoTimeouts(),
			oasis.LogAssertNoRoundFailures(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeIncorrectResults: 1,
			staking.SlashRuntimeLiveness:         1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorWrong.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
		},
	)
	// ByzantineExecutorSchedulerWrong is the byzantine executor wrong scheduler scenario.
	ByzantineExecutorSchedulerWrong scenario.Scenario = newByzantineImpl(
		"executor-scheduler-wrong",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertRoundFailures(),
			oasis.LogAssertTimeouts(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineSlot1IdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgSchedulerRoleExpected},
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorWrong.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:        scheduler.KindComputeExecutor,
			Roles:       []scheduler.Role{scheduler.RoleWorker},
			IsScheduler: true,
		},
	)
	// ByzantineExecutorSchedulerBogus is the byzantine executor scheduler with bogus txs scenario.
	ByzantineExecutorSchedulerBogus scenario.Scenario = newByzantineImpl(
		"executor-scheduler-bogus",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertRoundFailures(),
			oasis.LogAssertTimeouts(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineSlot1IdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgSchedulerRoleExpected},
			{Name: byzantine.CfgExecutorProposeBogusTx},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:        scheduler.KindComputeExecutor,
			Roles:       []scheduler.Role{scheduler.RoleWorker},
			IsScheduler: true,
		},
	)
	// ByzantineExecutorStraggler is the byzantine executor straggler scenario.
	ByzantineExecutorStraggler scenario.Scenario = newByzantineImpl(
		"executor-straggler",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertTimeouts(),
			oasis.LogAssertNoRoundFailures(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorStraggler.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
		},
	)
	// ByzantineExecutorStragglerBackup is the byzantine executor straggler scenario where the
	// byzantine node is both primary and backup.
	ByzantineExecutorStragglerBackup scenario.Scenario = newByzantineImpl(
		"executor-straggler-backup",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertTimeouts(),
			oasis.LogAssertNoRoundFailures(),
			oasis.LogAssertNoExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 0,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorStraggler.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker, scheduler.RoleBackupWorker},
		},
		withCustomRuntimeConfig(func(rt *oasis.RuntimeFixture) {
			rt.Executor.AllowedStragglers = 1
			rt.Executor.GroupBackupSize = 3
		}),
	)
	// ByzantineExecutorSchedulerStraggler is the byzantine executor scheduler straggler scenario.
	ByzantineExecutorSchedulerStraggler scenario.Scenario = newByzantineImpl(
		"executor-scheduler-straggler",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertRoundFailures(),
			oasis.LogAssertTimeouts(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineSlot1IdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgSchedulerRoleExpected},
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorStraggler.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:        scheduler.KindComputeExecutor,
			Roles:       []scheduler.Role{scheduler.RoleWorker},
			IsScheduler: true,
		},
	)
	// ByzantineExecutorFailureIndicating is the byzantine executor that submits failure indicating
	// commitments scenario.
	ByzantineExecutorFailureIndicating scenario.Scenario = newByzantineImpl(
		"executor-failure-indicating",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertNoTimeouts(),
			oasis.LogAssertNoRoundFailures(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineDefaultIdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorFailureIndicating.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
		},
	)
	// ByzantineExecutorSchedulerFailureIndicating is the byzantine executor scheduler failure indicating scenario.
	ByzantineExecutorSchedulerFailureIndicating scenario.Scenario = newByzantineImpl(
		"executor-scheduler-failure-indicating",
		"executor",
		[]log.WatcherHandlerFactory{
			oasis.LogAssertRoundFailures(),
			oasis.LogAssertTimeouts(),
			oasis.LogAssertExecutionDiscrepancyDetected(),
		},
		oasis.ByzantineSlot1IdentitySeed,
		false,
		map[staking.SlashReason]uint64{
			staking.SlashRuntimeLiveness: 1,
		},
		[]oasis.Argument{
			{Name: byzantine.CfgSchedulerRoleExpected},
			{Name: byzantine.CfgExecutorMode, Values: []string{byzantine.ModeExecutorFailureIndicating.String()}},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:        scheduler.KindComputeExecutor,
			Roles:       []scheduler.Role{scheduler.RoleWorker},
			IsScheduler: true,
		},
	)
	// ByzantineExecutorCorruptGetDiff is the byzantine executor node scenario that corrupts GetDiff
	// responses.
	ByzantineExecutorCorruptGetDiff scenario.Scenario = newByzantineImpl(
		"executor-corrupt-getdiff",
		"executor",

		nil,
		oasis.ByzantineDefaultIdentitySeed,
		false,
		nil,
		[]oasis.Argument{
			{Name: byzantine.CfgCorruptGetDiff},
		},
		scheduler.ForceElectCommitteeRole{
			Kind:  scheduler.KindComputeExecutor,
			Roles: []scheduler.Role{scheduler.RoleWorker},
		},
	)
)
var (
	// DumpRestore is the dump and restore scenario.
	DumpRestore scenario.Scenario = newDumpRestoreImpl("dump-restore", nil)

	// DumpRestoreRuntimeRoundAdvance is the scenario where additional rounds are simulated after
	// the runtime stopped in the old network (so storage node state is behind).
	DumpRestoreRuntimeRoundAdvance scenario.Scenario = newDumpRestoreImpl(
		"dump-restore/runtime-round-advance",
		func(doc *genesis.Document) {
			for _, st := range doc.RootHash.RuntimeStates {
				st.Round += 10
			}
		},
	)
)
var (
	// GovernanceConsensusUpgrade is the governance consensus upgrade scenario.
	GovernanceConsensusUpgrade scenario.Scenario = newGovernanceConsensusUpgradeImpl(true, false)
	// GovernanceConsensusFailUpgrade is the governance consensus upgrade scenario
	// where node should fail the upgrade.
	GovernanceConsensusFailUpgrade scenario.Scenario = newGovernanceConsensusUpgradeImpl(false, false)
	// GovernanceConsensusCancelUpgrade is the governance consensus upgrade scenario
	// where the pending upgrade is canceled.
	GovernanceConsensusCancelUpgrade scenario.Scenario = newGovernanceConsensusUpgradeImpl(true, true)
)
var (
	// HaltRestore is the halt and restore scenario.
	HaltRestore scenario.Scenario = newHaltRestoreImpl(false)
	// HaltRestoreSuspended is the halt and restore scenario with a suspended runtime.
	HaltRestoreSuspended scenario.Scenario = newHaltRestoreImpl(true)
)
var (
	// ParamsDummyScenario is a dummy instance of runtimeImpl used to register global e2e/runtime flags.
	ParamsDummyScenario = NewScenario("", nil)

	// Runtime is the basic network + client test case with runtime support.
	Runtime scenario.Scenario = NewScenario(
		"runtime",
		NewKVTestClient().WithScenario(SimpleKeyValueScenario),
	)

	// RuntimeEncryption is the basic network + client with encryption test case.
	RuntimeEncryption scenario.Scenario = NewScenario(
		"runtime-encryption",
		NewKVTestClient().WithScenario(InsertRemoveKeyValueEncScenario),
	)

	// DefaultRuntimeLogWatcherHandlerFactories is a list of default log watcher
	// handler factories for the basic scenario.
	DefaultRuntimeLogWatcherHandlerFactories = []log.WatcherHandlerFactory{
		oasis.LogAssertNoTimeouts(),
		oasis.LogAssertNoRoundFailures(),
		oasis.LogAssertNoExecutionDiscrepancyDetected(),
	}
)
var (
	InsertKeyValueScenario = NewTestClientScenario([]interface{}{
		InsertKeyValueTx{"my_key", "my_value", "", false},
		GetKeyValueTx{"my_key", "my_value", false},
	})

	InsertKeyValueEncScenario = NewTestClientScenario([]interface{}{
		InsertKeyValueTx{"my_key", "my_value", "", true},
		GetKeyValueTx{"my_key", "my_value", true},
	})

	RemoveKeyValueScenario = NewTestClientScenario([]interface{}{
		GetKeyValueTx{"my_key", "my_value", false},
	})

	RemoveKeyValueEncScenario = NewTestClientScenario([]interface{}{
		GetKeyValueTx{"my_key", "my_value", true},
	})

	InsertTransferKeyValueScenario = NewTestClientScenario([]interface{}{
		InsertKeyValueTx{"my_key", "my_value", "", false},
		GetKeyValueTx{"my_key", "my_value", false},
		ConsensusTransferTx{},
	})

	InsertRemoveKeyValueEncScenario = NewTestClientScenario([]interface{}{
		InsertKeyValueTx{"my_key", "my_value", "", true},
		GetKeyValueTx{"my_key", "my_value", true},
		RemoveKeyValueTx{"my_key", "my_value", true},
		GetKeyValueTx{"my_key", "", true},
	})

	InsertRemoveKeyValueEncScenarioV2 = NewTestClientScenario([]interface{}{
		InsertKeyValueTx{"my_key2", "my_value2", "", true},
		GetKeyValueTx{"my_key2", "my_value2", true},
		RemoveKeyValueTx{"my_key2", "my_value2", true},
		GetKeyValueTx{"my_key2", "", true},
	})

	SimpleKeyValueScenario = newSimpleKeyValueScenario(false, false)

	SimpleKeyValueEncScenario = newSimpleKeyValueScenario(false, true)

	SimpleKeyValueScenarioRepeated = newSimpleKeyValueScenario(true, false)
)
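
These scenarios compose freely: NewTestClientScenario (documented below under Types) accepts any sequence of the request types defined in this package. A sketch of a custom insert-then-remove scenario built the same way as the variables above:

// insertThenRemoveScenario inserts a key, reads it back, removes it, and
// verifies the key is gone. All request types are from this package.
var insertThenRemoveScenario = NewTestClientScenario([]interface{}{
	InsertKeyValueTx{"my_key", "my_value", "", false},
	GetKeyValueTx{"my_key", "my_value", false},
	RemoveKeyValueTx{"my_key", "my_value", false},
	GetKeyValueTx{"my_key", "", false},
})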
var (
	// TrustRootChangeTest is a happy path scenario which tests if trust
	// can be transferred to a new light block when consensus chain context
	// changes, e.g. on dump-restore network upgrades.
	TrustRootChangeTest scenario.Scenario = newTrustRootChangeImpl(
		"change",
		NewKVTestClient().WithScenario(InsertKeyValueEncScenario),
		true,
	)

	// TrustRootChangeFailsTest is an unhappy path scenario which tests
	// that trust is never transferred to untrusted or invalid light blocks when
	// consensus chain context changes.
	TrustRootChangeFailsTest scenario.Scenario = newTrustRootChangeImpl(
		"change-fails",
		NewKVTestClient().WithScenario(SimpleKeyValueEncScenario),
		false,
	)
)
var ArchiveAPI scenario.Scenario = &archiveAPI{
	Scenario: *NewScenario(
		"archive-api",
		NewKVTestClient().WithScenario(InsertTransferKeyValueScenario),
	),
}

ArchiveAPI is the scenario where archive node control, consensus and runtime APIs are tested.

var GasFeesRuntimes scenario.Scenario = &gasFeesRuntimesImpl{
	Scenario: *NewScenario("gas-fees/runtimes", nil),
}

GasFeesRuntimes is the runtime gas fees scenario.

var HaltRestoreNonMock scenario.Scenario = newHaltRestoreNonMockImpl()

HaltRestoreNonMock is the halt and restore scenario that uses the real beacon backend.

var HistoryReindex scenario.Scenario = newHistoryReindexImpl()

HistoryReindex is the scenario that triggers roothash history reindexing.

var KeymanagerKeyGeneration scenario.Scenario = newKmKeyGenerationImpl()

KeymanagerKeyGeneration is the keymanager key generation scenario.

It uses encryption and decryption transactions provided by the simple key/value runtime to test whether the key manager client can retrieve private and public ephemeral keys from the key manager, and whether the latter generates them according to the specifications.

var KeymanagerReplicate scenario.Scenario = newKmReplicateImpl()

KeymanagerReplicate is the keymanager replication scenario.

var KeymanagerRestart scenario.Scenario = newKmRestartImpl()

KeymanagerRestart is the keymanager restart scenario.

var KeymanagerUpgrade scenario.Scenario = newKmUpgradeImpl()

KeymanagerUpgrade is the keymanager upgrade scenario.

var LateStart scenario.Scenario = newLateStartImpl("late-start")

LateStart is the late-start node basic scenario.

var MultipleRuntimes = func() scenario.Scenario {
	sc := &multipleRuntimesImpl{
		Scenario: *NewScenario("multiple-runtimes", nil),
	}
	sc.Flags.Int(cfgNumComputeRuntimes, 2, "number of compute runtimes per worker")
	sc.Flags.Int(cfgNumComputeRuntimeTxns, 2, "number of transactions to perform")
	sc.Flags.Int(cfgNumComputeWorkers, 2, "number of workers to initiate")
	sc.Flags.Uint16(cfgExecutorGroupSize, 2, "number of executor workers in committee")

	return sc
}()

MultipleRuntimes is a scenario which tests running multiple runtimes on one node.

var NodeShutdown scenario.Scenario = newNodeShutdownImpl()

NodeShutdown is the node shutdown scenario.

var OffsetRestart scenario.Scenario = newOffsetRestartImpl()

OffsetRestart is the offset restart scenario.

var RuntimeDynamic scenario.Scenario = newRuntimeDynamicImpl()

RuntimeDynamic is the dynamic runtime registration scenario.

var RuntimeGovernance = func() scenario.Scenario {
	sc := &runtimeGovernanceImpl{
		Scenario: *NewScenario("runtime-governance", nil),
	}
	return sc
}()

RuntimeGovernance is a scenario which tests runtime governance.

Two runtimes with the runtime governance model are created at genesis time. We submit an update_runtime runtime transaction with a slightly modified runtime descriptor to the first runtime. This transaction triggers the runtime to emit an update_runtime message, which in turn causes the runtime to be re-registered with the updated descriptor specified in the message. After an epoch transition, we fetch the runtime descriptor from the registry and check whether the modification took place.

Additionally, we test that a runtime cannot update another runtime: passing one runtime's modified descriptor to a different runtime's update_runtime call must not take effect.

var (
	// RuntimeMessage is the runtime message scenario.
	RuntimeMessage scenario.Scenario = newRuntimeMessage()
)
var RuntimePrune scenario.Scenario = newRuntimePruneImpl()

RuntimePrune is the runtime prune scenario.

var RuntimeUpgrade scenario.Scenario = newRuntimeUpgradeImpl()

RuntimeUpgrade is the runtime upgrade scenario.

var (
	// Sentry is the Sentry node basic scenario.
	Sentry scenario.Scenario = newSentryImpl()
)
var StorageEarlyStateSync scenario.Scenario = newStorageEarlyStateSyncImpl()

StorageEarlyStateSync is the scenario where a runtime is registered before it is operational; some time later an executor node uses consensus-layer state sync to catch up, by which point the runtime has already advanced through several epoch-transition rounds and is no longer at genesis.

var StorageSync scenario.Scenario = newStorageSyncImpl()

StorageSync is the storage sync scenario.

var StorageSyncFromRegistered scenario.Scenario = newStorageSyncFromRegisteredImpl()

StorageSyncFromRegistered is the storage sync scenario which tests syncing from registered nodes that are not in the committee.

var StorageSyncInconsistent scenario.Scenario = newStorageSyncInconsistentImpl()

StorageSyncInconsistent is the inconsistent storage sync scenario.

TrustRoot is the consensus trust root verification scenario.

var TxSourceMulti scenario.Scenario = &txSourceImpl{
	Scenario: *NewScenario("txsource-multi", nil),
	clientWorkloads: []string{
		workload.NameCommission,
		workload.NameDelegation,
		workload.NameOversized,
		workload.NameParallel,
		workload.NameRegistration,
		workload.NameRuntime,
		workload.NameTransfer,
		workload.NameGovernance,
	},
	allNodeWorkloads: []string{
		workload.NameQueries,
	},
	timeLimit:                         timeLimitLong,
	nodeRestartInterval:               nodeRestartIntervalLong,
	nodeLongRestartInterval:           nodeLongRestartInterval,
	nodeLongRestartDuration:           nodeLongRestartDuration,
	livenessCheckInterval:             livenessCheckInterval,
	consensusPruneDisabledProbability: 0.1,
	consensusPruneMinKept:             100,
	consensusPruneMaxKept:             1000,
	enableCrashPoints:                 true,
	tendermintRecoverCorruptedWAL:     true,
	numValidatorNodes:                 4,
	numKeyManagerNodes:                2,
	numComputeNodes:                   5,
	numClientNodes:                    2,
}

TxSourceMulti uses multiple workloads.

var TxSourceMultiShort scenario.Scenario = &txSourceImpl{
	Scenario: *NewScenario("txsource-multi-short", nil),
	clientWorkloads: []string{
		workload.NameCommission,
		workload.NameDelegation,
		workload.NameOversized,
		workload.NameParallel,
		workload.NameRegistration,
		workload.NameRuntime,
		workload.NameTransfer,
		workload.NameGovernance,
	},
	allNodeWorkloads: []string{
		workload.NameQueries,
	},
	timeLimit:                         timeLimitShort,
	livenessCheckInterval:             livenessCheckInterval,
	consensusPruneDisabledProbability: 0.1,
	consensusPruneMinKept:             100,
	consensusPruneMaxKept:             200,
	numValidatorNodes:                 4,
	numKeyManagerNodes:                2,
	numComputeNodes:                   4,
	numClientNodes:                    2,
}

TxSourceMultiShort uses multiple workloads for a short time.

var TxSourceMultiShortSGX scenario.Scenario = &txSourceImpl{
	Scenario: *NewScenario("txsource-multi-short-sgx", nil),
	clientWorkloads: []string{
		workload.NameCommission,
		workload.NameDelegation,
		workload.NameOversized,
		workload.NameParallel,
		workload.NameRegistration,
		workload.NameRuntime,
		workload.NameTransfer,
		workload.NameGovernance,
	},
	allNodeWorkloads: []string{
		workload.NameQueries,
	},
	timeLimit:                         timeLimitShortSGX,
	livenessCheckInterval:             livenessCheckInterval,
	consensusPruneDisabledProbability: 0.1,
	consensusPruneMinKept:             100,
	consensusPruneMaxKept:             200,
	numValidatorNodes:                 2,
	numKeyManagerNodes:                1,
	numComputeNodes:                   2,
	numClientNodes:                    1,
}

TxSourceMultiShortSGX uses multiple workloads for a short time.

Functions

func RegisterScenarios

func RegisterScenarios() error

RegisterScenarios registers all end-to-end scenarios.
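
A minimal sketch of a test-runner binary registering these scenarios before executing; the import path is assumed from the module layout:

package main

import (
	stdlog "log"

	runtime "github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario/e2e/runtime"
)

func main() {
	// Register every runtime e2e scenario with the global scenario registry.
	if err := runtime.RegisterScenarios(); err != nil {
		stdlog.Fatalf("failed to register runtime scenarios: %v", err)
	}
	// Hand off to the oasis-test-runner command (omitted).
}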

Types

type ConsensusAccountsTx added in v0.2202.9

type ConsensusAccountsTx struct{}

ConsensusAccountsTx tests a consensus accounts query.

type ConsensusTransferTx added in v0.2202.9

type ConsensusTransferTx struct{}

ConsensusTransferTx submits an empty consensus staking transfer.

type GetKeyValueTx added in v0.2202.9

type GetKeyValueTx struct {
	Key       string
	Response  string
	Encrypted bool
}

GetKeyValueTx retrieves the value stored under the given key from the database, and verifies that the response (current value) contains the expected data.

type GetRuntimeIDTx added in v0.2202.9

type GetRuntimeIDTx struct{}

GetRuntimeIDTx retrieves the runtime ID.

type InsertKeyValueTx added in v0.2202.9

type InsertKeyValueTx struct {
	Key       string
	Value     string
	Response  string
	Encrypted bool
}

InsertKeyValueTx inserts a key/value pair into the database, and verifies that the response (previous value) contains the expected data.

type InsertMsg added in v0.2202.9

type InsertMsg struct {
	Key       string
	Value     string
	Encrypted bool
}

InsertMsg inserts a key/value pair via an incoming runtime message.

type KVTestClient added in v0.2202.9

type KVTestClient struct {
	// contains filtered or unexported fields
}

KVTestClient is a client that exercises the simple key-value test runtime.

func NewKVTestClient added in v0.2202.9

func NewKVTestClient() *KVTestClient

func (*KVTestClient) Clone added in v0.2202.9

func (cli *KVTestClient) Clone() TestClient

func (*KVTestClient) Init added in v0.2202.9

func (cli *KVTestClient) Init(scenario *Scenario) error

func (*KVTestClient) Start added in v0.2202.9

func (cli *KVTestClient) Start(ctx context.Context, childEnv *env.Env) error

func (*KVTestClient) Stop added in v0.2202.9

func (cli *KVTestClient) Stop() error

func (*KVTestClient) Wait added in v0.2202.9

func (cli *KVTestClient) Wait() error

func (*KVTestClient) WithScenario added in v0.2202.9

func (cli *KVTestClient) WithScenario(scenario TestClientScenario) *KVTestClient

func (*KVTestClient) WithSeed added in v0.2202.9

func (cli *KVTestClient) WithSeed(seed string) *KVTestClient
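
WithScenario and WithSeed chain, so a test client is typically configured inline when constructing a scenario, as in the package variables above. A short sketch (the seed string is an arbitrary example value):

// Configure a key/value test client and hand it to a new scenario.
client := NewKVTestClient().
	WithSeed("seed1").
	WithScenario(InsertKeyValueScenario)
sc := NewScenario("example-scenario", client)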

type KeyValueQuery added in v0.2202.10

type KeyValueQuery struct {
	Key      string
	Response string
	Round    uint64
}

KeyValueQuery queries the value stored under the given key for the specified round from the database, and verifies that the response (current value) contains the expected data.
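
A query step can be mixed with transaction steps in a test client scenario, assuming KeyValueQuery requests are dispatched through the same submit mechanism as the transaction types; the round value below is hypothetical:

// Insert a pair, then query it as of a specific (hypothetical) round.
var queryAtRoundScenario = NewTestClientScenario([]interface{}{
	InsertKeyValueTx{"my_key", "my_value", "", false},
	KeyValueQuery{Key: "my_key", Response: "my_value", Round: 3},
})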

type RemoveKeyValueTx added in v0.2202.9

type RemoveKeyValueTx struct {
	Key       string
	Response  string
	Encrypted bool
}

RemoveKeyValueTx removes the value stored under the given key from the database.

type Scenario added in v0.2202.9

type Scenario struct {
	e2e.Scenario
	// contains filtered or unexported fields
}

Scenario is a base class for tests involving oasis-node with runtime.

func NewScenario added in v0.2202.9

func NewScenario(name string, testClient TestClient) *Scenario

NewScenario creates a new base scenario for oasis-node runtime end-to-end tests.

func (*Scenario) Clone added in v0.2202.9

func (sc *Scenario) Clone() scenario.Scenario

func (*Scenario) Fixture added in v0.2202.9

func (sc *Scenario) Fixture() (*oasis.NetworkFixture, error)

func (*Scenario) PreInit added in v0.2202.9

func (sc *Scenario) PreInit(childEnv *env.Env) error

func (*Scenario) ResolveRuntimeBinaries added in v0.2202.12

func (sc *Scenario) ResolveRuntimeBinaries(runtimeBinary string) map[node.TEEHardware]string

ResolveRuntimeBinaries returns the paths to the runtime binaries.

func (*Scenario) ResolveRuntimeBinary added in v0.2202.12

func (sc *Scenario) ResolveRuntimeBinary(runtimeBinary string, tee node.TEEHardware) string

ResolveRuntimeBinary returns the path to the runtime binary.

func (*Scenario) ResolveRuntimeUpgradeBinaries added in v0.2202.12

func (sc *Scenario) ResolveRuntimeUpgradeBinaries(runtimeBinary string) map[node.TEEHardware]string

ResolveRuntimeUpgradeBinaries returns the paths to the runtime upgrade binaries.

func (*Scenario) ResolveRuntimeUpgradeBinary added in v0.2202.12

func (sc *Scenario) ResolveRuntimeUpgradeBinary(runtimeBinary string, tee node.TEEHardware) string

ResolveRuntimeUpgradeBinary returns the path to the runtime upgrade binary.

func (*Scenario) Run added in v0.2202.9

func (sc *Scenario) Run(ctx context.Context, childEnv *env.Env) error

func (*Scenario) StartNetworkAndTestClient added in v0.2202.9

func (sc *Scenario) StartNetworkAndTestClient(ctx context.Context, childEnv *env.Env) error

StartNetworkAndTestClient starts the network and the runtime test client.

func (*Scenario) StartNetworkAndWaitForClientSync added in v0.2202.9

func (sc *Scenario) StartNetworkAndWaitForClientSync(ctx context.Context) error

StartNetworkAndWaitForClientSync starts the network and waits for the client node to sync.

func (*Scenario) WaitTestClientOnly added in v0.2202.9

func (sc *Scenario) WaitTestClientOnly() error

WaitTestClientOnly waits for the runtime test client to finish its work.

type TestClient added in v0.2103.0

type TestClient interface {
	Init(*Scenario) error
	Start(context.Context, *env.Env) error
	Wait() error

	// Clone returns a clone of a TestClient instance, in a state
	// that is ready for Init.
	Clone() TestClient
}

TestClient is the interface exposed to implement a runtime test client that executes a pre-determined workload against a given runtime.
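
Any type satisfying this interface can drive a scenario. A minimal no-op sketch illustrating the lifecycle contract (Init, Start, Wait, Clone):

// nopClient is a hypothetical TestClient that performs no work.
type nopClient struct{}

func (c *nopClient) Init(sc *Scenario) error                            { return nil }
func (c *nopClient) Start(ctx context.Context, childEnv *env.Env) error { return nil }
func (c *nopClient) Wait() error                                        { return nil }

// Clone returns a fresh instance ready for Init.
func (c *nopClient) Clone() TestClient { return &nopClient{} }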

type TestClientScenario added in v0.2202.9

type TestClientScenario func(submit func(req interface{}) error) error

TestClientScenario is a test scenario for a key-value runtime test client.
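
Since TestClientScenario is just a function type, a scenario can also be written by hand instead of via NewTestClientScenario; a minimal sketch:

// A hand-rolled scenario: submit requests directly through the provided
// submit callback, stopping at the first error.
var pingScenario TestClientScenario = func(submit func(req interface{}) error) error {
	if err := submit(InsertKeyValueTx{"k", "v", "", false}); err != nil {
		return err
	}
	return submit(GetKeyValueTx{"k", "v", false})
}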

func NewTestClientScenario added in v0.2202.9

func NewTestClientScenario(requests []interface{}) TestClientScenario

NewTestClientScenario creates a new test client scenario.

type TrustRootImpl added in v0.2202.9

type TrustRootImpl struct {
	Scenario
}

func NewTrustRootImpl added in v0.2202.9

func NewTrustRootImpl(name string, testClient TestClient) *TrustRootImpl

func (*TrustRootImpl) Clone added in v0.2202.9

func (sc *TrustRootImpl) Clone() scenario.Scenario

func (*TrustRootImpl) Fixture added in v0.2202.9

func (sc *TrustRootImpl) Fixture() (*oasis.NetworkFixture, error)

func (*TrustRootImpl) PostRun added in v0.2202.10

func (sc *TrustRootImpl) PostRun(ctx context.Context, childEnv *env.Env) error

PostRun re-builds simple key/value and key manager runtimes.

func (*TrustRootImpl) PreRun added in v0.2202.10

func (sc *TrustRootImpl) PreRun(ctx context.Context, childEnv *env.Env) (err error)

PreRun starts the network, prepares a trust root, builds simple key/value and key manager runtimes, prepares runtime bundles, and runs the test client.

func (*TrustRootImpl) Run added in v0.2202.9

func (sc *TrustRootImpl) Run(ctx context.Context, childEnv *env.Env) (err error)

type TxnCall added in v0.2103.0

type TxnCall struct {
	// Nonce is the transaction nonce.
	Nonce uint64 `json:"nonce"`
	// Method is the called method name.
	Method string `json:"method"`
	// Args are the method arguments.
	Args interface{} `json:"args"`
}

TxnCall is a transaction call in the test runtime.
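
Calls are CBOR-encoded before submission. A sketch of building and encoding one, assuming the cbor package is oasis-core's common/cbor and that the simple key/value runtime exposes an "insert" method (both assumptions, not confirmed by this page):

// Build an insert call against the test runtime and CBOR-encode it.
call := TxnCall{
	Nonce:  1,
	Method: "insert", // assumed method name of the key/value runtime
	Args: map[string]string{
		"key":   "my_key",
		"value": "my_value",
	},
}
raw := cbor.Marshal(call) // common/cbor's Marshal returns []byte
_ = raw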

type TxnOutput added in v0.2103.0

type TxnOutput struct {
	// Success can be of any type.
	Success cbor.RawMessage
	// Error is a string describing the error message.
	Error *string
}

TxnOutput is a transaction call output in the test runtime.
