Documentation ¶
Index ¶
- Constants
- Variables
- func GetSudoPassword(ctx *pulumi.Context, isLocal bool) pulumi.StringOutput
- func LaunchVMCollections(vmCollections []*VMCollection, depends []pulumi.Resource) ([]pulumi.Resource, error)
- func Run(ctx *pulumi.Context) error
- func WithLocalRunner(runner *command.LocalRunner) func(*Runner)
- func WithRemoteRunner(runner *command.Runner) func(*Runner)
- type Domain
- type Instance
- type InstanceEnvironment
- type LibvirtFilesystem
- type LibvirtPool
- type LibvirtProvider
- type LibvirtProviderFn
- type LibvirtVolume
- type Runner
- func (a *Runner) Command(name string, args *command.Args, opts ...pulumi.ResourceOption) (pulumi.Resource, error)
- func (a *Runner) GetLocalRunner() (*command.LocalRunner, error)
- func (a *Runner) GetRemoteRunner() (*command.Runner, error)
- func (a *Runner) LocalCommand(name string, args *command.Args, opts ...pulumi.ResourceOption) (*local.Command, error)
- func (a *Runner) RemoteCommand(name string, args *command.Args, opts ...pulumi.ResourceOption) (*remote.Command, error)
- type ScenarioDone
- type VMCollection
Constants ¶
const DockerMountpoint = "/mnt/docker"
const (
LocalVMSet = "local"
)
const (
RootMountpoint = "/"
)
Variables ¶
var (
	ErrVMSetsNotMapped = errors.New("vmsets must be mapped to collection before building pools")
	ErrInvalidPoolSize = errors.New("ram backed pool must have size specified in megabytes or gigabytes with the appropriate suffix 'M' or 'G'")
	ErrZeroRAMDiskSize = errors.New("ram disk size not provided")
)
var GetWorkingDirectory func(string) string
GetWorkingDirectory returns the working directory for kernel matrix testing, given the architecture (local or non-local).
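A minimal usage sketch, assuming the package has already assigned this variable and that the LocalVMSet constant shown above is the key used for the local case:
// Resolve the kernel matrix testing working directory for the local VM set.
// GetWorkingDirectory is a package variable of function type, so it must have
// been assigned before this call.
kmtDir := GetWorkingDirectory(LocalVMSet)
_ = kmtDir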
var SSHKeyFileNames = map[string]string{
	ec2.AMD64Arch: libvirtSSHPrivateKeyX86,
	ec2.ARM64Arch: libvirtSSHPrivateKeyArm,
}
var SudoPasswordLocal pulumi.StringOutput
var SudoPasswordRemote pulumi.StringOutput
Functions ¶
func GetSudoPassword ¶
func GetSudoPassword(ctx *pulumi.Context, isLocal bool) pulumi.StringOutput
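A short sketch of retrieving the password inside a Pulumi program; ctx is assumed to be the running *pulumi.Context, and treating the result as a secret is an assumption of this sketch, not something the signature guarantees:
// Fetch the sudo password for the local environment. The result is a
// pulumi.StringOutput, so it is only resolved when the Pulumi program runs.
sudoPassword := GetSudoPassword(ctx, true /* isLocal */)

// Assumption: callers are expected to mark it as a secret before passing it
// to downstream resources so it does not land in plain-text state.
secret := pulumi.ToSecret(sudoPassword)
_ = secret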
func LaunchVMCollections ¶
func LaunchVMCollections(vmCollections []*VMCollection, depends []pulumi.Resource) ([]pulumi.Resource, error)
func WithLocalRunner ¶
func WithLocalRunner(runner *command.LocalRunner) func(*Runner)
func WithRemoteRunner ¶
func WithRemoteRunner(runner *command.Runner) func(*Runner)
Types ¶
type Domain ¶
type Domain struct {
	resources.RecipeLibvirtDomainArgs
	// contains filtered or unexported fields
}
type Instance ¶
type Instance struct {
	Arch string
	// contains filtered or unexported fields
}
func (*Instance) IsMacOSHost ¶
type InstanceEnvironment ¶
type InstanceEnvironment struct {
	*commonConfig.CommonEnvironment
	*aws.Environment
}
type LibvirtFilesystem ¶
type LibvirtFilesystem struct {
// contains filtered or unexported fields
}
func NewLibvirtFSCustomRecipe ¶
func NewLibvirtFSCustomRecipe(ctx *pulumi.Context, vmset *vmconfig.VMSet, pools map[vmconfig.PoolType]LibvirtPool) *LibvirtFilesystem
VMs created with the custom recipe all share the same Debian-based backing filesystem image.
func NewLibvirtFSDistroRecipe ¶
func NewLibvirtFSDistroRecipe(ctx *pulumi.Context, vmset *vmconfig.VMSet, pools map[vmconfig.PoolType]LibvirtPool) *LibvirtFilesystem
VMs created with the distro recipe can have different backing filesystem images for different VMs. For example, Ubuntu and Fedora VMs would have different backing images.
func (*LibvirtFilesystem) SetupLibvirtFilesystem ¶
func (fs *LibvirtFilesystem) SetupLibvirtFilesystem(providerFn LibvirtProviderFn, runner *Runner, depends []pulumi.Resource) ([]pulumi.Resource, error)
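A hedged sketch tying these pieces together: build a global pool (see LibvirtPool below), construct a distro-recipe filesystem from it, and run the setup step. ctx, vmset, runner, and providerFn are assumed to come from the surrounding scenario code; their construction is not shown in this documentation.
// The pool is keyed by its own Type() in the map the recipe constructors expect.
pool := NewGlobalLibvirtPool(ctx)
pools := map[vmconfig.PoolType]LibvirtPool{pool.Type(): pool}

// Build the filesystem for this VMSet using the distro recipe.
fs := NewLibvirtFSDistroRecipe(ctx, vmset, pools)

// Set up the pools and base volumes; the returned resources can be used as
// dependencies for later steps.
setupDeps, err := fs.SetupLibvirtFilesystem(providerFn, runner, nil)
if err != nil {
	return err
}
_ = setupDeps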
type LibvirtPool ¶
type LibvirtPool interface {
	SetupLibvirtPool(ctx *pulumi.Context, runner *Runner, providerFn LibvirtProviderFn, isLocal bool, depends []pulumi.Resource) ([]pulumi.Resource, error)
	Name() string
	Type() vmconfig.PoolType
	Path() string
}
func NewGlobalLibvirtPool ¶
func NewGlobalLibvirtPool(ctx *pulumi.Context) LibvirtPool
func NewRAMBackedLibvirtPool ¶
type LibvirtProvider ¶
type LibvirtProvider struct {
// contains filtered or unexported fields
}
type LibvirtProviderFn ¶
type LibvirtVolume ¶
type LibvirtVolume interface {
	SetupLibvirtVMVolume(ctx *pulumi.Context, runner *Runner, providerFn LibvirtProviderFn, isLocal bool, depends []pulumi.Resource) (pulumi.Resource, error)
	UnderlyingImage() *filesystemImage
	FullResourceName(...string) string
	Key() string
	Pool() LibvirtPool
	Mountpoint() string
}
func NewLibvirtVolume ¶
func NewLibvirtVolume(
	pool LibvirtPool,
	fsImage filesystemImage,
	xmlDataFn func(string, vmconfig.PoolType) pulumi.StringOutput,
	volNamerFn func(string) namer.Namer,
	mountpoint string,
) LibvirtVolume
type Runner ¶
type Runner struct {
// contains filtered or unexported fields
}
func (*Runner) GetLocalRunner ¶
func (a *Runner) GetLocalRunner() (*command.LocalRunner, error)
func (*Runner) LocalCommand ¶
func (a *Runner) LocalCommand(name string, args *command.Args, opts ...pulumi.ResourceOption) (*local.Command, error)
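A small sketch of issuing a command through a Runner; runner is assumed to be an already-configured *Runner, and the Create field on command.Args is an assumption for illustration that may not match the real struct:
// Run a host-level command through the runner. Whether this dispatches to the
// local or remote runner depends on how the Runner was configured.
cmd, err := runner.Command("kmt-prepare-host", &command.Args{
	// Assumption: command.Args exposes a Create input holding the command line.
	Create: pulumi.String("mkdir -p /mnt/docker"),
})
if err != nil {
	return err
}
_ = cmd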
type ScenarioDone ¶
func RunAndReturnInstances ¶
func RunAndReturnInstances(e commonConfig.CommonEnvironment) (*ScenarioDone, error)
type VMCollection ¶
type VMCollection struct {
	LibvirtProvider
	// contains filtered or unexported fields
}
Each VMCollection represents the resources needed to set up a collection of libvirt VMs on a single instance. It is composed of:
- instance: the instance on which the components of this VMCollection will be created.
- vmsets: the VMSets which are part of this collection.
- pool: the libvirt pool which will be shared across all vmsets in the collection.
- fs: the filesystems, each consisting of pools and base volumes. Each VMSet results in one filesystem.
- domains: the libvirt domains, each representing a single libvirt VM. Each VMSet results in one or more domains being built.
A usage sketch follows the method list below.
func BuildVMCollections ¶
func (*VMCollection) SetupCollectionDomainConfigurations ¶
func (*VMCollection) SetupCollectionFilesystems ¶
func (*VMCollection) SetupCollectionNetwork ¶
func (vm *VMCollection) SetupCollectionNetwork(depends []pulumi.Resource) error
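A rough sketch of wiring up and launching a set of collections. BuildVMCollections and the filesystem/domain setup helpers are listed above without full signatures, so only the documented calls are used here; collections ([]*VMCollection) and depends ([]pulumi.Resource) are assumed to already exist.
// Set up networking for every collection, then launch them all.
for _, vm := range collections {
	if err := vm.SetupCollectionNetwork(depends); err != nil {
		return err
	}
}

// LaunchVMCollections builds the domains and returns the resulting resources,
// which later steps can depend on.
launched, err := LaunchVMCollections(collections, depends)
if err != nil {
	return err
}
_ = launched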