package v1beta2

v1.13.0-rc.0
Published: Aug 22, 2024 License: Apache-2.0 Imports: 15 Imported by: 0

Documentation

Overview

+kubebuilder:object:generate=true +groupName=dms.aws.upbound.io +versionName=v1beta2

Index

Constants

const (
	CRDGroup   = "dms.aws.upbound.io"
	CRDVersion = "v1beta2"
)

Package type metadata.

Variables

var (
	Endpoint_Kind             = "Endpoint"
	Endpoint_GroupKind        = schema.GroupKind{Group: CRDGroup, Kind: Endpoint_Kind}.String()
	Endpoint_KindAPIVersion   = Endpoint_Kind + "." + CRDGroupVersion.String()
	Endpoint_GroupVersionKind = CRDGroupVersion.WithKind(Endpoint_Kind)
)

Repository type metadata.

var (
	// CRDGroupVersion is the API Group Version used to register the objects
	CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
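These are the standard controller-runtime scheme-builder hooks. A minimal sketch of registering the group-version with a scheme, assuming the package is importable as github.com/upbound/provider-aws/apis/dms/v1beta2 (the import path is inferred from the group name and is not stated on this page):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	// Assumed import path for this package.
	dmsv1beta2 "github.com/upbound/provider-aws/apis/dms/v1beta2"
)

func main() {
	// Register the dms.aws.upbound.io/v1beta2 types with a new runtime scheme.
	s := runtime.NewScheme()
	if err := dmsv1beta2.AddToScheme(s); err != nil {
		panic(err)
	}

	// The scheme now recognizes the Endpoint kind under its GroupVersionKind.
	fmt.Println(dmsv1beta2.Endpoint_GroupVersionKind) // dms.aws.upbound.io/v1beta2, Kind=Endpoint
}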

Functions

This section is empty.

Types

type ElasticsearchSettingsInitParameters

type ElasticsearchSettingsInitParameters struct {

	// Endpoint for the OpenSearch cluster.
	EndpointURI *string `json:"endpointUri,omitempty" tf:"endpoint_uri,omitempty"`

	// Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.
	ErrorRetryDuration *float64 `json:"errorRetryDuration,omitempty" tf:"error_retry_duration,omitempty"`

	// Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.
	FullLoadErrorPercentage *float64 `json:"fullLoadErrorPercentage,omitempty" tf:"full_load_error_percentage,omitempty"`

	// ARN of the IAM Role with permissions to write to the OpenSearch cluster.
	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`

	// Enable to migrate documents using the _doc document type. OpenSearch and Elasticsearch clusters only support the _doc document type in versions 7.x and later. The default value is false.
	UseNewMappingType *bool `json:"useNewMappingType,omitempty" tf:"use_new_mapping_type,omitempty"`
}
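Every field is a pointer so that unset values can be distinguished from zero values. A minimal sketch of populating the block, assuming the package import path github.com/upbound/provider-aws/apis/dms/v1beta2; the endpoint URI and role ARN are placeholder values:

package dmsexamples

import (
	dmsv1beta2 "github.com/upbound/provider-aws/apis/dms/v1beta2" // assumed import path
)

// ptr returns a pointer to its argument.
func ptr[T any](v T) *T { return &v }

// openSearchSettings builds the settings block for an OpenSearch target.
func openSearchSettings() *dmsv1beta2.ElasticsearchSettingsInitParameters {
	return &dmsv1beta2.ElasticsearchSettingsInitParameters{
		EndpointURI:             ptr("https://search-example.us-east-1.es.amazonaws.com"),
		ErrorRetryDuration:      ptr(300.0),
		FullLoadErrorPercentage: ptr(10.0),
		ServiceAccessRoleArn:    ptr("arn:aws:iam::123456789012:role/dms-opensearch-access"),
		UseNewMappingType:       ptr(true),
	}
}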

func (*ElasticsearchSettingsInitParameters) DeepCopy

func (in *ElasticsearchSettingsInitParameters) DeepCopy() *ElasticsearchSettingsInitParameters

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSettingsInitParameters.

func (*ElasticsearchSettingsInitParameters) DeepCopyInto

func (in *ElasticsearchSettingsInitParameters) DeepCopyInto(out *ElasticsearchSettingsInitParameters)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ElasticsearchSettingsObservation

type ElasticsearchSettingsObservation struct {

	// Endpoint for the OpenSearch cluster.
	EndpointURI *string `json:"endpointUri,omitempty" tf:"endpoint_uri,omitempty"`

	// Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.
	ErrorRetryDuration *float64 `json:"errorRetryDuration,omitempty" tf:"error_retry_duration,omitempty"`

	// Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.
	FullLoadErrorPercentage *float64 `json:"fullLoadErrorPercentage,omitempty" tf:"full_load_error_percentage,omitempty"`

	// ARN of the IAM Role with permissions to write to the OpenSearch cluster.
	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`

	// Enable to migrate documents using the _doc document type. OpenSearch and Elasticsearch clusters only support the _doc document type in versions 7.x and later. The default value is false.
	UseNewMappingType *bool `json:"useNewMappingType,omitempty" tf:"use_new_mapping_type,omitempty"`
}

func (*ElasticsearchSettingsObservation) DeepCopy

func (in *ElasticsearchSettingsObservation) DeepCopy() *ElasticsearchSettingsObservation

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSettingsObservation.

func (*ElasticsearchSettingsObservation) DeepCopyInto

func (in *ElasticsearchSettingsObservation) DeepCopyInto(out *ElasticsearchSettingsObservation)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type ElasticsearchSettingsParameters

type ElasticsearchSettingsParameters struct {

	// Endpoint for the OpenSearch cluster.
	// +kubebuilder:validation:Optional
	EndpointURI *string `json:"endpointUri" tf:"endpoint_uri,omitempty"`

	// Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.
	// +kubebuilder:validation:Optional
	ErrorRetryDuration *float64 `json:"errorRetryDuration,omitempty" tf:"error_retry_duration,omitempty"`

	// Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.
	// +kubebuilder:validation:Optional
	FullLoadErrorPercentage *float64 `json:"fullLoadErrorPercentage,omitempty" tf:"full_load_error_percentage,omitempty"`

	// ARN of the IAM Role with permissions to write to the OpenSearch cluster.
	// +kubebuilder:validation:Optional
	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn" tf:"service_access_role_arn,omitempty"`

	// Enable to migrate documents using the _doc document type. OpenSearch and Elasticsearch clusters only support the _doc document type in versions 7.x and later. The default value is false.
	// +kubebuilder:validation:Optional
	UseNewMappingType *bool `json:"useNewMappingType,omitempty" tf:"use_new_mapping_type,omitempty"`
}

func (*ElasticsearchSettingsParameters) DeepCopy

func (in *ElasticsearchSettingsParameters) DeepCopy() *ElasticsearchSettingsParameters

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSettingsParameters.

func (*ElasticsearchSettingsParameters) DeepCopyInto

func (in *ElasticsearchSettingsParameters) DeepCopyInto(out *ElasticsearchSettingsParameters)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type Endpoint

type Endpoint struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.endpointType) || (has(self.initProvider) && has(self.initProvider.endpointType))",message="spec.forProvider.endpointType is a required parameter"
	// +kubebuilder:validation:XValidation:rule="!('*' in self.managementPolicies || 'Create' in self.managementPolicies || 'Update' in self.managementPolicies) || has(self.forProvider.engineName) || (has(self.initProvider) && has(self.initProvider.engineName))",message="spec.forProvider.engineName is a required parameter"
	Spec   EndpointSpec   `json:"spec"`
	Status EndpointStatus `json:"status,omitempty"`
}

Endpoint is the Schema for the Endpoints API. Provides a DMS (Data Migration Service) endpoint resource. +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,aws}
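The two XValidation rules above mean that endpointType and engineName must be supplied (in forProvider or initProvider) whenever the management policies allow Create or Update. A minimal sketch of a valid object in Go, assuming the package import path github.com/upbound/provider-aws/apis/dms/v1beta2 and placeholder connection values:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	dmsv1beta2 "github.com/upbound/provider-aws/apis/dms/v1beta2" // assumed import path
)

// ptr returns a pointer to its argument.
func ptr[T any](v T) *T { return &v }

func main() {
	ep := &dmsv1beta2.Endpoint{
		ObjectMeta: metav1.ObjectMeta{Name: "example-source"},
		Spec: dmsv1beta2.EndpointSpec{
			ForProvider: dmsv1beta2.EndpointParameters{
				Region:       ptr("us-east-1"), // required
				EndpointType: ptr("source"),    // required by the CEL rule
				EngineName:   ptr("postgres"),  // required by the CEL rule
				ServerName:   ptr("db.example.internal"),
				Port:         ptr(5432.0),
				DatabaseName: ptr("app"),
			},
		},
	}
	fmt.Println(ep.GetTerraformResourceType())
}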

func (*Endpoint) DeepCopy

func (in *Endpoint) DeepCopy() *Endpoint

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint.

func (*Endpoint) DeepCopyInto

func (in *Endpoint) DeepCopyInto(out *Endpoint)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*Endpoint) DeepCopyObject

func (in *Endpoint) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

func (*Endpoint) GetCondition

func (mg *Endpoint) GetCondition(ct xpv1.ConditionType) xpv1.Condition

GetCondition of this Endpoint.

func (*Endpoint) GetConnectionDetailsMapping

func (tr *Endpoint) GetConnectionDetailsMapping() map[string]string

GetConnectionDetailsMapping for this Endpoint

func (*Endpoint) GetDeletionPolicy

func (mg *Endpoint) GetDeletionPolicy() xpv1.DeletionPolicy

GetDeletionPolicy of this Endpoint.

func (*Endpoint) GetID

func (tr *Endpoint) GetID() string

GetID returns ID of underlying Terraform resource of this Endpoint

func (*Endpoint) GetInitParameters

func (tr *Endpoint) GetInitParameters() (map[string]any, error)

GetInitParameters of this Endpoint

func (*Endpoint) GetManagementPolicies

func (mg *Endpoint) GetManagementPolicies() xpv1.ManagementPolicies

GetManagementPolicies of this Endpoint.

func (*Endpoint) GetMergedParameters

func (tr *Endpoint) GetMergedParameters(shouldMergeInitProvider bool) (map[string]any, error)

GetMergedParameters of this Endpoint

func (*Endpoint) GetObservation

func (tr *Endpoint) GetObservation() (map[string]any, error)

GetObservation of this Endpoint

func (*Endpoint) GetParameters

func (tr *Endpoint) GetParameters() (map[string]any, error)

GetParameters of this Endpoint

func (*Endpoint) GetProviderConfigReference

func (mg *Endpoint) GetProviderConfigReference() *xpv1.Reference

GetProviderConfigReference of this Endpoint.

func (*Endpoint) GetPublishConnectionDetailsTo

func (mg *Endpoint) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo

GetPublishConnectionDetailsTo of this Endpoint.

func (*Endpoint) GetTerraformResourceType

func (mg *Endpoint) GetTerraformResourceType() string

GetTerraformResourceType returns Terraform resource type for this Endpoint

func (*Endpoint) GetTerraformSchemaVersion

func (tr *Endpoint) GetTerraformSchemaVersion() int

GetTerraformSchemaVersion returns the associated Terraform schema version

func (*Endpoint) GetWriteConnectionSecretToReference

func (mg *Endpoint) GetWriteConnectionSecretToReference() *xpv1.SecretReference

GetWriteConnectionSecretToReference of this Endpoint.

func (*Endpoint) Hub

func (tr *Endpoint) Hub()

Hub marks this type as a conversion hub.

func (*Endpoint) LateInitialize

func (tr *Endpoint) LateInitialize(attrs []byte) (bool, error)

LateInitialize this Endpoint using its observed tfState. Returns true if there are any spec changes for the resource.
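A minimal sketch of calling it, assuming the package import path github.com/upbound/provider-aws/apis/dms/v1beta2; the attribute JSON is a hypothetical fragment of observed Terraform state keyed by the tf field names:

package main

import (
	"fmt"

	dmsv1beta2 "github.com/upbound/provider-aws/apis/dms/v1beta2" // assumed import path
)

func main() {
	ep := &dmsv1beta2.Endpoint{}

	// Hypothetical observed attributes from the underlying aws_dms_endpoint state.
	attrs := []byte(`{"port": 5432, "ssl_mode": "require"}`)

	changed, err := ep.LateInitialize(attrs)
	if err != nil {
		panic(err)
	}
	fmt.Println("spec changed:", changed) // true when unset spec fields were back-filled
}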

func (*Endpoint) ResolveReferences

func (mg *Endpoint) ResolveReferences(ctx context.Context, c client.Reader) error

func (*Endpoint) SetConditions

func (mg *Endpoint) SetConditions(c ...xpv1.Condition)

SetConditions of this Endpoint.

func (*Endpoint) SetDeletionPolicy

func (mg *Endpoint) SetDeletionPolicy(r xpv1.DeletionPolicy)

SetDeletionPolicy of this Endpoint.

func (*Endpoint) SetManagementPolicies

func (mg *Endpoint) SetManagementPolicies(r xpv1.ManagementPolicies)

SetManagementPolicies of this Endpoint.

func (*Endpoint) SetObservation

func (tr *Endpoint) SetObservation(obs map[string]any) error

SetObservation for this Endpoint

func (*Endpoint) SetParameters

func (tr *Endpoint) SetParameters(params map[string]any) error

SetParameters for this Endpoint

func (*Endpoint) SetProviderConfigReference

func (mg *Endpoint) SetProviderConfigReference(r *xpv1.Reference)

SetProviderConfigReference of this Endpoint.

func (*Endpoint) SetPublishConnectionDetailsTo

func (mg *Endpoint) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo)

SetPublishConnectionDetailsTo of this Endpoint.

func (*Endpoint) SetWriteConnectionSecretToReference

func (mg *Endpoint) SetWriteConnectionSecretToReference(r *xpv1.SecretReference)

SetWriteConnectionSecretToReference of this Endpoint.

type EndpointInitParameters

type EndpointInitParameters struct {

	// ARN for the certificate.
	CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"`

	// Name of the endpoint database.
	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`

	// Configuration block for OpenSearch settings. See below.
	ElasticsearchSettings *ElasticsearchSettingsInitParameters `json:"elasticsearchSettings,omitempty" tf:"elasticsearch_settings,omitempty"`

	// Type of endpoint. Valid values are source, target.
	EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"`

	// Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some engine names are available only for the target endpoint type (e.g. redshift).
	EngineName *string `json:"engineName,omitempty" tf:"engine_name,omitempty"`

	// Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
	ExtraConnectionAttributes *string `json:"extraConnectionAttributes,omitempty" tf:"extra_connection_attributes,omitempty"`

	// ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key
	// +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor()
	KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"`

	// Reference to a Key in kms to populate kmsKeyArn.
	// +kubebuilder:validation:Optional
	KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"`

	// Selector for a Key in kms to populate kmsKeyArn.
	// +kubebuilder:validation:Optional
	KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"`

	// Configuration block for Kafka settings. See below.
	KafkaSettings *KafkaSettingsInitParameters `json:"kafkaSettings,omitempty" tf:"kafka_settings,omitempty"`

	// Configuration block for Kinesis settings. See below.
	KinesisSettings *KinesisSettingsInitParameters `json:"kinesisSettings,omitempty" tf:"kinesis_settings,omitempty"`

	// Configuration block for MongoDB settings. See below.
	MongodbSettings *MongodbSettingsInitParameters `json:"mongodbSettings,omitempty" tf:"mongodb_settings,omitempty"`

	// Password to be used to login to the endpoint database.
	PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"`

	// Whether to pause associated running replication tasks before modifying the endpoint. Only tasks paused by the resource will be restarted after the modification completes. Default is false.
	PauseReplicationTasks *bool `json:"pauseReplicationTasks,omitempty" tf:"pause_replication_tasks,omitempty"`

	// Port used by the endpoint database.
	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`

	// Configuration block for Postgres settings. See below.
	PostgresSettings *PostgresSettingsInitParameters `json:"postgresSettings,omitempty" tf:"postgres_settings,omitempty"`

	RedisSettings *RedisSettingsInitParameters `json:"redisSettings,omitempty" tf:"redis_settings,omitempty"`

	// Configuration block for Redshift settings. See below.
	RedshiftSettings *RedshiftSettingsInitParameters `json:"redshiftSettings,omitempty" tf:"redshift_settings,omitempty"`

	// (Deprecated, use the aws_dms_s3_endpoint resource instead) Configuration block for S3 settings. See below.
	// This argument is deprecated and will be removed in a future version; use aws_dms_s3_endpoint instead
	S3Settings *S3SettingsInitParameters `json:"s3Settings,omitempty" tf:"s3_settings,omitempty"`

	// SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full
	SSLMode *string `json:"sslMode,omitempty" tf:"ssl_mode,omitempty"`

	// ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role
	// +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor()
	SecretsManagerAccessRoleArn *string `json:"secretsManagerAccessRoleArn,omitempty" tf:"secrets_manager_access_role_arn,omitempty"`

	// Reference to a Role in iam to populate secretsManagerAccessRoleArn.
	// +kubebuilder:validation:Optional
	SecretsManagerAccessRoleArnRef *v1.Reference `json:"secretsManagerAccessRoleArnRef,omitempty" tf:"-"`

	// Selector for a Role in iam to populate secretsManagerAccessRoleArn.
	// +kubebuilder:validation:Optional
	SecretsManagerAccessRoleArnSelector *v1.Selector `json:"secretsManagerAccessRoleArnSelector,omitempty" tf:"-"`

	// ARN of the Secrets Manager secret that contains the endpoint connection details, used instead of clear-text values for username, password, server_name, and port. You can't specify both.
	SecretsManagerArn *string `json:"secretsManagerArn,omitempty" tf:"secrets_manager_arn,omitempty"`

	// Host name of the server.
	ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`

	// ARN used by the service access IAM role for dynamodb endpoints.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role
	// +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor()
	ServiceAccessRole *string `json:"serviceAccessRole,omitempty" tf:"service_access_role,omitempty"`

	// Reference to a Role in iam to populate serviceAccessRole.
	// +kubebuilder:validation:Optional
	ServiceAccessRoleRef *v1.Reference `json:"serviceAccessRoleRef,omitempty" tf:"-"`

	// Selector for a Role in iam to populate serviceAccessRole.
	// +kubebuilder:validation:Optional
	ServiceAccessRoleSelector *v1.Selector `json:"serviceAccessRoleSelector,omitempty" tf:"-"`

	// Key-value map of resource tags.
	// +mapType=granular
	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`

	// User name to be used to login to the endpoint database.
	Username *string `json:"username,omitempty" tf:"username,omitempty"`
}
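The *Ref and *Selector fields follow the usual Crossplane cross-resource reference pattern: instead of a literal ARN, a field can be resolved from another managed resource. A minimal sketch, assuming the package import path github.com/upbound/provider-aws/apis/dms/v1beta2 and a hypothetical kms.aws.upbound.io Key named dms-key:

package dmsexamples

import (
	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

	dmsv1beta2 "github.com/upbound/provider-aws/apis/dms/v1beta2" // assumed import path
)

// ptr returns a pointer to its argument.
func ptr[T any](v T) *T { return &v }

// initWithKMSRef resolves kmsKeyArn from a referenced Key instead of a literal ARN.
func initWithKMSRef() dmsv1beta2.EndpointInitParameters {
	return dmsv1beta2.EndpointInitParameters{
		EndpointType: ptr("source"),
		EngineName:   ptr("postgres"),
		KMSKeyArnRef: &xpv1.Reference{Name: "dms-key"},
	}
}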

func (*EndpointInitParameters) DeepCopy

func (in *EndpointInitParameters) DeepCopy() *EndpointInitParameters

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointInitParameters.

func (*EndpointInitParameters) DeepCopyInto

func (in *EndpointInitParameters) DeepCopyInto(out *EndpointInitParameters)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type EndpointList

type EndpointList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Endpoint `json:"items"`
}

EndpointList contains a list of Endpoints

func (*EndpointList) DeepCopy

func (in *EndpointList) DeepCopy() *EndpointList

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointList.

func (*EndpointList) DeepCopyInto

func (in *EndpointList) DeepCopyInto(out *EndpointList)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

func (*EndpointList) DeepCopyObject

func (in *EndpointList) DeepCopyObject() runtime.Object

DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.

func (*EndpointList) GetItems

func (l *EndpointList) GetItems() []resource.Managed

GetItems of this EndpointList.

type EndpointObservation

type EndpointObservation struct {

	// ARN for the certificate.
	CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"`

	// Name of the endpoint database.
	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`

	// Configuration block for OpenSearch settings. See below.
	ElasticsearchSettings *ElasticsearchSettingsObservation `json:"elasticsearchSettings,omitempty" tf:"elasticsearch_settings,omitempty"`

	// ARN for the endpoint.
	EndpointArn *string `json:"endpointArn,omitempty" tf:"endpoint_arn,omitempty"`

	// Type of endpoint. Valid values are source, target.
	EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"`

	// Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some engine names are available only for the target endpoint type (e.g. redshift).
	EngineName *string `json:"engineName,omitempty" tf:"engine_name,omitempty"`

	// Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
	ExtraConnectionAttributes *string `json:"extraConnectionAttributes,omitempty" tf:"extra_connection_attributes,omitempty"`

	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.
	KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"`

	// Configuration block for Kafka settings. See below.
	KafkaSettings *KafkaSettingsObservation `json:"kafkaSettings,omitempty" tf:"kafka_settings,omitempty"`

	// Configuration block for Kinesis settings. See below.
	KinesisSettings *KinesisSettingsObservation `json:"kinesisSettings,omitempty" tf:"kinesis_settings,omitempty"`

	// Configuration block for MongoDB settings. See below.
	MongodbSettings *MongodbSettingsObservation `json:"mongodbSettings,omitempty" tf:"mongodb_settings,omitempty"`

	// Whether to pause associated running replication tasks before modifying the endpoint. Only tasks paused by the resource will be restarted after the modification completes. Default is false.
	PauseReplicationTasks *bool `json:"pauseReplicationTasks,omitempty" tf:"pause_replication_tasks,omitempty"`

	// Port used by the endpoint database.
	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`

	// Configuration block for Postgres settings. See below.
	PostgresSettings *PostgresSettingsObservation `json:"postgresSettings,omitempty" tf:"postgres_settings,omitempty"`

	RedisSettings *RedisSettingsObservation `json:"redisSettings,omitempty" tf:"redis_settings,omitempty"`

	// Configuration block for Redshift settings. See below.
	RedshiftSettings *RedshiftSettingsObservation `json:"redshiftSettings,omitempty" tf:"redshift_settings,omitempty"`

	// (Deprecated, use the aws_dms_s3_endpoint resource instead) Configuration block for S3 settings. See below.
	// This argument is deprecated and will be removed in a future version; use aws_dms_s3_endpoint instead
	S3Settings *S3SettingsObservation `json:"s3Settings,omitempty" tf:"s3_settings,omitempty"`

	// SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full
	SSLMode *string `json:"sslMode,omitempty" tf:"ssl_mode,omitempty"`

	// ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.
	SecretsManagerAccessRoleArn *string `json:"secretsManagerAccessRoleArn,omitempty" tf:"secrets_manager_access_role_arn,omitempty"`

	// ARN of the Secrets Manager secret that contains the endpoint connection details, used instead of clear-text values for username, password, server_name, and port. You can't specify both.
	SecretsManagerArn *string `json:"secretsManagerArn,omitempty" tf:"secrets_manager_arn,omitempty"`

	// Host name of the server.
	ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`

	// ARN used by the service access IAM role for dynamodb endpoints.
	ServiceAccessRole *string `json:"serviceAccessRole,omitempty" tf:"service_access_role,omitempty"`

	// Key-value map of resource tags.
	// +mapType=granular
	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`

	// Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.
	// +mapType=granular
	TagsAll map[string]*string `json:"tagsAll,omitempty" tf:"tags_all,omitempty"`

	// User name to be used to login to the endpoint database.
	Username *string `json:"username,omitempty" tf:"username,omitempty"`
}

func (*EndpointObservation) DeepCopy

func (in *EndpointObservation) DeepCopy() *EndpointObservation

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointObservation.

func (*EndpointObservation) DeepCopyInto

func (in *EndpointObservation) DeepCopyInto(out *EndpointObservation)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type EndpointParameters

type EndpointParameters struct {

	// ARN for the certificate.
	// +kubebuilder:validation:Optional
	CertificateArn *string `json:"certificateArn,omitempty" tf:"certificate_arn,omitempty"`

	// Name of the endpoint database.
	// +kubebuilder:validation:Optional
	DatabaseName *string `json:"databaseName,omitempty" tf:"database_name,omitempty"`

	// Configuration block for OpenSearch settings. See below.
	// +kubebuilder:validation:Optional
	ElasticsearchSettings *ElasticsearchSettingsParameters `json:"elasticsearchSettings,omitempty" tf:"elasticsearch_settings,omitempty"`

	// Type of endpoint. Valid values are source, target.
	// +kubebuilder:validation:Optional
	EndpointType *string `json:"endpointType,omitempty" tf:"endpoint_type,omitempty"`

	// Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, s3, sqlserver, sybase. Please note that some engine names are available only for the target endpoint type (e.g. redshift).
	// +kubebuilder:validation:Optional
	EngineName *string `json:"engineName,omitempty" tf:"engine_name,omitempty"`

	// Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
	// +kubebuilder:validation:Optional
	ExtraConnectionAttributes *string `json:"extraConnectionAttributes,omitempty" tf:"extra_connection_attributes,omitempty"`

	// ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/kms/v1beta1.Key
	// +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor()
	// +kubebuilder:validation:Optional
	KMSKeyArn *string `json:"kmsKeyArn,omitempty" tf:"kms_key_arn,omitempty"`

	// Reference to a Key in kms to populate kmsKeyArn.
	// +kubebuilder:validation:Optional
	KMSKeyArnRef *v1.Reference `json:"kmsKeyArnRef,omitempty" tf:"-"`

	// Selector for a Key in kms to populate kmsKeyArn.
	// +kubebuilder:validation:Optional
	KMSKeyArnSelector *v1.Selector `json:"kmsKeyArnSelector,omitempty" tf:"-"`

	// Configuration block for Kafka settings. See below.
	// +kubebuilder:validation:Optional
	KafkaSettings *KafkaSettingsParameters `json:"kafkaSettings,omitempty" tf:"kafka_settings,omitempty"`

	// Configuration block for Kinesis settings. See below.
	// +kubebuilder:validation:Optional
	KinesisSettings *KinesisSettingsParameters `json:"kinesisSettings,omitempty" tf:"kinesis_settings,omitempty"`

	// Configuration block for MongoDB settings. See below.
	// +kubebuilder:validation:Optional
	MongodbSettings *MongodbSettingsParameters `json:"mongodbSettings,omitempty" tf:"mongodb_settings,omitempty"`

	// Password to be used to login to the endpoint database.
	// +kubebuilder:validation:Optional
	PasswordSecretRef *v1.SecretKeySelector `json:"passwordSecretRef,omitempty" tf:"-"`

	// Whether to pause associated running replication tasks before modifying the endpoint. Only tasks paused by the resource will be restarted after the modification completes. Default is false.
	// +kubebuilder:validation:Optional
	PauseReplicationTasks *bool `json:"pauseReplicationTasks,omitempty" tf:"pause_replication_tasks,omitempty"`

	// Port used by the endpoint database.
	// +kubebuilder:validation:Optional
	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`

	// Configuration block for Postgres settings. See below.
	// +kubebuilder:validation:Optional
	PostgresSettings *PostgresSettingsParameters `json:"postgresSettings,omitempty" tf:"postgres_settings,omitempty"`

	// +kubebuilder:validation:Optional
	RedisSettings *RedisSettingsParameters `json:"redisSettings,omitempty" tf:"redis_settings,omitempty"`

	// Configuration block for Redshift settings. See below.
	// +kubebuilder:validation:Optional
	RedshiftSettings *RedshiftSettingsParameters `json:"redshiftSettings,omitempty" tf:"redshift_settings,omitempty"`

	// Region is the region you'd like your resource to be created in.
	// +upjet:crd:field:TFTag=-
	// +kubebuilder:validation:Required
	Region *string `json:"region" tf:"-"`

	// (Deprecated, use the aws_dms_s3_endpoint resource instead) Configuration block for S3 settings. See below.
	// This argument is deprecated and will be removed in a future version; use aws_dms_s3_endpoint instead
	// +kubebuilder:validation:Optional
	S3Settings *S3SettingsParameters `json:"s3Settings,omitempty" tf:"s3_settings,omitempty"`

	// SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full
	// +kubebuilder:validation:Optional
	SSLMode *string `json:"sslMode,omitempty" tf:"ssl_mode,omitempty"`

	// ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role
	// +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor()
	// +kubebuilder:validation:Optional
	SecretsManagerAccessRoleArn *string `json:"secretsManagerAccessRoleArn,omitempty" tf:"secrets_manager_access_role_arn,omitempty"`

	// Reference to a Role in iam to populate secretsManagerAccessRoleArn.
	// +kubebuilder:validation:Optional
	SecretsManagerAccessRoleArnRef *v1.Reference `json:"secretsManagerAccessRoleArnRef,omitempty" tf:"-"`

	// Selector for a Role in iam to populate secretsManagerAccessRoleArn.
	// +kubebuilder:validation:Optional
	SecretsManagerAccessRoleArnSelector *v1.Selector `json:"secretsManagerAccessRoleArnSelector,omitempty" tf:"-"`

	// ARN of the Secrets Manager secret that contains the endpoint connection details, used instead of clear-text values for username, password, server_name, and port. You can't specify both.
	// +kubebuilder:validation:Optional
	SecretsManagerArn *string `json:"secretsManagerArn,omitempty" tf:"secrets_manager_arn,omitempty"`

	// Host name of the server.
	// +kubebuilder:validation:Optional
	ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`

	// ARN used by the service access IAM role for dynamodb endpoints.
	// +crossplane:generate:reference:type=github.com/upbound/provider-aws/apis/iam/v1beta1.Role
	// +crossplane:generate:reference:extractor=github.com/upbound/provider-aws/config/common.ARNExtractor()
	// +kubebuilder:validation:Optional
	ServiceAccessRole *string `json:"serviceAccessRole,omitempty" tf:"service_access_role,omitempty"`

	// Reference to a Role in iam to populate serviceAccessRole.
	// +kubebuilder:validation:Optional
	ServiceAccessRoleRef *v1.Reference `json:"serviceAccessRoleRef,omitempty" tf:"-"`

	// Selector for a Role in iam to populate serviceAccessRole.
	// +kubebuilder:validation:Optional
	ServiceAccessRoleSelector *v1.Selector `json:"serviceAccessRoleSelector,omitempty" tf:"-"`

	// Key-value map of resource tags.
	// +kubebuilder:validation:Optional
	// +mapType=granular
	Tags map[string]*string `json:"tags,omitempty" tf:"tags,omitempty"`

	// User name to be used to login to the endpoint database.
	// +kubebuilder:validation:Optional
	Username *string `json:"username,omitempty" tf:"username,omitempty"`
}
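As the secretsManagerArn comment notes, connection details can come either from a Secrets Manager secret plus an access role, or from the clear-text username/password/serverName/port fields, but not both. A minimal sketch of the Secrets Manager style, assuming the package import path github.com/upbound/provider-aws/apis/dms/v1beta2 and placeholder ARNs:

package dmsexamples

import (
	dmsv1beta2 "github.com/upbound/provider-aws/apis/dms/v1beta2" // assumed import path
)

// ptr returns a pointer to its argument.
func ptr[T any](v T) *T { return &v }

// secretsManagerParams supplies connection details via Secrets Manager only.
func secretsManagerParams() dmsv1beta2.EndpointParameters {
	return dmsv1beta2.EndpointParameters{
		Region:                      ptr("us-east-1"),
		EndpointType:                ptr("source"),
		EngineName:                  ptr("mysql"),
		SecretsManagerArn:           ptr("arn:aws:secretsmanager:us-east-1:123456789012:secret:dms/source-abc123"),
		SecretsManagerAccessRoleArn: ptr("arn:aws:iam::123456789012:role/dms-secrets-access"),
	}
}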

func (*EndpointParameters) DeepCopy

func (in *EndpointParameters) DeepCopy() *EndpointParameters

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParameters.

func (*EndpointParameters) DeepCopyInto

func (in *EndpointParameters) DeepCopyInto(out *EndpointParameters)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type EndpointSpec

type EndpointSpec struct {
	v1.ResourceSpec `json:",inline"`
	ForProvider     EndpointParameters `json:"forProvider"`
	// THIS IS A BETA FIELD. It will be honored
	// unless the Management Policies feature flag is disabled.
	// InitProvider holds the same fields as ForProvider, with the exception
	// of Identifier and other resource reference fields. The fields that are
	// in InitProvider are merged into ForProvider when the resource is created.
	// The same fields are also added to the terraform ignore_changes hook, to
	// avoid updating them after creation. This is useful for fields that are
	// required on creation, but we do not want to update them after creation,
	// for example because an external controller is managing them, like an
	// autoscaler.
	InitProvider EndpointInitParameters `json:"initProvider,omitempty"`
}

EndpointSpec defines the desired state of Endpoint
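A minimal sketch of the ForProvider/InitProvider split, assuming the package import path github.com/upbound/provider-aws/apis/dms/v1beta2: the extra connection attributes set only in InitProvider (a placeholder value here) are applied at creation and then left alone, per the ignore_changes behavior described above, while the ForProvider fields are continuously reconciled.

package dmsexamples

import (
	dmsv1beta2 "github.com/upbound/provider-aws/apis/dms/v1beta2" // assumed import path
)

// ptr returns a pointer to its argument.
func ptr[T any](v T) *T { return &v }

// specWithCreateOnlyAttrs sets a create-only field through InitProvider.
func specWithCreateOnlyAttrs() dmsv1beta2.EndpointSpec {
	return dmsv1beta2.EndpointSpec{
		ForProvider: dmsv1beta2.EndpointParameters{
			Region:       ptr("us-east-1"),
			EndpointType: ptr("source"),
			EngineName:   ptr("postgres"),
		},
		InitProvider: dmsv1beta2.EndpointInitParameters{
			// Placeholder attribute string; applied on create, then ignored.
			ExtraConnectionAttributes: ptr("heartbeatFrequency=5;"),
		},
	}
}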

func (*EndpointSpec) DeepCopy

func (in *EndpointSpec) DeepCopy() *EndpointSpec

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSpec.

func (*EndpointSpec) DeepCopyInto

func (in *EndpointSpec) DeepCopyInto(out *EndpointSpec)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type EndpointStatus

type EndpointStatus struct {
	v1.ResourceStatus `json:",inline"`
	AtProvider        EndpointObservation `json:"atProvider,omitempty"`
}

EndpointStatus defines the observed state of Endpoint.

func (*EndpointStatus) DeepCopy

func (in *EndpointStatus) DeepCopy() *EndpointStatus

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointStatus.

func (*EndpointStatus) DeepCopyInto

func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type KafkaSettingsInitParameters

type KafkaSettingsInitParameters struct {

	// Kafka broker location. Specify in the form broker-hostname-or-ip:port.
	Broker *string `json:"broker,omitempty" tf:"broker,omitempty"`

	// Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.
	IncludeControlDetails *bool `json:"includeControlDetails,omitempty" tf:"include_control_details,omitempty"`

	// Include NULL and empty columns for records migrated to the endpoint. Default is false.
	IncludeNullAndEmpty *bool `json:"includeNullAndEmpty,omitempty" tf:"include_null_and_empty,omitempty"`

	// Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.
	IncludePartitionValue *bool `json:"includePartitionValue,omitempty" tf:"include_partition_value,omitempty"`

	// Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.
	IncludeTableAlterOperations *bool `json:"includeTableAlterOperations,omitempty" tf:"include_table_alter_operations,omitempty"`

	// Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.
	IncludeTransactionDetails *bool `json:"includeTransactionDetails,omitempty" tf:"include_transaction_details,omitempty"`

	// Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).
	MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"`

	// Maximum size in bytes for records created on the endpoint. Default is 1,000,000.
	MessageMaxBytes *float64 `json:"messageMaxBytes,omitempty" tf:"message_max_bytes,omitempty"`

	// Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.
	NoHexPrefix *bool `json:"noHexPrefix,omitempty" tf:"no_hex_prefix,omitempty"`

	// Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.
	PartitionIncludeSchemaTable *bool `json:"partitionIncludeSchemaTable,omitempty" tf:"partition_include_schema_table,omitempty"`

	// ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
	SSLCACertificateArn *string `json:"sslCaCertificateArn,omitempty" tf:"ssl_ca_certificate_arn,omitempty"`

	// ARN of the client certificate used to securely connect to a Kafka target endpoint.
	SSLClientCertificateArn *string `json:"sslClientCertificateArn,omitempty" tf:"ssl_client_certificate_arn,omitempty"`

	// ARN for the client private key used to securely connect to a Kafka target endpoint.
	SSLClientKeyArn *string `json:"sslClientKeyArn,omitempty" tf:"ssl_client_key_arn,omitempty"`

	// Password for the client private key used to securely connect to a Kafka target endpoint.
	SSLClientKeyPasswordSecretRef *v1.SecretKeySelector `json:"sslClientKeyPasswordSecretRef,omitempty" tf:"-"`

	// Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
	SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`

	// Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
	SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`

	// Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.
	SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`

	// Kafka topic for migration. Default is kafka-default-topic.
	Topic *string `json:"topic,omitempty" tf:"topic,omitempty"`
}
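As the securityProtocol comment notes, sasl-ssl requires both a SASL user name and a SASL password, and the password comes from a Kubernetes secret reference. A minimal sketch, assuming the package import path github.com/upbound/provider-aws/apis/dms/v1beta2 and placeholder broker, secret, and topic values:

package dmsexamples

import (
	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

	dmsv1beta2 "github.com/upbound/provider-aws/apis/dms/v1beta2" // assumed import path
)

// ptr returns a pointer to its argument.
func ptr[T any](v T) *T { return &v }

// kafkaSaslSSL configures a Kafka target that authenticates with SASL-SSL.
func kafkaSaslSSL() *dmsv1beta2.KafkaSettingsInitParameters {
	return &dmsv1beta2.KafkaSettingsInitParameters{
		Broker:           ptr("b-1.example-msk.amazonaws.com:9096"),
		Topic:            ptr("dms-migration"),
		SecurityProtocol: ptr("sasl-ssl"),
		SaslUsername:     ptr("dms-user"),
		SaslPasswordSecretRef: &xpv1.SecretKeySelector{
			SecretReference: xpv1.SecretReference{Name: "msk-sasl", Namespace: "crossplane-system"},
			Key:             "password",
		},
	}
}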

func (*KafkaSettingsInitParameters) DeepCopy

func (in *KafkaSettingsInitParameters) DeepCopy() *KafkaSettingsInitParameters

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSettingsInitParameters.

func (*KafkaSettingsInitParameters) DeepCopyInto

func (in *KafkaSettingsInitParameters) DeepCopyInto(out *KafkaSettingsInitParameters)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type KafkaSettingsObservation

type KafkaSettingsObservation struct {

	// Kafka broker location. Specify in the form broker-hostname-or-ip:port.
	Broker *string `json:"broker,omitempty" tf:"broker,omitempty"`

	// Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.
	IncludeControlDetails *bool `json:"includeControlDetails,omitempty" tf:"include_control_details,omitempty"`

	// Include NULL and empty columns for records migrated to the endpoint. Default is false.
	IncludeNullAndEmpty *bool `json:"includeNullAndEmpty,omitempty" tf:"include_null_and_empty,omitempty"`

	// Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.
	IncludePartitionValue *bool `json:"includePartitionValue,omitempty" tf:"include_partition_value,omitempty"`

	// Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.
	IncludeTableAlterOperations *bool `json:"includeTableAlterOperations,omitempty" tf:"include_table_alter_operations,omitempty"`

	// Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.
	IncludeTransactionDetails *bool `json:"includeTransactionDetails,omitempty" tf:"include_transaction_details,omitempty"`

	// Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).
	MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"`

	// Maximum size in bytes for records created on the endpoint. Default is 1,000,000.
	MessageMaxBytes *float64 `json:"messageMaxBytes,omitempty" tf:"message_max_bytes,omitempty"`

	// Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.
	NoHexPrefix *bool `json:"noHexPrefix,omitempty" tf:"no_hex_prefix,omitempty"`

	// Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.
	PartitionIncludeSchemaTable *bool `json:"partitionIncludeSchemaTable,omitempty" tf:"partition_include_schema_table,omitempty"`

	// ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
	SSLCACertificateArn *string `json:"sslCaCertificateArn,omitempty" tf:"ssl_ca_certificate_arn,omitempty"`

	// ARN of the client certificate used to securely connect to a Kafka target endpoint.
	SSLClientCertificateArn *string `json:"sslClientCertificateArn,omitempty" tf:"ssl_client_certificate_arn,omitempty"`

	// ARN for the client private key used to securely connect to a Kafka target endpoint.
	SSLClientKeyArn *string `json:"sslClientKeyArn,omitempty" tf:"ssl_client_key_arn,omitempty"`

	// Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
	SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`

	// Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.
	SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`

	// Kafka topic for migration. Default is kafka-default-topic.
	Topic *string `json:"topic,omitempty" tf:"topic,omitempty"`
}

func (*KafkaSettingsObservation) DeepCopy

func (in *KafkaSettingsObservation) DeepCopy() *KafkaSettingsObservation

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSettingsObservation.

func (*KafkaSettingsObservation) DeepCopyInto

func (in *KafkaSettingsObservation) DeepCopyInto(out *KafkaSettingsObservation)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type KafkaSettingsParameters

type KafkaSettingsParameters struct {

	// Kafka broker location. Specify in the form broker-hostname-or-ip:port.
	// +kubebuilder:validation:Optional
	Broker *string `json:"broker" tf:"broker,omitempty"`

	// Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.
	// +kubebuilder:validation:Optional
	IncludeControlDetails *bool `json:"includeControlDetails,omitempty" tf:"include_control_details,omitempty"`

	// Include NULL and empty columns for records migrated to the endpoint. Default is false.
	// +kubebuilder:validation:Optional
	IncludeNullAndEmpty *bool `json:"includeNullAndEmpty,omitempty" tf:"include_null_and_empty,omitempty"`

	// Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.
	// +kubebuilder:validation:Optional
	IncludePartitionValue *bool `json:"includePartitionValue,omitempty" tf:"include_partition_value,omitempty"`

	// Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.
	// +kubebuilder:validation:Optional
	IncludeTableAlterOperations *bool `json:"includeTableAlterOperations,omitempty" tf:"include_table_alter_operations,omitempty"`

	// Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.
	// +kubebuilder:validation:Optional
	IncludeTransactionDetails *bool `json:"includeTransactionDetails,omitempty" tf:"include_transaction_details,omitempty"`

	// Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).
	// +kubebuilder:validation:Optional
	MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"`

	// Maximum size in bytes for records created on the endpoint. Default is 1,000,000.
	// +kubebuilder:validation:Optional
	MessageMaxBytes *float64 `json:"messageMaxBytes,omitempty" tf:"message_max_bytes,omitempty"`

	// Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.
	// +kubebuilder:validation:Optional
	NoHexPrefix *bool `json:"noHexPrefix,omitempty" tf:"no_hex_prefix,omitempty"`

	// Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.
	// +kubebuilder:validation:Optional
	PartitionIncludeSchemaTable *bool `json:"partitionIncludeSchemaTable,omitempty" tf:"partition_include_schema_table,omitempty"`

	// ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
	// +kubebuilder:validation:Optional
	SSLCACertificateArn *string `json:"sslCaCertificateArn,omitempty" tf:"ssl_ca_certificate_arn,omitempty"`

	// ARN of the client certificate used to securely connect to a Kafka target endpoint.
	// +kubebuilder:validation:Optional
	SSLClientCertificateArn *string `json:"sslClientCertificateArn,omitempty" tf:"ssl_client_certificate_arn,omitempty"`

	// ARN for the client private key used to securely connect to a Kafka target endpoint.
	// +kubebuilder:validation:Optional
	SSLClientKeyArn *string `json:"sslClientKeyArn,omitempty" tf:"ssl_client_key_arn,omitempty"`

	// Password for the client private key used to securely connect to a Kafka target endpoint.
	// +kubebuilder:validation:Optional
	SSLClientKeyPasswordSecretRef *v1.SecretKeySelector `json:"sslClientKeyPasswordSecretRef,omitempty" tf:"-"`

	// Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
	// +kubebuilder:validation:Optional
	SaslPasswordSecretRef *v1.SecretKeySelector `json:"saslPasswordSecretRef,omitempty" tf:"-"`

	// Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
	// +kubebuilder:validation:Optional
	SaslUsername *string `json:"saslUsername,omitempty" tf:"sasl_username,omitempty"`

	// Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.
	// +kubebuilder:validation:Optional
	SecurityProtocol *string `json:"securityProtocol,omitempty" tf:"security_protocol,omitempty"`

	// Kafka topic for migration. Default is kafka-default-topic.
	// +kubebuilder:validation:Optional
	Topic *string `json:"topic,omitempty" tf:"topic,omitempty"`
}

func (*KafkaSettingsParameters) DeepCopy

func (in *KafkaSettingsParameters) DeepCopy() *KafkaSettingsParameters

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSettingsParameters.

func (*KafkaSettingsParameters) DeepCopyInto

func (in *KafkaSettingsParameters) DeepCopyInto(out *KafkaSettingsParameters)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type KinesisSettingsInitParameters

type KinesisSettingsInitParameters struct {

	// Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.
	IncludeControlDetails *bool `json:"includeControlDetails,omitempty" tf:"include_control_details,omitempty"`

	// Include NULL and empty columns in the target. Default is false.
	IncludeNullAndEmpty *bool `json:"includeNullAndEmpty,omitempty" tf:"include_null_and_empty,omitempty"`

	// Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.
	IncludePartitionValue *bool `json:"includePartitionValue,omitempty" tf:"include_partition_value,omitempty"`

	// Includes any data definition language (DDL) operations that change the table in the control data. Default is false.
	IncludeTableAlterOperations *bool `json:"includeTableAlterOperations,omitempty" tf:"include_table_alter_operations,omitempty"`

	// Provides detailed transaction information from the source database. Default is false.
	IncludeTransactionDetails *bool `json:"includeTransactionDetails,omitempty" tf:"include_transaction_details,omitempty"`

	// Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
	MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"`

	// Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
	PartitionIncludeSchemaTable *bool `json:"partitionIncludeSchemaTable,omitempty" tf:"partition_include_schema_table,omitempty"`

	// ARN of the IAM Role with permissions to write to the Kinesis data stream.
	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`

	// ARN of the Kinesis data stream.
	StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"`
}
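A minimal sketch of a Kinesis target block, assuming the package import path github.com/upbound/provider-aws/apis/dms/v1beta2; the stream and role ARNs are placeholders:

package dmsexamples

import (
	dmsv1beta2 "github.com/upbound/provider-aws/apis/dms/v1beta2" // assumed import path
)

// ptr returns a pointer to its argument.
func ptr[T any](v T) *T { return &v }

// kinesisTarget writes change records to a Kinesis data stream as JSON.
func kinesisTarget() *dmsv1beta2.KinesisSettingsInitParameters {
	return &dmsv1beta2.KinesisSettingsInitParameters{
		MessageFormat:         ptr("json"),
		IncludePartitionValue: ptr(true),
		ServiceAccessRoleArn:  ptr("arn:aws:iam::123456789012:role/dms-kinesis-access"),
		StreamArn:             ptr("arn:aws:kinesis:us-east-1:123456789012:stream/dms-target"),
	}
}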

func (*KinesisSettingsInitParameters) DeepCopy

func (in *KinesisSettingsInitParameters) DeepCopy() *KinesisSettingsInitParameters

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisSettingsInitParameters.

func (*KinesisSettingsInitParameters) DeepCopyInto

func (in *KinesisSettingsInitParameters) DeepCopyInto(out *KinesisSettingsInitParameters)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type KinesisSettingsObservation

type KinesisSettingsObservation struct {

	// Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.
	IncludeControlDetails *bool `json:"includeControlDetails,omitempty" tf:"include_control_details,omitempty"`

	// Include NULL and empty columns in the target. Default is false.
	IncludeNullAndEmpty *bool `json:"includeNullAndEmpty,omitempty" tf:"include_null_and_empty,omitempty"`

	// Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.
	IncludePartitionValue *bool `json:"includePartitionValue,omitempty" tf:"include_partition_value,omitempty"`

	// Includes any data definition language (DDL) operations that change the table in the control data. Default is false.
	IncludeTableAlterOperations *bool `json:"includeTableAlterOperations,omitempty" tf:"include_table_alter_operations,omitempty"`

	// Provides detailed transaction information from the source database. Default is false.
	IncludeTransactionDetails *bool `json:"includeTransactionDetails,omitempty" tf:"include_transaction_details,omitempty"`

	// Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
	MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"`

	// Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
	PartitionIncludeSchemaTable *bool `json:"partitionIncludeSchemaTable,omitempty" tf:"partition_include_schema_table,omitempty"`

	// ARN of the IAM Role with permissions to write to the Kinesis data stream.
	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`

	// ARN of the Kinesis data stream.
	StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"`
}

func (*KinesisSettingsObservation) DeepCopy

func (in *KinesisSettingsObservation) DeepCopy() *KinesisSettingsObservation

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisSettingsObservation.

func (*KinesisSettingsObservation) DeepCopyInto

func (in *KinesisSettingsObservation) DeepCopyInto(out *KinesisSettingsObservation)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type KinesisSettingsParameters

type KinesisSettingsParameters struct {

	// Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.
	// +kubebuilder:validation:Optional
	IncludeControlDetails *bool `json:"includeControlDetails,omitempty" tf:"include_control_details,omitempty"`

	// Include NULL and empty columns in the target. Default is false.
	// +kubebuilder:validation:Optional
	IncludeNullAndEmpty *bool `json:"includeNullAndEmpty,omitempty" tf:"include_null_and_empty,omitempty"`

	// Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.
	// +kubebuilder:validation:Optional
	IncludePartitionValue *bool `json:"includePartitionValue,omitempty" tf:"include_partition_value,omitempty"`

	// Includes any data definition language (DDL) operations that change the table in the control data. Default is false.
	// +kubebuilder:validation:Optional
	IncludeTableAlterOperations *bool `json:"includeTableAlterOperations,omitempty" tf:"include_table_alter_operations,omitempty"`

	// Provides detailed transaction information from the source database. Default is false.
	// +kubebuilder:validation:Optional
	IncludeTransactionDetails *bool `json:"includeTransactionDetails,omitempty" tf:"include_transaction_details,omitempty"`

	// Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
	// +kubebuilder:validation:Optional
	MessageFormat *string `json:"messageFormat,omitempty" tf:"message_format,omitempty"`

	// Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
	// +kubebuilder:validation:Optional
	PartitionIncludeSchemaTable *bool `json:"partitionIncludeSchemaTable,omitempty" tf:"partition_include_schema_table,omitempty"`

	// ARN of the IAM Role with permissions to write to the Kinesis data stream.
	// +kubebuilder:validation:Optional
	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`

	// ARN of the Kinesis data stream.
	// +kubebuilder:validation:Optional
	StreamArn *string `json:"streamArn,omitempty" tf:"stream_arn,omitempty"`
}

func (*KinesisSettingsParameters) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KinesisSettingsParameters.

func (*KinesisSettingsParameters) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
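
The Parameters struct above is the user-facing configuration for a Kinesis data stream target. Below is a minimal Go sketch of populating it, assuming the code sits inside this package; the helper closures, the role ARN, and the stream ARN are illustrative placeholders, not part of the generated API.

// exampleKinesisSettings sketches how KinesisSettingsParameters might be
// populated. The helper closures and both ARNs are placeholders.
func exampleKinesisSettings() KinesisSettingsParameters {
	str := func(s string) *string { return &s }
	flag := func(b bool) *bool { return &b }
	return KinesisSettingsParameters{
		MessageFormat:               str("json"),
		IncludeControlDetails:       flag(true),
		IncludeTransactionDetails:   flag(true),
		PartitionIncludeSchemaTable: flag(true),
		ServiceAccessRoleArn:        str("arn:aws:iam::123456789012:role/example-dms-kinesis"),
		StreamArn:                   str("arn:aws:kinesis:us-east-1:123456789012:stream/example"),
	}
}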

type MongodbSettingsInitParameters

type MongodbSettingsInitParameters struct {

	// Authentication mechanism to access the MongoDB source endpoint. Default is default.
	AuthMechanism *string `json:"authMechanism,omitempty" tf:"auth_mechanism,omitempty"`

	// Authentication database name. Not used when auth_type is no. Default is admin.
	AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`

	// Authentication type to access the MongoDB source endpoint. Default is password.
	AuthType *string `json:"authType,omitempty" tf:"auth_type,omitempty"`

	// Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
	DocsToInvestigate *string `json:"docsToInvestigate,omitempty" tf:"docs_to_investigate,omitempty"`

	// Document ID. Use this setting when nesting_level is set to none. Default is false.
	ExtractDocID *string `json:"extractDocId,omitempty" tf:"extract_doc_id,omitempty"`

	// Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).
	NestingLevel *string `json:"nestingLevel,omitempty" tf:"nesting_level,omitempty"`
}

func (*MongodbSettingsInitParameters) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbSettingsInitParameters.

func (*MongodbSettingsInitParameters) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type MongodbSettingsObservation

type MongodbSettingsObservation struct {

	// Authentication mechanism to access the MongoDB source endpoint. Default is default.
	AuthMechanism *string `json:"authMechanism,omitempty" tf:"auth_mechanism,omitempty"`

	// Authentication database name. Not used when auth_type is no. Default is admin.
	AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`

	// Authentication type to access the MongoDB source endpoint. Default is password.
	AuthType *string `json:"authType,omitempty" tf:"auth_type,omitempty"`

	// Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
	DocsToInvestigate *string `json:"docsToInvestigate,omitempty" tf:"docs_to_investigate,omitempty"`

	// Document ID. Use this setting when nesting_level is set to none. Default is false.
	ExtractDocID *string `json:"extractDocId,omitempty" tf:"extract_doc_id,omitempty"`

	// Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).
	NestingLevel *string `json:"nestingLevel,omitempty" tf:"nesting_level,omitempty"`
}

func (*MongodbSettingsObservation) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbSettingsObservation.

func (*MongodbSettingsObservation) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type MongodbSettingsParameters

type MongodbSettingsParameters struct {

	// Authentication mechanism to access the MongoDB source endpoint. Default is default.
	// +kubebuilder:validation:Optional
	AuthMechanism *string `json:"authMechanism,omitempty" tf:"auth_mechanism,omitempty"`

	// Authentication database name. Not used when auth_type is no. Default is admin.
	// +kubebuilder:validation:Optional
	AuthSource *string `json:"authSource,omitempty" tf:"auth_source,omitempty"`

	// Authentication type to access the MongoDB source endpoint. Default is password.
	// +kubebuilder:validation:Optional
	AuthType *string `json:"authType,omitempty" tf:"auth_type,omitempty"`

	// Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
	// +kubebuilder:validation:Optional
	DocsToInvestigate *string `json:"docsToInvestigate,omitempty" tf:"docs_to_investigate,omitempty"`

	// Document ID. Use this setting when nesting_level is set to none. Default is false.
	// +kubebuilder:validation:Optional
	ExtractDocID *string `json:"extractDocId,omitempty" tf:"extract_doc_id,omitempty"`

	// Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).
	// +kubebuilder:validation:Optional
	NestingLevel *string `json:"nestingLevel,omitempty" tf:"nesting_level,omitempty"`
}

func (*MongodbSettingsParameters) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MongodbSettingsParameters.

func (*MongodbSettingsParameters) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
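
MongodbSettingsParameters configures a MongoDB source endpoint. A minimal Go sketch follows, assuming it lives in this package; the helper closure is a placeholder and the chosen values simply echo the defaults documented in the field comments above.

// exampleMongodbSettings sketches a document-mode MongoDB source configuration.
// Values mirror the documented defaults and are placeholders.
func exampleMongodbSettings() MongodbSettingsParameters {
	str := func(s string) *string { return &s }
	return MongodbSettingsParameters{
		AuthType:          str("password"),
		AuthMechanism:     str("default"),
		AuthSource:        str("admin"),
		NestingLevel:      str("none"),
		ExtractDocID:      str("true"),
		DocsToInvestigate: str("1000"),
	}
}

Note that extract_doc_id and docs_to_investigate are surfaced here as string pointers, so boolean and numeric values are passed in their string form.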

type PostgresSettingsInitParameters

type PostgresSettingsInitParameters struct {

	// For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
	AfterConnectScript *string `json:"afterConnectScript,omitempty" tf:"after_connect_script,omitempty"`

	// The Babelfish for Aurora PostgreSQL database name for the endpoint.
	BabelfishDatabaseName *string `json:"babelfishDatabaseName,omitempty" tf:"babelfish_database_name,omitempty"`

	// To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
	CaptureDdls *bool `json:"captureDdls,omitempty" tf:"capture_ddls,omitempty"`

	// Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
	DatabaseMode *string `json:"databaseMode,omitempty" tf:"database_mode,omitempty"`

	// Sets the schema in which the operational DDL database artifacts are created. Default is public.
	DdlArtifactsSchema *string `json:"ddlArtifactsSchema,omitempty" tf:"ddl_artifacts_schema,omitempty"`

	// Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
	ExecuteTimeout *float64 `json:"executeTimeout,omitempty" tf:"execute_timeout,omitempty"`

	// When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
	FailTasksOnLobTruncation *bool `json:"failTasksOnLobTruncation,omitempty" tf:"fail_tasks_on_lob_truncation,omitempty"`

	// The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source.
	HeartbeatEnable *bool `json:"heartbeatEnable,omitempty" tf:"heartbeat_enable,omitempty"`

	// Sets the WAL heartbeat frequency (in minutes). Default value is 5.
	HeartbeatFrequency *float64 `json:"heartbeatFrequency,omitempty" tf:"heartbeat_frequency,omitempty"`

	// Sets the schema in which the heartbeat artifacts are created. Default value is public.
	HeartbeatSchema *string `json:"heartbeatSchema,omitempty" tf:"heartbeat_schema,omitempty"`

	// You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
	MapBooleanAsBoolean *bool `json:"mapBooleanAsBoolean,omitempty" tf:"map_boolean_as_boolean,omitempty"`

	// Optional. When true, DMS migrates JSONB values as CLOB.
	MapJsonbAsClob *bool `json:"mapJsonbAsClob,omitempty" tf:"map_jsonb_as_clob,omitempty"`

	// Optional. When true, DMS migrates LONG values as VARCHAR.
	MapLongVarcharAs *string `json:"mapLongVarcharAs,omitempty" tf:"map_long_varchar_as,omitempty"`

	// Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
	MaxFileSize *float64 `json:"maxFileSize,omitempty" tf:"max_file_size,omitempty"`

	// Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
	PluginName *string `json:"pluginName,omitempty" tf:"plugin_name,omitempty"`

	// Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
	SlotName *string `json:"slotName,omitempty" tf:"slot_name,omitempty"`
}

func (*PostgresSettingsInitParameters) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSettingsInitParameters.

func (*PostgresSettingsInitParameters) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type PostgresSettingsObservation

type PostgresSettingsObservation struct {

	// For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
	AfterConnectScript *string `json:"afterConnectScript,omitempty" tf:"after_connect_script,omitempty"`

	// The Babelfish for Aurora PostgreSQL database name for the endpoint.
	BabelfishDatabaseName *string `json:"babelfishDatabaseName,omitempty" tf:"babelfish_database_name,omitempty"`

	// To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
	CaptureDdls *bool `json:"captureDdls,omitempty" tf:"capture_ddls,omitempty"`

	// Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
	DatabaseMode *string `json:"databaseMode,omitempty" tf:"database_mode,omitempty"`

	// Sets the schema in which the operational DDL database artifacts are created. Default is public.
	DdlArtifactsSchema *string `json:"ddlArtifactsSchema,omitempty" tf:"ddl_artifacts_schema,omitempty"`

	// Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
	ExecuteTimeout *float64 `json:"executeTimeout,omitempty" tf:"execute_timeout,omitempty"`

	// When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
	FailTasksOnLobTruncation *bool `json:"failTasksOnLobTruncation,omitempty" tf:"fail_tasks_on_lob_truncation,omitempty"`

	// The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source.
	HeartbeatEnable *bool `json:"heartbeatEnable,omitempty" tf:"heartbeat_enable,omitempty"`

	// Sets the WAL heartbeat frequency (in minutes). Default value is 5.
	HeartbeatFrequency *float64 `json:"heartbeatFrequency,omitempty" tf:"heartbeat_frequency,omitempty"`

	// Sets the schema in which the heartbeat artifacts are created. Default value is public.
	HeartbeatSchema *string `json:"heartbeatSchema,omitempty" tf:"heartbeat_schema,omitempty"`

	// You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
	MapBooleanAsBoolean *bool `json:"mapBooleanAsBoolean,omitempty" tf:"map_boolean_as_boolean,omitempty"`

	// Optional. When true, DMS migrates JSONB values as CLOB.
	MapJsonbAsClob *bool `json:"mapJsonbAsClob,omitempty" tf:"map_jsonb_as_clob,omitempty"`

	// Optional. When true, DMS migrates LONG values as VARCHAR.
	MapLongVarcharAs *string `json:"mapLongVarcharAs,omitempty" tf:"map_long_varchar_as,omitempty"`

	// Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
	MaxFileSize *float64 `json:"maxFileSize,omitempty" tf:"max_file_size,omitempty"`

	// Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
	PluginName *string `json:"pluginName,omitempty" tf:"plugin_name,omitempty"`

	// Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
	SlotName *string `json:"slotName,omitempty" tf:"slot_name,omitempty"`
}

func (*PostgresSettingsObservation) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSettingsObservation.

func (*PostgresSettingsObservation) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type PostgresSettingsParameters

type PostgresSettingsParameters struct {

	// For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
	// +kubebuilder:validation:Optional
	AfterConnectScript *string `json:"afterConnectScript,omitempty" tf:"after_connect_script,omitempty"`

	// The Babelfish for Aurora PostgreSQL database name for the endpoint.
	// +kubebuilder:validation:Optional
	BabelfishDatabaseName *string `json:"babelfishDatabaseName,omitempty" tf:"babelfish_database_name,omitempty"`

	// To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
	// +kubebuilder:validation:Optional
	CaptureDdls *bool `json:"captureDdls,omitempty" tf:"capture_ddls,omitempty"`

	// Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
	// +kubebuilder:validation:Optional
	DatabaseMode *string `json:"databaseMode,omitempty" tf:"database_mode,omitempty"`

	// Sets the schema in which the operational DDL database artifacts are created. Default is public.
	// +kubebuilder:validation:Optional
	DdlArtifactsSchema *string `json:"ddlArtifactsSchema,omitempty" tf:"ddl_artifacts_schema,omitempty"`

	// Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
	// +kubebuilder:validation:Optional
	ExecuteTimeout *float64 `json:"executeTimeout,omitempty" tf:"execute_timeout,omitempty"`

	// When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
	// +kubebuilder:validation:Optional
	FailTasksOnLobTruncation *bool `json:"failTasksOnLobTruncation,omitempty" tf:"fail_tasks_on_lob_truncation,omitempty"`

	// The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source.
	// +kubebuilder:validation:Optional
	HeartbeatEnable *bool `json:"heartbeatEnable,omitempty" tf:"heartbeat_enable,omitempty"`

	// Sets the WAL heartbeat frequency (in minutes). Default value is 5.
	// +kubebuilder:validation:Optional
	HeartbeatFrequency *float64 `json:"heartbeatFrequency,omitempty" tf:"heartbeat_frequency,omitempty"`

	// Sets the schema in which the heartbeat artifacts are created. Default value is public.
	// +kubebuilder:validation:Optional
	HeartbeatSchema *string `json:"heartbeatSchema,omitempty" tf:"heartbeat_schema,omitempty"`

	// You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
	// +kubebuilder:validation:Optional
	MapBooleanAsBoolean *bool `json:"mapBooleanAsBoolean,omitempty" tf:"map_boolean_as_boolean,omitempty"`

	// Optional. When true, DMS migrates JSONB values as CLOB.
	// +kubebuilder:validation:Optional
	MapJsonbAsClob *bool `json:"mapJsonbAsClob,omitempty" tf:"map_jsonb_as_clob,omitempty"`

	// Optional. When true, DMS migrates LONG values as VARCHAR.
	// +kubebuilder:validation:Optional
	MapLongVarcharAs *string `json:"mapLongVarcharAs,omitempty" tf:"map_long_varchar_as,omitempty"`

	// Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
	// +kubebuilder:validation:Optional
	MaxFileSize *float64 `json:"maxFileSize,omitempty" tf:"max_file_size,omitempty"`

	// Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
	// +kubebuilder:validation:Optional
	PluginName *string `json:"pluginName,omitempty" tf:"plugin_name,omitempty"`

	// Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
	// +kubebuilder:validation:Optional
	SlotName *string `json:"slotName,omitempty" tf:"slot_name,omitempty"`
}

func (*PostgresSettingsParameters) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresSettingsParameters.

func (*PostgresSettingsParameters) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
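
PostgresSettingsParameters tunes CDC behavior for a PostgreSQL endpoint. A minimal Go sketch, assuming it lives in this package; the helper closures and the slot name are illustrative, and the plugin is one of the two valid values listed above.

// examplePostgresSettings sketches a CDC-oriented PostgreSQL configuration
// with DDL capture and the WAL heartbeat enabled. Values are placeholders.
func examplePostgresSettings() PostgresSettingsParameters {
	str := func(s string) *string { return &s }
	flag := func(b bool) *bool { return &b }
	num := func(f float64) *float64 { return &f }
	return PostgresSettingsParameters{
		CaptureDdls:        flag(true),
		DdlArtifactsSchema: str("public"),
		HeartbeatEnable:    flag(true),
		HeartbeatFrequency: num(5),
		PluginName:         str("pglogical"),
		SlotName:           str("example_dms_slot"),
	}
}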

type RedisSettingsInitParameters

type RedisSettingsInitParameters struct {

	// The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
	AuthPasswordSecretRef *v1.SecretKeySelector `json:"authPasswordSecretRef,omitempty" tf:"-"`

	// Authentication type to access the Redis target endpoint. Valid values are none, auth-token, and auth-role.
	AuthType *string `json:"authType,omitempty" tf:"auth_type,omitempty"`

	// The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
	AuthUserName *string `json:"authUserName,omitempty" tf:"auth_user_name,omitempty"`

	// Port used by the endpoint database.
	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`

	// The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
	SSLCACertificateArn *string `json:"sslCaCertificateArn,omitempty" tf:"ssl_ca_certificate_arn,omitempty"`

	// The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.
	SSLSecurityProtocol *string `json:"sslSecurityProtocol,omitempty" tf:"ssl_security_protocol,omitempty"`

	// Host name of the server.
	ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`
}

func (*RedisSettingsInitParameters) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisSettingsInitParameters.

func (*RedisSettingsInitParameters) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type RedisSettingsObservation

type RedisSettingsObservation struct {

	// Authentication type to access the Redis target endpoint. Valid values are none, auth-token, and auth-role.
	AuthType *string `json:"authType,omitempty" tf:"auth_type,omitempty"`

	// The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
	AuthUserName *string `json:"authUserName,omitempty" tf:"auth_user_name,omitempty"`

	// Port used by the endpoint database.
	Port *float64 `json:"port,omitempty" tf:"port,omitempty"`

	// The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
	SSLCACertificateArn *string `json:"sslCaCertificateArn,omitempty" tf:"ssl_ca_certificate_arn,omitempty"`

	// The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.
	SSLSecurityProtocol *string `json:"sslSecurityProtocol,omitempty" tf:"ssl_security_protocol,omitempty"`

	// Host name of the server.
	ServerName *string `json:"serverName,omitempty" tf:"server_name,omitempty"`
}

func (*RedisSettingsObservation) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisSettingsObservation.

func (*RedisSettingsObservation) DeepCopyInto

func (in *RedisSettingsObservation) DeepCopyInto(out *RedisSettingsObservation)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type RedisSettingsParameters

type RedisSettingsParameters struct {

	// The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
	// +kubebuilder:validation:Optional
	AuthPasswordSecretRef *v1.SecretKeySelector `json:"authPasswordSecretRef,omitempty" tf:"-"`

	// Authentication type to access the Redis target endpoint. Valid values are none, auth-token, and auth-role.
	// +kubebuilder:validation:Optional
	AuthType *string `json:"authType" tf:"auth_type,omitempty"`

	// The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
	// +kubebuilder:validation:Optional
	AuthUserName *string `json:"authUserName,omitempty" tf:"auth_user_name,omitempty"`

	// Port used by the endpoint database.
	// +kubebuilder:validation:Optional
	Port *float64 `json:"port" tf:"port,omitempty"`

	// The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
	// +kubebuilder:validation:Optional
	SSLCACertificateArn *string `json:"sslCaCertificateArn,omitempty" tf:"ssl_ca_certificate_arn,omitempty"`

	// The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.
	// +kubebuilder:validation:Optional
	SSLSecurityProtocol *string `json:"sslSecurityProtocol,omitempty" tf:"ssl_security_protocol,omitempty"`

	// Host name of the server.
	// +kubebuilder:validation:Optional
	ServerName *string `json:"serverName" tf:"server_name,omitempty"`
}

func (*RedisSettingsParameters) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisSettingsParameters.

func (*RedisSettingsParameters) DeepCopyInto

func (in *RedisSettingsParameters) DeepCopyInto(out *RedisSettingsParameters)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
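
RedisSettingsParameters describes a Redis target. Note that authType, port, and serverName carry no omitempty in their JSON tags above, so they are effectively required. A minimal Go sketch, assuming it lives in this package; the server name is a placeholder, and the Kubernetes Secret reference that supplies the auth token is omitted for brevity.

// exampleRedisSettings sketches a TLS-encrypted, token-authenticated Redis
// target. ServerName is a placeholder; AuthPasswordSecretRef (a Kubernetes
// Secret reference) would normally supply the token and is omitted here.
func exampleRedisSettings() RedisSettingsParameters {
	str := func(s string) *string { return &s }
	num := func(f float64) *float64 { return &f }
	return RedisSettingsParameters{
		AuthType:            str("auth-token"),
		ServerName:          str("redis.example.internal"),
		Port:                num(6379),
		SSLSecurityProtocol: str("ssl-encryption"),
	}
}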

type RedshiftSettingsInitParameters

type RedshiftSettingsInitParameters struct {

	// Custom S3 Bucket Object prefix for intermediate storage.
	BucketFolder *string `json:"bucketFolder,omitempty" tf:"bucket_folder,omitempty"`

	// Custom S3 Bucket name for intermediate storage.
	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`

	// The server-side encryption mode that you want to use to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
	EncryptionMode *string `json:"encryptionMode,omitempty" tf:"encryption_mode,omitempty"`

	// ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
	ServerSideEncryptionKMSKeyID *string `json:"serverSideEncryptionKmsKeyId,omitempty" tf:"server_side_encryption_kms_key_id,omitempty"`

	// Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`
}

func (*RedshiftSettingsInitParameters) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftSettingsInitParameters.

func (*RedshiftSettingsInitParameters) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type RedshiftSettingsObservation

type RedshiftSettingsObservation struct {

	// Custom S3 Bucket Object prefix for intermediate storage.
	BucketFolder *string `json:"bucketFolder,omitempty" tf:"bucket_folder,omitempty"`

	// Custom S3 Bucket name for intermediate storage.
	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`

	// The server-side encryption mode that you want to use to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
	EncryptionMode *string `json:"encryptionMode,omitempty" tf:"encryption_mode,omitempty"`

	// ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
	ServerSideEncryptionKMSKeyID *string `json:"serverSideEncryptionKmsKeyId,omitempty" tf:"server_side_encryption_kms_key_id,omitempty"`

	// Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`
}

func (*RedshiftSettingsObservation) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftSettingsObservation.

func (*RedshiftSettingsObservation) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type RedshiftSettingsParameters

type RedshiftSettingsParameters struct {

	// Custom S3 Bucket Object prefix for intermediate storage.
	// +kubebuilder:validation:Optional
	BucketFolder *string `json:"bucketFolder,omitempty" tf:"bucket_folder,omitempty"`

	// Custom S3 Bucket name for intermediate storage.
	// +kubebuilder:validation:Optional
	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`

	// The server-side encryption mode that you want to use to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
	// +kubebuilder:validation:Optional
	EncryptionMode *string `json:"encryptionMode,omitempty" tf:"encryption_mode,omitempty"`

	// ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
	// +kubebuilder:validation:Optional
	ServerSideEncryptionKMSKeyID *string `json:"serverSideEncryptionKmsKeyId,omitempty" tf:"server_side_encryption_kms_key_id,omitempty"`

	// Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
	// +kubebuilder:validation:Optional
	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`
}

func (*RedshiftSettingsParameters) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedshiftSettingsParameters.

func (*RedshiftSettingsParameters) DeepCopyInto

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
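
RedshiftSettingsParameters controls the intermediate S3 staging that DMS uses for a Redshift target. A minimal Go sketch, assuming it lives in this package; the bucket name, role ARN, and KMS key ARN are placeholders.

// exampleRedshiftSettings sketches KMS-encrypted intermediate storage for a
// Redshift target. All names and ARNs are placeholders.
func exampleRedshiftSettings() RedshiftSettingsParameters {
	str := func(s string) *string { return &s }
	return RedshiftSettingsParameters{
		BucketName:                   str("example-dms-staging"),
		BucketFolder:                 str("redshift/"),
		EncryptionMode:               str("SSE_KMS"),
		ServerSideEncryptionKMSKeyID: str("arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000"),
		ServiceAccessRoleArn:         str("arn:aws:iam::123456789012:role/example-dms-redshift"),
	}
}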

type S3SettingsInitParameters

type S3SettingsInitParameters struct {

	// Whether to add column name information to the .csv output file. Default is false.
	AddColumnName *bool `json:"addColumnName,omitempty" tf:"add_column_name,omitempty"`

	// S3 object prefix.
	BucketFolder *string `json:"bucketFolder,omitempty" tf:"bucket_folder,omitempty"`

	// S3 bucket name.
	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`

	// Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
	CannedACLForObjects *string `json:"cannedAclForObjects,omitempty" tf:"canned_acl_for_objects,omitempty"`

	// Whether to write insert and update operations to .csv or .parquet output files. Default is false.
	CdcInsertsAndUpdates *bool `json:"cdcInsertsAndUpdates,omitempty" tf:"cdc_inserts_and_updates,omitempty"`

	// Whether to write insert operations to .csv or .parquet output files. Default is false.
	CdcInsertsOnly *bool `json:"cdcInsertsOnly,omitempty" tf:"cdc_inserts_only,omitempty"`

	// Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.
	CdcMaxBatchInterval *float64 `json:"cdcMaxBatchInterval,omitempty" tf:"cdc_max_batch_interval,omitempty"`

	// Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
	CdcMinFileSize *float64 `json:"cdcMinFileSize,omitempty" tf:"cdc_min_file_size,omitempty"`

	// Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
	CdcPath *string `json:"cdcPath,omitempty" tf:"cdc_path,omitempty"`

	// Set to compress target files. Default is NONE. Valid values are GZIP and NONE.
	CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"`

	// Delimiter used to separate columns in the source files. Default is ,.
	CsvDelimiter *string `json:"csvDelimiter,omitempty" tf:"csv_delimiter,omitempty"`

	// String to use for all columns not included in the supplemental log.
	CsvNoSupValue *string `json:"csvNoSupValue,omitempty" tf:"csv_no_sup_value,omitempty"`

	// String to use as null when writing to the target.
	CsvNullValue *string `json:"csvNullValue,omitempty" tf:"csv_null_value,omitempty"`

	// Delimiter used to separate rows in the source files. Default is \n.
	CsvRowDelimiter *string `json:"csvRowDelimiter,omitempty" tf:"csv_row_delimiter,omitempty"`

	// Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.
	DataFormat *string `json:"dataFormat,omitempty" tf:"data_format,omitempty"`

	// Size of one data page in bytes. Default is 1048576 (1 MiB).
	DataPageSize *float64 `json:"dataPageSize,omitempty" tf:"data_page_size,omitempty"`

	// Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.
	DatePartitionDelimiter *string `json:"datePartitionDelimiter,omitempty" tf:"date_partition_delimiter,omitempty"`

	// Partition S3 bucket folders based on transaction commit dates. Default is false.
	DatePartitionEnabled *bool `json:"datePartitionEnabled,omitempty" tf:"date_partition_enabled,omitempty"`

	// Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.
	DatePartitionSequence *string `json:"datePartitionSequence,omitempty" tf:"date_partition_sequence,omitempty"`

	// Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).
	DictPageSizeLimit *float64 `json:"dictPageSizeLimit,omitempty" tf:"dict_page_size_limit,omitempty"`

	// Whether to enable statistics for Parquet pages and row groups. Default is true.
	EnableStatistics *bool `json:"enableStatistics,omitempty" tf:"enable_statistics,omitempty"`

	// Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.
	EncodingType *string `json:"encodingType,omitempty" tf:"encoding_type,omitempty"`

	// Server-side encryption mode that you want to use to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3.
	EncryptionMode *string `json:"encryptionMode,omitempty" tf:"encryption_mode,omitempty"`

	// JSON document that describes how AWS DMS should interpret the data.
	ExternalTableDefinition *string `json:"externalTableDefinition,omitempty" tf:"external_table_definition,omitempty"`

	// Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
	GlueCatalogGeneration *bool `json:"glueCatalogGeneration,omitempty" tf:"glue_catalog_generation,omitempty"`

	// When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.
	IgnoreHeaderRows *float64 `json:"ignoreHeaderRows,omitempty" tf:"ignore_header_rows,omitempty"`

	// Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
	IncludeOpForFullLoad *bool `json:"includeOpForFullLoad,omitempty" tf:"include_op_for_full_load,omitempty"`

	// Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).
	MaxFileSize *float64 `json:"maxFileSize,omitempty" tf:"max_file_size,omitempty"`

	// Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.
	ParquetTimestampInMillisecond *bool `json:"parquetTimestampInMillisecond,omitempty" tf:"parquet_timestamp_in_millisecond,omitempty"`

	// Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.
	ParquetVersion *string `json:"parquetVersion,omitempty" tf:"parquet_version,omitempty"`

	// Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.
	PreserveTransactions *bool `json:"preserveTransactions,omitempty" tf:"preserve_transactions,omitempty"`

	// For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
	Rfc4180 *bool `json:"rfc4180,omitempty" tf:"rfc_4180,omitempty"`

	// Number of rows in a row group. Default is 10000.
	RowGroupLength *float64 `json:"rowGroupLength,omitempty" tf:"row_group_length,omitempty"`

	// ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
	ServerSideEncryptionKMSKeyID *string `json:"serverSideEncryptionKmsKeyId,omitempty" tf:"server_side_encryption_kms_key_id,omitempty"`

	// ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`

	// Column to add with timestamp information to the endpoint data for an Amazon S3 target.
	TimestampColumnName *string `json:"timestampColumnName,omitempty" tf:"timestamp_column_name,omitempty"`

	// Whether to use csv_no_sup_value for columns not included in the supplemental log.
	UseCsvNoSupValue *bool `json:"useCsvNoSupValue,omitempty" tf:"use_csv_no_sup_value,omitempty"`

	// When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.
	UseTaskStartTimeForFullLoadTimestamp *bool `json:"useTaskStartTimeForFullLoadTimestamp,omitempty" tf:"use_task_start_time_for_full_load_timestamp,omitempty"`
}

func (*S3SettingsInitParameters) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SettingsInitParameters.

func (*S3SettingsInitParameters) DeepCopyInto

func (in *S3SettingsInitParameters) DeepCopyInto(out *S3SettingsInitParameters)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type S3SettingsObservation

type S3SettingsObservation struct {

	// Whether to add column name information to the .csv output file. Default is false.
	AddColumnName *bool `json:"addColumnName,omitempty" tf:"add_column_name,omitempty"`

	// S3 object prefix.
	BucketFolder *string `json:"bucketFolder,omitempty" tf:"bucket_folder,omitempty"`

	// S3 bucket name.
	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`

	// Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
	CannedACLForObjects *string `json:"cannedAclForObjects,omitempty" tf:"canned_acl_for_objects,omitempty"`

	// Whether to write insert and update operations to .csv or .parquet output files. Default is false.
	CdcInsertsAndUpdates *bool `json:"cdcInsertsAndUpdates,omitempty" tf:"cdc_inserts_and_updates,omitempty"`

	// Whether to write insert operations to .csv or .parquet output files. Default is false.
	CdcInsertsOnly *bool `json:"cdcInsertsOnly,omitempty" tf:"cdc_inserts_only,omitempty"`

	// Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.
	CdcMaxBatchInterval *float64 `json:"cdcMaxBatchInterval,omitempty" tf:"cdc_max_batch_interval,omitempty"`

	// Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
	CdcMinFileSize *float64 `json:"cdcMinFileSize,omitempty" tf:"cdc_min_file_size,omitempty"`

	// Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
	CdcPath *string `json:"cdcPath,omitempty" tf:"cdc_path,omitempty"`

	// Set to compress target files. Default is NONE. Valid values are GZIP and NONE.
	CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"`

	// Delimiter used to separate columns in the source files. Default is ,.
	CsvDelimiter *string `json:"csvDelimiter,omitempty" tf:"csv_delimiter,omitempty"`

	// String to use for all columns not included in the supplemental log.
	CsvNoSupValue *string `json:"csvNoSupValue,omitempty" tf:"csv_no_sup_value,omitempty"`

	// String to use as null when writing to the target.
	CsvNullValue *string `json:"csvNullValue,omitempty" tf:"csv_null_value,omitempty"`

	// Delimiter used to separate rows in the source files. Default is \n.
	CsvRowDelimiter *string `json:"csvRowDelimiter,omitempty" tf:"csv_row_delimiter,omitempty"`

	// Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.
	DataFormat *string `json:"dataFormat,omitempty" tf:"data_format,omitempty"`

	// Size of one data page in bytes. Default is 1048576 (1 MiB).
	DataPageSize *float64 `json:"dataPageSize,omitempty" tf:"data_page_size,omitempty"`

	// Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.
	DatePartitionDelimiter *string `json:"datePartitionDelimiter,omitempty" tf:"date_partition_delimiter,omitempty"`

	// Partition S3 bucket folders based on transaction commit dates. Default is false.
	DatePartitionEnabled *bool `json:"datePartitionEnabled,omitempty" tf:"date_partition_enabled,omitempty"`

	// Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.
	DatePartitionSequence *string `json:"datePartitionSequence,omitempty" tf:"date_partition_sequence,omitempty"`

	// Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).
	DictPageSizeLimit *float64 `json:"dictPageSizeLimit,omitempty" tf:"dict_page_size_limit,omitempty"`

	// Whether to enable statistics for Parquet pages and row groups. Default is true.
	EnableStatistics *bool `json:"enableStatistics,omitempty" tf:"enable_statistics,omitempty"`

	// Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.
	EncodingType *string `json:"encodingType,omitempty" tf:"encoding_type,omitempty"`

	// Server-side encryption mode that you want to use to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3.
	EncryptionMode *string `json:"encryptionMode,omitempty" tf:"encryption_mode,omitempty"`

	// JSON document that describes how AWS DMS should interpret the data.
	ExternalTableDefinition *string `json:"externalTableDefinition,omitempty" tf:"external_table_definition,omitempty"`

	// Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
	GlueCatalogGeneration *bool `json:"glueCatalogGeneration,omitempty" tf:"glue_catalog_generation,omitempty"`

	// When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.
	IgnoreHeaderRows *float64 `json:"ignoreHeaderRows,omitempty" tf:"ignore_header_rows,omitempty"`

	// Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
	IncludeOpForFullLoad *bool `json:"includeOpForFullLoad,omitempty" tf:"include_op_for_full_load,omitempty"`

	// Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).
	MaxFileSize *float64 `json:"maxFileSize,omitempty" tf:"max_file_size,omitempty"`

	// Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.
	ParquetTimestampInMillisecond *bool `json:"parquetTimestampInMillisecond,omitempty" tf:"parquet_timestamp_in_millisecond,omitempty"`

	// Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.
	ParquetVersion *string `json:"parquetVersion,omitempty" tf:"parquet_version,omitempty"`

	// Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.
	PreserveTransactions *bool `json:"preserveTransactions,omitempty" tf:"preserve_transactions,omitempty"`

	// For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
	Rfc4180 *bool `json:"rfc4180,omitempty" tf:"rfc_4180,omitempty"`

	// Number of rows in a row group. Default is 10000.
	RowGroupLength *float64 `json:"rowGroupLength,omitempty" tf:"row_group_length,omitempty"`

	// ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
	ServerSideEncryptionKMSKeyID *string `json:"serverSideEncryptionKmsKeyId,omitempty" tf:"server_side_encryption_kms_key_id,omitempty"`

	// ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`

	// Column to add with timestamp information to the endpoint data for an Amazon S3 target.
	TimestampColumnName *string `json:"timestampColumnName,omitempty" tf:"timestamp_column_name,omitempty"`

	// Whether to use csv_no_sup_value for columns not included in the supplemental log.
	UseCsvNoSupValue *bool `json:"useCsvNoSupValue,omitempty" tf:"use_csv_no_sup_value,omitempty"`

	// When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.
	UseTaskStartTimeForFullLoadTimestamp *bool `json:"useTaskStartTimeForFullLoadTimestamp,omitempty" tf:"use_task_start_time_for_full_load_timestamp,omitempty"`
}

func (*S3SettingsObservation) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SettingsObservation.

func (*S3SettingsObservation) DeepCopyInto

func (in *S3SettingsObservation) DeepCopyInto(out *S3SettingsObservation)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.

type S3SettingsParameters

type S3SettingsParameters struct {

	// Whether to add column name information to the .csv output file. Default is false.
	// +kubebuilder:validation:Optional
	AddColumnName *bool `json:"addColumnName,omitempty" tf:"add_column_name,omitempty"`

	// S3 object prefix.
	// +kubebuilder:validation:Optional
	BucketFolder *string `json:"bucketFolder,omitempty" tf:"bucket_folder,omitempty"`

	// S3 bucket name.
	// +kubebuilder:validation:Optional
	BucketName *string `json:"bucketName,omitempty" tf:"bucket_name,omitempty"`

	// Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
	// +kubebuilder:validation:Optional
	CannedACLForObjects *string `json:"cannedAclForObjects,omitempty" tf:"canned_acl_for_objects,omitempty"`

	// Whether to write insert and update operations to .csv or .parquet output files. Default is false.
	// +kubebuilder:validation:Optional
	CdcInsertsAndUpdates *bool `json:"cdcInsertsAndUpdates,omitempty" tf:"cdc_inserts_and_updates,omitempty"`

	// Whether to write insert operations to .csv or .parquet output files. Default is false.
	// +kubebuilder:validation:Optional
	CdcInsertsOnly *bool `json:"cdcInsertsOnly,omitempty" tf:"cdc_inserts_only,omitempty"`

	// Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.
	// +kubebuilder:validation:Optional
	CdcMaxBatchInterval *float64 `json:"cdcMaxBatchInterval,omitempty" tf:"cdc_max_batch_interval,omitempty"`

	// Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
	// +kubebuilder:validation:Optional
	CdcMinFileSize *float64 `json:"cdcMinFileSize,omitempty" tf:"cdc_min_file_size,omitempty"`

	// Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
	// +kubebuilder:validation:Optional
	CdcPath *string `json:"cdcPath,omitempty" tf:"cdc_path,omitempty"`

	// Set to compress target files. Default is NONE. Valid values are GZIP and NONE.
	// +kubebuilder:validation:Optional
	CompressionType *string `json:"compressionType,omitempty" tf:"compression_type,omitempty"`

	// Delimiter used to separate columns in the source files. Default is ,.
	// +kubebuilder:validation:Optional
	CsvDelimiter *string `json:"csvDelimiter,omitempty" tf:"csv_delimiter,omitempty"`

	// String to use for all columns not included in the supplemental log.
	// +kubebuilder:validation:Optional
	CsvNoSupValue *string `json:"csvNoSupValue,omitempty" tf:"csv_no_sup_value,omitempty"`

	// String to use as null when writing to the target.
	// +kubebuilder:validation:Optional
	CsvNullValue *string `json:"csvNullValue,omitempty" tf:"csv_null_value,omitempty"`

	// Delimiter used to separate rows in the source files. Default is \n.
	// +kubebuilder:validation:Optional
	CsvRowDelimiter *string `json:"csvRowDelimiter,omitempty" tf:"csv_row_delimiter,omitempty"`

	// Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.
	// +kubebuilder:validation:Optional
	DataFormat *string `json:"dataFormat,omitempty" tf:"data_format,omitempty"`

	// Size of one data page in bytes. Default is 1048576 (1 MiB).
	// +kubebuilder:validation:Optional
	DataPageSize *float64 `json:"dataPageSize,omitempty" tf:"data_page_size,omitempty"`

	// Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.
	// +kubebuilder:validation:Optional
	DatePartitionDelimiter *string `json:"datePartitionDelimiter,omitempty" tf:"date_partition_delimiter,omitempty"`

	// Partition S3 bucket folders based on transaction commit dates. Default is false.
	// +kubebuilder:validation:Optional
	DatePartitionEnabled *bool `json:"datePartitionEnabled,omitempty" tf:"date_partition_enabled,omitempty"`

	// Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.
	// +kubebuilder:validation:Optional
	DatePartitionSequence *string `json:"datePartitionSequence,omitempty" tf:"date_partition_sequence,omitempty"`

	// Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).
	// +kubebuilder:validation:Optional
	DictPageSizeLimit *float64 `json:"dictPageSizeLimit,omitempty" tf:"dict_page_size_limit,omitempty"`

	// Whether to enable statistics for Parquet pages and row groups. Default is true.
	// +kubebuilder:validation:Optional
	EnableStatistics *bool `json:"enableStatistics,omitempty" tf:"enable_statistics,omitempty"`

	// Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.
	// +kubebuilder:validation:Optional
	EncodingType *string `json:"encodingType,omitempty" tf:"encoding_type,omitempty"`

	// Server-side encryption mode that you want to use to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3.
	// +kubebuilder:validation:Optional
	EncryptionMode *string `json:"encryptionMode,omitempty" tf:"encryption_mode,omitempty"`

	// JSON document that describes how AWS DMS should interpret the data.
	// +kubebuilder:validation:Optional
	ExternalTableDefinition *string `json:"externalTableDefinition,omitempty" tf:"external_table_definition,omitempty"`

	// Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
	// +kubebuilder:validation:Optional
	GlueCatalogGeneration *bool `json:"glueCatalogGeneration,omitempty" tf:"glue_catalog_generation,omitempty"`

	// When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.
	// +kubebuilder:validation:Optional
	IgnoreHeaderRows *float64 `json:"ignoreHeaderRows,omitempty" tf:"ignore_header_rows,omitempty"`

	// Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
	// +kubebuilder:validation:Optional
	IncludeOpForFullLoad *bool `json:"includeOpForFullLoad,omitempty" tf:"include_op_for_full_load,omitempty"`

	// Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).
	// +kubebuilder:validation:Optional
	MaxFileSize *float64 `json:"maxFileSize,omitempty" tf:"max_file_size,omitempty"`

	// Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.
	// +kubebuilder:validation:Optional
	ParquetTimestampInMillisecond *bool `json:"parquetTimestampInMillisecond,omitempty" tf:"parquet_timestamp_in_millisecond,omitempty"`

	// Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.
	// +kubebuilder:validation:Optional
	ParquetVersion *string `json:"parquetVersion,omitempty" tf:"parquet_version,omitempty"`

	// Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.
	// +kubebuilder:validation:Optional
	PreserveTransactions *bool `json:"preserveTransactions,omitempty" tf:"preserve_transactions,omitempty"`

	// For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
	// +kubebuilder:validation:Optional
	Rfc4180 *bool `json:"rfc4180,omitempty" tf:"rfc_4180,omitempty"`

	// Number of rows in a row group. Default is 10000.
	// +kubebuilder:validation:Optional
	RowGroupLength *float64 `json:"rowGroupLength,omitempty" tf:"row_group_length,omitempty"`

	// ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
	// +kubebuilder:validation:Optional
	ServerSideEncryptionKMSKeyID *string `json:"serverSideEncryptionKmsKeyId,omitempty" tf:"server_side_encryption_kms_key_id,omitempty"`

	// ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
	// +kubebuilder:validation:Optional
	ServiceAccessRoleArn *string `json:"serviceAccessRoleArn,omitempty" tf:"service_access_role_arn,omitempty"`

	// Column to add with timestamp information to the endpoint data for an Amazon S3 target.
	// +kubebuilder:validation:Optional
	TimestampColumnName *string `json:"timestampColumnName,omitempty" tf:"timestamp_column_name,omitempty"`

	// Whether to use csv_no_sup_value for columns not included in the supplemental log.
	// +kubebuilder:validation:Optional
	UseCsvNoSupValue *bool `json:"useCsvNoSupValue,omitempty" tf:"use_csv_no_sup_value,omitempty"`

	// When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.
	// +kubebuilder:validation:Optional
	UseTaskStartTimeForFullLoadTimestamp *bool `json:"useTaskStartTimeForFullLoadTimestamp,omitempty" tf:"use_task_start_time_for_full_load_timestamp,omitempty"`
}

func (*S3SettingsParameters) DeepCopy

DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3SettingsParameters.

func (*S3SettingsParameters) DeepCopyInto

func (in *S3SettingsParameters) DeepCopyInto(out *S3SettingsParameters)

DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
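
S3SettingsParameters is the richest of the endpoint settings blocks. A minimal Go sketch of a Parquet target with date-based folder partitioning follows, assuming it lives in this package; the bucket, folder, role ARN, and timestamp column name are placeholders.

// exampleS3Settings sketches a compressed Parquet S3 target partitioned by
// transaction commit date. All names and the ARN are placeholders.
func exampleS3Settings() S3SettingsParameters {
	str := func(s string) *string { return &s }
	flag := func(b bool) *bool { return &b }
	num := func(f float64) *float64 { return &f }
	return S3SettingsParameters{
		BucketName:           str("example-dms-target"),
		BucketFolder:         str("cdc/"),
		DataFormat:           str("parquet"),
		ParquetVersion:       str("parquet-2-0"),
		CompressionType:      str("GZIP"),
		DatePartitionEnabled: flag(true),
		CdcMaxBatchInterval:  num(120),
		ServiceAccessRoleArn: str("arn:aws:iam::123456789012:role/example-dms-s3"),
		TimestampColumnName:  str("dms_commit_ts"),
	}
}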
