Documentation ¶
Overview ¶
+kubebuilder:object:generate=true +groupName=cluster.mongodbatlas.crossplane.io +versionName=v1alpha1
Index ¶
- Constants
- Variables
- type AdvancedConfigurationObservation
- type AdvancedConfigurationParameters
- type BiConnectorConfigObservation
- type BiConnectorConfigParameters
- type Cluster
- func (in *Cluster) DeepCopy() *Cluster
- func (in *Cluster) DeepCopyInto(out *Cluster)
- func (in *Cluster) DeepCopyObject() runtime.Object
- func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition
- func (tr *Cluster) GetConnectionDetailsMapping() map[string]string
- func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy
- func (tr *Cluster) GetID() string
- func (mg *Cluster) GetManagementPolicy() xpv1.ManagementPolicy
- func (tr *Cluster) GetObservation() (map[string]any, error)
- func (tr *Cluster) GetParameters() (map[string]any, error)
- func (mg *Cluster) GetProviderConfigReference() *xpv1.Reference
- func (mg *Cluster) GetProviderReference() *xpv1.Reference
- func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo
- func (mg *Cluster) GetTerraformResourceType() string
- func (tr *Cluster) GetTerraformSchemaVersion() int
- func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference
- func (tr *Cluster) LateInitialize(attrs []byte) (bool, error)
- func (mg *Cluster) SetConditions(c ...xpv1.Condition)
- func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy)
- func (mg *Cluster) SetManagementPolicy(r xpv1.ManagementPolicy)
- func (tr *Cluster) SetObservation(obs map[string]any) error
- func (tr *Cluster) SetParameters(params map[string]any) error
- func (mg *Cluster) SetProviderConfigReference(r *xpv1.Reference)
- func (mg *Cluster) SetProviderReference(r *xpv1.Reference)
- func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo)
- func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference)
- type ClusterList
- type ClusterObservation
- type ClusterParameters
- type ClusterSpec
- type ClusterStatus
- type ConnectionStringsObservation
- type ConnectionStringsParameters
- type EndpointsObservation
- type EndpointsParameters
- type LabelsObservation
- type LabelsParameters
- type OutageFiltersObservation
- type OutageFiltersParameters
- type OutageSimulation
- func (in *OutageSimulation) DeepCopy() *OutageSimulation
- func (in *OutageSimulation) DeepCopyInto(out *OutageSimulation)
- func (in *OutageSimulation) DeepCopyObject() runtime.Object
- func (mg *OutageSimulation) GetCondition(ct xpv1.ConditionType) xpv1.Condition
- func (tr *OutageSimulation) GetConnectionDetailsMapping() map[string]string
- func (mg *OutageSimulation) GetDeletionPolicy() xpv1.DeletionPolicy
- func (tr *OutageSimulation) GetID() string
- func (mg *OutageSimulation) GetManagementPolicy() xpv1.ManagementPolicy
- func (tr *OutageSimulation) GetObservation() (map[string]any, error)
- func (tr *OutageSimulation) GetParameters() (map[string]any, error)
- func (mg *OutageSimulation) GetProviderConfigReference() *xpv1.Reference
- func (mg *OutageSimulation) GetProviderReference() *xpv1.Reference
- func (mg *OutageSimulation) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo
- func (mg *OutageSimulation) GetTerraformResourceType() string
- func (tr *OutageSimulation) GetTerraformSchemaVersion() int
- func (mg *OutageSimulation) GetWriteConnectionSecretToReference() *xpv1.SecretReference
- func (tr *OutageSimulation) LateInitialize(attrs []byte) (bool, error)
- func (mg *OutageSimulation) SetConditions(c ...xpv1.Condition)
- func (mg *OutageSimulation) SetDeletionPolicy(r xpv1.DeletionPolicy)
- func (mg *OutageSimulation) SetManagementPolicy(r xpv1.ManagementPolicy)
- func (tr *OutageSimulation) SetObservation(obs map[string]any) error
- func (tr *OutageSimulation) SetParameters(params map[string]any) error
- func (mg *OutageSimulation) SetProviderConfigReference(r *xpv1.Reference)
- func (mg *OutageSimulation) SetProviderReference(r *xpv1.Reference)
- func (mg *OutageSimulation) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo)
- func (mg *OutageSimulation) SetWriteConnectionSecretToReference(r *xpv1.SecretReference)
- type OutageSimulationList
- type OutageSimulationObservation
- type OutageSimulationParameters
- type OutageSimulationSpec
- type OutageSimulationStatus
- type PoliciesObservation
- type PoliciesParameters
- type PolicyItemObservation
- type PolicyItemParameters
- type PrivateEndpointObservation
- type PrivateEndpointParameters
- type RegionsConfigObservation
- type RegionsConfigParameters
- type ReplicationSpecsObservation
- type ReplicationSpecsParameters
- type SnapshotBackupPolicyObservation
- type SnapshotBackupPolicyParameters
Constants ¶
const ( CRDGroup = "cluster.mongodbatlas.crossplane.io" CRDVersion = "v1alpha1" )
Package type metadata.
Variables ¶
var ( Cluster_Kind = "Cluster" Cluster_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: Cluster_Kind}.String() Cluster_KindAPIVersion = Cluster_Kind + "." + CRDGroupVersion.String() Cluster_GroupVersionKind = CRDGroupVersion.WithKind(Cluster_Kind) )
Repository type metadata.
var ( // CRDGroupVersion is the API Group Version used to register the objects CRDGroupVersion = schema.GroupVersion{Group: CRDGroup, Version: CRDVersion} // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = &scheme.Builder{GroupVersion: CRDGroupVersion} // AddToScheme adds the types in this group-version to the given scheme. AddToScheme = SchemeBuilder.AddToScheme )
var ( OutageSimulation_Kind = "OutageSimulation" OutageSimulation_GroupKind = schema.GroupKind{Group: CRDGroup, Kind: OutageSimulation_Kind}.String() OutageSimulation_KindAPIVersion = OutageSimulation_Kind + "." + CRDGroupVersion.String() OutageSimulation_GroupVersionKind = CRDGroupVersion.WithKind(OutageSimulation_Kind) )
Repository type metadata.
Functions ¶
This section is empty.
Types ¶
type AdvancedConfigurationObservation ¶
type AdvancedConfigurationObservation struct { // Default level of acknowledgment requested from MongoDB for read operations set for this cluster. MongoDB 4.4 clusters default to available. DefaultReadConcern *string `json:"defaultReadConcern,omitempty" tf:"default_read_concern,omitempty"` // Default level of acknowledgment requested from MongoDB for write operations set for this cluster. MongoDB 4.4 clusters default to 1. DefaultWriteConcern *string `json:"defaultWriteConcern,omitempty" tf:"default_write_concern,omitempty"` // When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them. FailIndexKeyTooLong *bool `json:"failIndexKeyTooLong,omitempty" tf:"fail_index_key_too_long,omitempty"` // When true, the cluster allows execution of operations that perform server-side executions of JavaScript. When false, the cluster disables execution of those operations. JavascriptEnabled *bool `json:"javascriptEnabled,omitempty" tf:"javascript_enabled,omitempty"` // Sets the minimum Transport Layer Security (TLS) version the cluster accepts for incoming connections.Valid values are: MinimumEnabledTLSProtocol *string `json:"minimumEnabledTlsProtocol,omitempty" tf:"minimum_enabled_tls_protocol,omitempty"` // When true, the cluster disables the execution of any query that requires a collection scan to return results. When false, the cluster allows the execution of those operations. NoTableScan *bool `json:"noTableScan,omitempty" tf:"no_table_scan,omitempty"` // Minimum retention window for cluster's oplog expressed in hours. A value of null indicates that the cluster uses the default minimum oplog window that MongoDB Cloud calculates. OplogMinRetentionHours *float64 `json:"oplogMinRetentionHours,omitempty" tf:"oplog_min_retention_hours,omitempty"` // The custom oplog size of the cluster. 
Without a value that indicates that the cluster uses the default oplog size calculated by Atlas. OplogSizeMb *float64 `json:"oplogSizeMb,omitempty" tf:"oplog_size_mb,omitempty"` // Interval in seconds at which the mongosqld process re-samples data to create its relational schema. The default value is 300. The specified value must be a positive integer. Available only for Atlas deployments in which BI Connector for Atlas is enabled. SampleRefreshIntervalBiConnector *float64 `json:"sampleRefreshIntervalBiConnector,omitempty" tf:"sample_refresh_interval_bi_connector,omitempty"` // Number of documents per database to sample when gathering schema information. Defaults to 100. Available only for Atlas deployments in which BI Connector for Atlas is enabled. SampleSizeBiConnector *float64 `json:"sampleSizeBiConnector,omitempty" tf:"sample_size_bi_connector,omitempty"` // Lifetime, in seconds, of multi-document transactions. Defaults to 60 seconds. TransactionLifetimeLimitSeconds *float64 `json:"transactionLifetimeLimitSeconds,omitempty" tf:"transaction_lifetime_limit_seconds,omitempty"` }
func (*AdvancedConfigurationObservation) DeepCopy ¶
func (in *AdvancedConfigurationObservation) DeepCopy() *AdvancedConfigurationObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedConfigurationObservation.
func (*AdvancedConfigurationObservation) DeepCopyInto ¶
func (in *AdvancedConfigurationObservation) DeepCopyInto(out *AdvancedConfigurationObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type AdvancedConfigurationParameters ¶
type AdvancedConfigurationParameters struct { // Default level of acknowledgment requested from MongoDB for read operations set for this cluster. MongoDB 4.4 clusters default to available. // +kubebuilder:validation:Optional DefaultReadConcern *string `json:"defaultReadConcern,omitempty" tf:"default_read_concern,omitempty"` // Default level of acknowledgment requested from MongoDB for write operations set for this cluster. MongoDB 4.4 clusters default to 1. // +kubebuilder:validation:Optional DefaultWriteConcern *string `json:"defaultWriteConcern,omitempty" tf:"default_write_concern,omitempty"` // When true, documents can only be updated or inserted if, for all indexed fields on the target collection, the corresponding index entries do not exceed 1024 bytes. When false, mongod writes documents that exceed the limit but does not index them. // +kubebuilder:validation:Optional FailIndexKeyTooLong *bool `json:"failIndexKeyTooLong,omitempty" tf:"fail_index_key_too_long,omitempty"` // When true, the cluster allows execution of operations that perform server-side executions of JavaScript. When false, the cluster disables execution of those operations. // +kubebuilder:validation:Optional JavascriptEnabled *bool `json:"javascriptEnabled,omitempty" tf:"javascript_enabled,omitempty"` // Sets the minimum Transport Layer Security (TLS) version the cluster accepts for incoming connections.Valid values are: // +kubebuilder:validation:Optional MinimumEnabledTLSProtocol *string `json:"minimumEnabledTlsProtocol,omitempty" tf:"minimum_enabled_tls_protocol,omitempty"` // When true, the cluster disables the execution of any query that requires a collection scan to return results. When false, the cluster allows the execution of those operations. // +kubebuilder:validation:Optional NoTableScan *bool `json:"noTableScan,omitempty" tf:"no_table_scan,omitempty"` // Minimum retention window for cluster's oplog expressed in hours. 
A value of null indicates that the cluster uses the default minimum oplog window that MongoDB Cloud calculates. // +kubebuilder:validation:Optional OplogMinRetentionHours *float64 `json:"oplogMinRetentionHours,omitempty" tf:"oplog_min_retention_hours,omitempty"` // The custom oplog size of the cluster. Without a value that indicates that the cluster uses the default oplog size calculated by Atlas. // +kubebuilder:validation:Optional OplogSizeMb *float64 `json:"oplogSizeMb,omitempty" tf:"oplog_size_mb,omitempty"` // Interval in seconds at which the mongosqld process re-samples data to create its relational schema. The default value is 300. The specified value must be a positive integer. Available only for Atlas deployments in which BI Connector for Atlas is enabled. // +kubebuilder:validation:Optional SampleRefreshIntervalBiConnector *float64 `json:"sampleRefreshIntervalBiConnector,omitempty" tf:"sample_refresh_interval_bi_connector,omitempty"` // Number of documents per database to sample when gathering schema information. Defaults to 100. Available only for Atlas deployments in which BI Connector for Atlas is enabled. // +kubebuilder:validation:Optional SampleSizeBiConnector *float64 `json:"sampleSizeBiConnector,omitempty" tf:"sample_size_bi_connector,omitempty"` // Lifetime, in seconds, of multi-document transactions. Defaults to 60 seconds. // +kubebuilder:validation:Optional TransactionLifetimeLimitSeconds *float64 `json:"transactionLifetimeLimitSeconds,omitempty" tf:"transaction_lifetime_limit_seconds,omitempty"` }
func (*AdvancedConfigurationParameters) DeepCopy ¶
func (in *AdvancedConfigurationParameters) DeepCopy() *AdvancedConfigurationParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedConfigurationParameters.
func (*AdvancedConfigurationParameters) DeepCopyInto ¶
func (in *AdvancedConfigurationParameters) DeepCopyInto(out *AdvancedConfigurationParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type BiConnectorConfigObservation ¶
type BiConnectorConfigObservation struct { // Specifies whether or not BI Connector for Atlas is enabled on the cluster. Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` // Specifies the read preference to be used by BI Connector for Atlas on the cluster. Each BI Connector for Atlas read preference contains a distinct combination of readPreference and readPreferenceTags options. For details on BI Connector for Atlas read preferences, refer to the BI Connector Read Preferences Table. ReadPreference *string `json:"readPreference,omitempty" tf:"read_preference,omitempty"` }
func (*BiConnectorConfigObservation) DeepCopy ¶
func (in *BiConnectorConfigObservation) DeepCopy() *BiConnectorConfigObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BiConnectorConfigObservation.
func (*BiConnectorConfigObservation) DeepCopyInto ¶
func (in *BiConnectorConfigObservation) DeepCopyInto(out *BiConnectorConfigObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type BiConnectorConfigParameters ¶
type BiConnectorConfigParameters struct { // Specifies whether or not BI Connector for Atlas is enabled on the cluster. // +kubebuilder:validation:Optional Enabled *bool `json:"enabled,omitempty" tf:"enabled,omitempty"` // Specifies the read preference to be used by BI Connector for Atlas on the cluster. Each BI Connector for Atlas read preference contains a distinct combination of readPreference and readPreferenceTags options. For details on BI Connector for Atlas read preferences, refer to the BI Connector Read Preferences Table. // +kubebuilder:validation:Optional ReadPreference *string `json:"readPreference,omitempty" tf:"read_preference,omitempty"` }
func (*BiConnectorConfigParameters) DeepCopy ¶
func (in *BiConnectorConfigParameters) DeepCopy() *BiConnectorConfigParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BiConnectorConfigParameters.
func (*BiConnectorConfigParameters) DeepCopyInto ¶
func (in *BiConnectorConfigParameters) DeepCopyInto(out *BiConnectorConfigParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type Cluster ¶
type Cluster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.name)",message="name is a required parameter" // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.projectId)",message="projectId is a required parameter" // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.providerInstanceSizeName)",message="providerInstanceSizeName is a required parameter" // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.providerName)",message="providerName is a required parameter" Spec ClusterSpec `json:"spec"` Status ClusterStatus `json:"status,omitempty"` }
Cluster is the Schema for the Clusters API. Provides a Cluster resource. +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +kubebuilder:subresource:status +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,mongodbatlas}
func (*Cluster) DeepCopy ¶
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
func (*Cluster) DeepCopyInto ¶
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*Cluster) DeepCopyObject ¶
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (*Cluster) GetCondition ¶
func (mg *Cluster) GetCondition(ct xpv1.ConditionType) xpv1.Condition
GetCondition of this Cluster.
func (*Cluster) GetConnectionDetailsMapping ¶
GetConnectionDetailsMapping for this Cluster
func (*Cluster) GetDeletionPolicy ¶
func (mg *Cluster) GetDeletionPolicy() xpv1.DeletionPolicy
GetDeletionPolicy of this Cluster.
func (*Cluster) GetManagementPolicy ¶
func (mg *Cluster) GetManagementPolicy() xpv1.ManagementPolicy
GetManagementPolicy of this Cluster.
func (*Cluster) GetObservation ¶
GetObservation of this Cluster
func (*Cluster) GetParameters ¶
GetParameters of this Cluster
func (*Cluster) GetProviderConfigReference ¶
GetProviderConfigReference of this Cluster.
func (*Cluster) GetProviderReference ¶
GetProviderReference of this Cluster. Deprecated: Use GetProviderConfigReference.
func (*Cluster) GetPublishConnectionDetailsTo ¶
func (mg *Cluster) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo
GetPublishConnectionDetailsTo of this Cluster.
func (*Cluster) GetTerraformResourceType ¶
GetTerraformResourceType returns Terraform resource type for this Cluster
func (*Cluster) GetTerraformSchemaVersion ¶
GetTerraformSchemaVersion returns the associated Terraform schema version
func (*Cluster) GetWriteConnectionSecretToReference ¶
func (mg *Cluster) GetWriteConnectionSecretToReference() *xpv1.SecretReference
GetWriteConnectionSecretToReference of this Cluster.
func (*Cluster) LateInitialize ¶
LateInitialize this Cluster using its observed tfState. returns True if there are any spec changes for the resource.
func (*Cluster) SetConditions ¶
SetConditions of this Cluster.
func (*Cluster) SetDeletionPolicy ¶
func (mg *Cluster) SetDeletionPolicy(r xpv1.DeletionPolicy)
SetDeletionPolicy of this Cluster.
func (*Cluster) SetManagementPolicy ¶
func (mg *Cluster) SetManagementPolicy(r xpv1.ManagementPolicy)
SetManagementPolicy of this Cluster.
func (*Cluster) SetObservation ¶
SetObservation for this Cluster
func (*Cluster) SetParameters ¶
SetParameters for this Cluster
func (*Cluster) SetProviderConfigReference ¶
SetProviderConfigReference of this Cluster.
func (*Cluster) SetProviderReference ¶
SetProviderReference of this Cluster. Deprecated: Use SetProviderConfigReference.
func (*Cluster) SetPublishConnectionDetailsTo ¶
func (mg *Cluster) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo)
SetPublishConnectionDetailsTo of this Cluster.
func (*Cluster) SetWriteConnectionSecretToReference ¶
func (mg *Cluster) SetWriteConnectionSecretToReference(r *xpv1.SecretReference)
SetWriteConnectionSecretToReference of this Cluster.
type ClusterList ¶
type ClusterList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []Cluster `json:"items"` }
ClusterList contains a list of Clusters
func (*ClusterList) DeepCopy ¶
func (in *ClusterList) DeepCopy() *ClusterList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterList.
func (*ClusterList) DeepCopyInto ¶
func (in *ClusterList) DeepCopyInto(out *ClusterList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*ClusterList) DeepCopyObject ¶
func (in *ClusterList) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (*ClusterList) GetItems ¶
func (l *ClusterList) GetItems() []resource.Managed
GetItems of this ClusterList.
type ClusterObservation ¶
type ClusterObservation struct { AdvancedConfiguration []AdvancedConfigurationObservation `json:"advancedConfiguration,omitempty" tf:"advanced_configuration,omitempty"` // Specifies whether cluster tier auto-scaling is enabled. The default is false. AutoScalingComputeEnabled *bool `json:"autoScalingComputeEnabled,omitempty" tf:"auto_scaling_compute_enabled,omitempty"` // Set to true to enable the cluster tier to scale down. This option is only available if autoScaling.compute.enabled is true. AutoScalingComputeScaleDownEnabled *bool `json:"autoScalingComputeScaleDownEnabled,omitempty" tf:"auto_scaling_compute_scale_down_enabled,omitempty"` // Specifies whether disk auto-scaling is enabled. The default is true. AutoScalingDiskGbEnabled *bool `json:"autoScalingDiskGbEnabled,omitempty" tf:"auto_scaling_disk_gb_enabled,omitempty"` // Cloud service provider on which the server for a multi-tenant cluster is provisioned. BackingProviderName *string `json:"backingProviderName,omitempty" tf:"backing_provider_name,omitempty"` // Legacy Backup - Set to true to enable Atlas legacy backups for the cluster. // Important - MongoDB deprecated the Legacy Backup feature. Clusters that use Legacy Backup can continue to use it. MongoDB recommends using Cloud Backups. // Clusters running MongoDB FCV 4.2 or later and any new Atlas clusters of any type do not support this parameter BackupEnabled *bool `json:"backupEnabled,omitempty" tf:"backup_enabled,omitempty"` // Specifies BI Connector for Atlas configuration on this cluster. BI Connector for Atlas is only available for M10+ clusters. See BI Connector below for more details. DEPRECATED Use bi_connector_config instead. BiConnector map[string]*string `json:"biConnector,omitempty" tf:"bi_connector,omitempty"` // Specifies BI Connector for Atlas configuration on this cluster. BI Connector for Atlas is only available for M10+ clusters. See BI Connector below for more details. 
BiConnectorConfig []BiConnectorConfigObservation `json:"biConnectorConfig,omitempty" tf:"bi_connector_config,omitempty"` // , to enable Cloud Backup. If you create a new Atlas cluster and set backup_enabled to true, the Provider will respond with an error. This change doesn’t affect existing clusters that use legacy backups. CloudBackup *bool `json:"cloudBackup,omitempty" tf:"cloud_backup,omitempty"` // The cluster ID. ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"` // Specifies the type of the cluster that you want to modify. You cannot convert a sharded cluster deployment to a replica set deployment. ClusterType *string `json:"clusterType,omitempty" tf:"cluster_type,omitempty"` // Set of connection strings that your applications use to connect to this cluster. More info in Connection-strings. Use the parameters in this object to connect your applications to this cluster. To learn more about the formats of connection strings, see Connection String Options. NOTE: Atlas returns the contents of this object after the cluster is operational, not while it builds the cluster. ConnectionStrings []ConnectionStringsObservation `json:"connectionStrings,omitempty" tf:"connection_strings,omitempty"` // The Container ID is the id of the container created when the first cluster in the region (AWS/Azure) or project (GCP) was created. ContainerID *string `json:"containerId,omitempty" tf:"container_id,omitempty"` // Capacity, in gigabytes, of the host’s root volume. Increase this number to add capacity, up to a maximum possible value of 4096 (i.e., 4 TB). This value must be a positive integer. DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` // Possible values are AWS, GCP, AZURE or NONE. Only needed if you desire to manage the keys, see Encryption at Rest using Customer Key Management for complete documentation. You must configure encryption at rest for the Atlas project before enabling it on any cluster in the project. 
For complete documentation on configuring Encryption at Rest, see Encryption at Rest using Customer Key Management. Requires M10 or greater, and for legacy backups, backup_enabled, to be false or omitted. Note: Atlas encrypts all cluster storage and snapshot volumes, securing all cluster data on disk: a concept known as encryption at rest, by default. EncryptionAtRestProvider *string `json:"encryptionAtRestProvider,omitempty" tf:"encryption_at_rest_provider,omitempty"` // Unique identifier of the replication document for a zone in a Global Cluster. ID *string `json:"id,omitempty" tf:"id,omitempty"` Labels []LabelsObservation `json:"labels,omitempty" tf:"labels,omitempty"` // Version of the cluster to deploy. Atlas supports the following MongoDB versions for M10+ clusters: 4.2, 4.4, 5.0, or 6.0. If omitted, Atlas deploys a cluster that runs MongoDB 5.0. If provider_instance_size_name: M0, M2 or M5, Atlas deploys MongoDB 5.0. Atlas always deploys the cluster with the latest stable release of the specified version. See Release Notes for latest Current Stable Release. MongoDBMajorVersion *string `json:"mongoDbMajorVersion,omitempty" tf:"mongo_db_major_version,omitempty"` // Version of MongoDB the cluster runs, in major-version.minor-version format. MongoDBVersion *string `json:"mongoDbVersion,omitempty" tf:"mongo_db_version,omitempty"` // Base connection string for the cluster. Atlas only displays this field after the cluster is operational, not while it builds the cluster. MongoURI *string `json:"mongoUri,omitempty" tf:"mongo_uri,omitempty"` // Lists when the connection string was last updated. The connection string changes, for example, if you change a replica set to a sharded cluster. MongoURIUpdated *string `json:"mongoUriUpdated,omitempty" tf:"mongo_uri_updated,omitempty"` // connection string for connecting to the Atlas cluster. Includes the replicaSet, ssl, and authSource query parameters in the connection string with values appropriate for the cluster. 
MongoURIWithOptions *string `json:"mongoUriWithOptions,omitempty" tf:"mongo_uri_with_options,omitempty"` // Name of the cluster as it appears in Atlas. Once the cluster is created, its name cannot be changed. WARNING Changing the name will result in destruction of the existing cluster and the creation of a new cluster. Name *string `json:"name,omitempty" tf:"name,omitempty"` // Number of shards to deploy in the specified zone, minimum 1. NumShards *float64 `json:"numShards,omitempty" tf:"num_shards,omitempty"` // Flag that indicates whether the cluster is paused or not. You can pause M10 or larger clusters. You cannot initiate pausing for a shared/tenant tier cluster. See Considerations for Paused Clusters // NOTE Pause lasts for up to 30 days. If you don't resume the cluster within 30 days, Atlas resumes the cluster. If you prefer to allow the automated change of state to unpaused use: // lifecycle { ignore_changes = [paused] } Paused *bool `json:"paused,omitempty" tf:"paused,omitempty"` // - Flag that indicates if the cluster uses Continuous Cloud Backup. If set to true, cloud_backup must also be set to true. PitEnabled *bool `json:"pitEnabled,omitempty" tf:"pit_enabled,omitempty"` // The unique ID for the project to create the database user. ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` // Maximum instance size to which your cluster can automatically scale (e.g., M40). Required if autoScaling.compute.enabled is true. ProviderAutoScalingComputeMaxInstanceSize *string `json:"providerAutoScalingComputeMaxInstanceSize,omitempty" tf:"provider_auto_scaling_compute_max_instance_size,omitempty"` // Minimum instance size to which your cluster can automatically scale (e.g., M10). Required if autoScaling.compute.scaleDownEnabled is true. 
ProviderAutoScalingComputeMinInstanceSize *string `json:"providerAutoScalingComputeMinInstanceSize,omitempty" tf:"provider_auto_scaling_compute_min_instance_size,omitempty"` // Flag indicating if the cluster uses Cloud Backup for backups. Deprecated use cloud_backup instead. ProviderBackupEnabled *bool `json:"providerBackupEnabled,omitempty" tf:"provider_backup_enabled,omitempty"` // The maximum input/output operations per second (IOPS) the system can perform. The possible values depend on the selected provider_instance_size_name and disk_size_gb. This setting requires that provider_instance_size_name to be M30 or greater and cannot be used with clusters with local NVMe SSDs. The default value for provider_disk_iops is the same as the cluster tier's Standard IOPS value, as viewable in the Atlas console. It is used in cases where a higher number of IOPS is needed and possible. If a value is submitted that is lower or equal to the default IOPS value for the cluster tier Atlas ignores the requested value and uses the default. More details available under the providerSettings.diskIOPS parameter: MongoDB API Clusters ProviderDiskIops *float64 `json:"providerDiskIops,omitempty" tf:"provider_disk_iops,omitempty"` // Azure disk type of the server’s root volume. If omitted, Atlas uses the default disk type for the selected providerSettings.instanceSizeName. Example disk types and associated storage sizes: P4 - 32GB, P6 - 64GB, P10 - 128GB, P15 - 256GB, P20 - 512GB, P30 - 1024GB, P40 - 2048GB, P50 - 4095GB. More information and the most update to date disk types/storage sizes can be located at https://docs.atlas.mongodb.com/reference/api/clusters-create-one/. ProviderDiskTypeName *string `json:"providerDiskTypeName,omitempty" tf:"provider_disk_type_name,omitempty"` // (Deprecated) The Flag is always true. 
Flag that indicates whether the Amazon EBS encryption feature encrypts the host's root volume for both data at rest within the volume and for data moving between the volume and the cluster. Note: This setting is always enabled for clusters with local NVMe SSDs. Atlas encrypts all cluster storage and snapshot volumes, securing all cluster data on disk: a concept known as encryption at rest, by default. ProviderEncryptEBSVolume *bool `json:"providerEncryptEbsVolume,omitempty" tf:"provider_encrypt_ebs_volume,omitempty"` ProviderEncryptEBSVolumeFlag *bool `json:"providerEncryptEbsVolumeFlag,omitempty" tf:"provider_encrypt_ebs_volume_flag,omitempty"` // Atlas provides different instance sizes, each with a default storage capacity and RAM size. The instance size you select is used for all the data-bearing servers in your cluster. See Create a Cluster providerSettings.instanceSizeName for valid values and default resources. ProviderInstanceSizeName *string `json:"providerInstanceSizeName,omitempty" tf:"provider_instance_size_name,omitempty"` // Cloud service provider on which the servers are provisioned. ProviderName *string `json:"providerName,omitempty" tf:"provider_name,omitempty"` // Physical location of your MongoDB cluster. The region you choose can affect network latency for clients accessing your databases. Requires the Atlas region name, see the reference list for AWS, GCP, Azure. // Do not specify this field when creating a multi-region cluster using the replicationSpec document or a Global Cluster with the replicationSpecs array. ProviderRegionName *string `json:"providerRegionName,omitempty" tf:"provider_region_name,omitempty"` // (AWS - Optional) The type of the volume. The possible values are: STANDARD and PROVISIONED. PROVISIONED is ONLY required if setting IOPS higher than the default instance IOPS. // -> NOTE: STANDARD is not available for NVME clusters. 
ProviderVolumeType *string `json:"providerVolumeType,omitempty" tf:"provider_volume_type,omitempty"` // (Deprecated) Number of replica set members. Each member keeps a copy of your databases, providing high availability and data redundancy. The possible values are 3, 5, or 7. The default value is 3. ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` // Configuration for cluster regions. See Replication Spec below for more details. ReplicationSpecs []ReplicationSpecsObservation `json:"replicationSpecs,omitempty" tf:"replication_specs,omitempty"` // Set to true to retain backup snapshots for the deleted cluster. M10 and above only. // Flag that indicates whether to retain backup snapshots for the deleted dedicated cluster RetainBackupsEnabled *bool `json:"retainBackupsEnabled,omitempty" tf:"retain_backups_enabled,omitempty"` // current snapshot schedule and retention settings for the cluster. SnapshotBackupPolicy []SnapshotBackupPolicyObservation `json:"snapshotBackupPolicy,omitempty" tf:"snapshot_backup_policy,omitempty"` // Connection string for connecting to the Atlas cluster. The +srv modifier forces the connection to use TLS/SSL. See the mongoURI for additional options. SrvAddress *string `json:"srvAddress,omitempty" tf:"srv_address,omitempty"` // Current state of the cluster. The possible states are: StateName *string `json:"stateName,omitempty" tf:"state_name,omitempty"` // Flag that indicates whether termination protection is enabled on the cluster. If set to true, MongoDB Cloud won't delete the cluster. If set to false, MongoDB Cloud will delete the cluster. TerminationProtectionEnabled *bool `json:"terminationProtectionEnabled,omitempty" tf:"termination_protection_enabled,omitempty"` // - Release cadence that Atlas uses for this cluster. This parameter defaults to LTS. If you set this field to CONTINUOUS, you must omit the mongo_db_major_version field. 
Atlas accepts: VersionReleaseSystem *string `json:"versionReleaseSystem,omitempty" tf:"version_release_system,omitempty"` }
func (*ClusterObservation) DeepCopy ¶
func (in *ClusterObservation) DeepCopy() *ClusterObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterObservation.
func (*ClusterObservation) DeepCopyInto ¶
func (in *ClusterObservation) DeepCopyInto(out *ClusterObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterParameters ¶
type ClusterParameters struct { // +kubebuilder:validation:Optional AdvancedConfiguration []AdvancedConfigurationParameters `json:"advancedConfiguration,omitempty" tf:"advanced_configuration,omitempty"` // Specifies whether cluster tier auto-scaling is enabled. The default is false. // +kubebuilder:validation:Optional AutoScalingComputeEnabled *bool `json:"autoScalingComputeEnabled,omitempty" tf:"auto_scaling_compute_enabled,omitempty"` // Set to true to enable the cluster tier to scale down. This option is only available if autoScaling.compute.enabled is true. // +kubebuilder:validation:Optional AutoScalingComputeScaleDownEnabled *bool `json:"autoScalingComputeScaleDownEnabled,omitempty" tf:"auto_scaling_compute_scale_down_enabled,omitempty"` // Specifies whether disk auto-scaling is enabled. The default is true. // +kubebuilder:validation:Optional AutoScalingDiskGbEnabled *bool `json:"autoScalingDiskGbEnabled,omitempty" tf:"auto_scaling_disk_gb_enabled,omitempty"` // Cloud service provider on which the server for a multi-tenant cluster is provisioned. // +kubebuilder:validation:Optional BackingProviderName *string `json:"backingProviderName,omitempty" tf:"backing_provider_name,omitempty"` // Legacy Backup - Set to true to enable Atlas legacy backups for the cluster. // Important - MongoDB deprecated the Legacy Backup feature. Clusters that use Legacy Backup can continue to use it. MongoDB recommends using Cloud Backups. // Clusters running MongoDB FCV 4.2 or later and any new Atlas clusters of any type do not support this parameter // +kubebuilder:validation:Optional BackupEnabled *bool `json:"backupEnabled,omitempty" tf:"backup_enabled,omitempty"` // Specifies BI Connector for Atlas configuration on this cluster. BI Connector for Atlas is only available for M10+ clusters. See BI Connector below for more details. DEPRECATED Use bi_connector_config instead. 
// +kubebuilder:validation:Optional BiConnector map[string]*string `json:"biConnector,omitempty" tf:"bi_connector,omitempty"` // Specifies BI Connector for Atlas configuration on this cluster. BI Connector for Atlas is only available for M10+ clusters. See BI Connector below for more details. // +kubebuilder:validation:Optional BiConnectorConfig []BiConnectorConfigParameters `json:"biConnectorConfig,omitempty" tf:"bi_connector_config,omitempty"` // , to enable Cloud Backup. If you create a new Atlas cluster and set backup_enabled to true, the Provider will respond with an error. This change doesn’t affect existing clusters that use legacy backups. // +kubebuilder:validation:Optional CloudBackup *bool `json:"cloudBackup,omitempty" tf:"cloud_backup,omitempty"` // Specifies the type of the cluster that you want to modify. You cannot convert a sharded cluster deployment to a replica set deployment. // +kubebuilder:validation:Optional ClusterType *string `json:"clusterType,omitempty" tf:"cluster_type,omitempty"` // Capacity, in gigabytes, of the host’s root volume. Increase this number to add capacity, up to a maximum possible value of 4096 (i.e., 4 TB). This value must be a positive integer. // +kubebuilder:validation:Optional DiskSizeGb *float64 `json:"diskSizeGb,omitempty" tf:"disk_size_gb,omitempty"` // Possible values are AWS, GCP, AZURE or NONE. Only needed if you desire to manage the keys, see Encryption at Rest using Customer Key Management for complete documentation. You must configure encryption at rest for the Atlas project before enabling it on any cluster in the project. For complete documentation on configuring Encryption at Rest, see Encryption at Rest using Customer Key Management. Requires M10 or greater. and for legacy backups, backup_enabled, to be false or omitted. Note: Atlas encrypts all cluster storage and snapshot volumes, securing all cluster data on disk: a concept known as encryption at rest, by default. 
// +kubebuilder:validation:Optional EncryptionAtRestProvider *string `json:"encryptionAtRestProvider,omitempty" tf:"encryption_at_rest_provider,omitempty"` // +kubebuilder:validation:Optional Labels []LabelsParameters `json:"labels,omitempty" tf:"labels,omitempty"` // Version of the cluster to deploy. Atlas supports the following MongoDB versions for M10+ clusters: 4.2, 4.4, 5.0, or 6.0. If omitted, Atlas deploys a cluster that runs MongoDB 5.0. If provider_instance_size_name: M0, M2 or M5, Atlas deploys MongoDB 5.0. Atlas always deploys the cluster with the latest stable release of the specified version. See Release Notes for latest Current Stable Release. // +kubebuilder:validation:Optional MongoDBMajorVersion *string `json:"mongoDbMajorVersion,omitempty" tf:"mongo_db_major_version,omitempty"` // Name of the cluster as it appears in Atlas. Once the cluster is created, its name cannot be changed. WARNING Changing the name will result in destruction of the existing cluster and the creation of a new cluster. // +kubebuilder:validation:Optional Name *string `json:"name,omitempty" tf:"name,omitempty"` // Number of shards to deploy in the specified zone, minimum 1. // +kubebuilder:validation:Optional NumShards *float64 `json:"numShards,omitempty" tf:"num_shards,omitempty"` // Flag that indicates whether the cluster is paused or not. You can pause M10 or larger clusters. You cannot initiate pausing for a shared/tenant tier cluster. See Considerations for Paused Clusters // NOTE Pause lasts for up to 30 days. If you don't resume the cluster within 30 days, Atlas resumes the cluster. If you prefer to allow the automated change of state to unpaused use: // lifecycle { ignore_changes = [paused] } // +kubebuilder:validation:Optional Paused *bool `json:"paused,omitempty" tf:"paused,omitempty"` // - Flag that indicates if the cluster uses Continuous Cloud Backup. If set to true, cloud_backup must also be set to true. 
// +kubebuilder:validation:Optional PitEnabled *bool `json:"pitEnabled,omitempty" tf:"pit_enabled,omitempty"` // The unique ID for the project to create the database user. // +kubebuilder:validation:Optional ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` // Maximum instance size to which your cluster can automatically scale (e.g., M40). Required if autoScaling.compute.enabled is true. // +kubebuilder:validation:Optional ProviderAutoScalingComputeMaxInstanceSize *string `json:"providerAutoScalingComputeMaxInstanceSize,omitempty" tf:"provider_auto_scaling_compute_max_instance_size,omitempty"` // Minimum instance size to which your cluster can automatically scale (e.g., M10). Required if autoScaling.compute.scaleDownEnabled is true. // +kubebuilder:validation:Optional ProviderAutoScalingComputeMinInstanceSize *string `json:"providerAutoScalingComputeMinInstanceSize,omitempty" tf:"provider_auto_scaling_compute_min_instance_size,omitempty"` // Flag indicating if the cluster uses Cloud Backup for backups. Deprecated use cloud_backup instead. // +kubebuilder:validation:Optional ProviderBackupEnabled *bool `json:"providerBackupEnabled,omitempty" tf:"provider_backup_enabled,omitempty"` // The maximum input/output operations per second (IOPS) the system can perform. The possible values depend on the selected provider_instance_size_name and disk_size_gb. This setting requires that provider_instance_size_name to be M30 or greater and cannot be used with clusters with local NVMe SSDs. The default value for provider_disk_iops is the same as the cluster tier's Standard IOPS value, as viewable in the Atlas console. It is used in cases where a higher number of IOPS is needed and possible. If a value is submitted that is lower or equal to the default IOPS value for the cluster tier Atlas ignores the requested value and uses the default. 
More details available under the providerSettings.diskIOPS parameter: MongoDB API Clusters // +kubebuilder:validation:Optional ProviderDiskIops *float64 `json:"providerDiskIops,omitempty" tf:"provider_disk_iops,omitempty"` // Azure disk type of the server’s root volume. If omitted, Atlas uses the default disk type for the selected providerSettings.instanceSizeName. Example disk types and associated storage sizes: P4 - 32GB, P6 - 64GB, P10 - 128GB, P15 - 256GB, P20 - 512GB, P30 - 1024GB, P40 - 2048GB, P50 - 4095GB. More information and the most update to date disk types/storage sizes can be located at https://docs.atlas.mongodb.com/reference/api/clusters-create-one/. // +kubebuilder:validation:Optional ProviderDiskTypeName *string `json:"providerDiskTypeName,omitempty" tf:"provider_disk_type_name,omitempty"` // (Deprecated) The Flag is always true. Flag that indicates whether the Amazon EBS encryption feature encrypts the host's root volume for both data at rest within the volume and for data moving between the volume and the cluster. Note: This setting is always enabled for clusters with local NVMe SSDs. Atlas encrypts all cluster storage and snapshot volumes, securing all cluster data on disk: a concept known as encryption at rest, by default.. // +kubebuilder:validation:Optional ProviderEncryptEBSVolume *bool `json:"providerEncryptEbsVolume,omitempty" tf:"provider_encrypt_ebs_volume,omitempty"` // Atlas provides different instance sizes, each with a default storage capacity and RAM size. The instance size you select is used for all the data-bearing servers in your cluster. See Create a Cluster providerSettings.instanceSizeName for valid values and default resources. // +kubebuilder:validation:Optional ProviderInstanceSizeName *string `json:"providerInstanceSizeName,omitempty" tf:"provider_instance_size_name,omitempty"` // Cloud service provider on which the servers are provisioned. 
// +kubebuilder:validation:Optional ProviderName *string `json:"providerName,omitempty" tf:"provider_name,omitempty"` // Physical location of your MongoDB cluster. The region you choose can affect network latency for clients accessing your databases. Requires the Atlas region name, see the reference list for AWS, GCP, Azure. // Do not specify this field when creating a multi-region cluster using the replicationSpec document or a Global Cluster with the replicationSpecs array. // +kubebuilder:validation:Optional ProviderRegionName *string `json:"providerRegionName,omitempty" tf:"provider_region_name,omitempty"` // (AWS - Optional) The type of the volume. The possible values are: STANDARD and PROVISIONED. PROVISIONED is ONLY required if setting IOPS higher than the default instance IOPS. // -> NOTE: STANDARD is not available for NVME clusters. // +kubebuilder:validation:Optional ProviderVolumeType *string `json:"providerVolumeType,omitempty" tf:"provider_volume_type,omitempty"` // (Deprecated) Number of replica set members. Each member keeps a copy of your databases, providing high availability and data redundancy. The possible values are 3, 5, or 7. The default value is 3. // +kubebuilder:validation:Optional ReplicationFactor *float64 `json:"replicationFactor,omitempty" tf:"replication_factor,omitempty"` // Configuration for cluster regions. See Replication Spec below for more details. // +kubebuilder:validation:Optional ReplicationSpecs []ReplicationSpecsParameters `json:"replicationSpecs,omitempty" tf:"replication_specs,omitempty"` // Set to true to retain backup snapshots for the deleted cluster. M10 and above only. // Flag that indicates whether to retain backup snapshots for the deleted dedicated cluster // +kubebuilder:validation:Optional RetainBackupsEnabled *bool `json:"retainBackupsEnabled,omitempty" tf:"retain_backups_enabled,omitempty"` // Flag that indicates whether termination protection is enabled on the cluster. 
If set to true, MongoDB Cloud won't delete the cluster. If set to false, MongoDB Cloud will delete the cluster. // +kubebuilder:validation:Optional TerminationProtectionEnabled *bool `json:"terminationProtectionEnabled,omitempty" tf:"termination_protection_enabled,omitempty"` // - Release cadence that Atlas uses for this cluster. This parameter defaults to LTS. If you set this field to CONTINUOUS, you must omit the mongo_db_major_version field. Atlas accepts: // +kubebuilder:validation:Optional VersionReleaseSystem *string `json:"versionReleaseSystem,omitempty" tf:"version_release_system,omitempty"` }
func (*ClusterParameters) DeepCopy ¶
func (in *ClusterParameters) DeepCopy() *ClusterParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterParameters.
func (*ClusterParameters) DeepCopyInto ¶
func (in *ClusterParameters) DeepCopyInto(out *ClusterParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterSpec ¶
type ClusterSpec struct { v1.ResourceSpec `json:",inline"` ForProvider ClusterParameters `json:"forProvider"` }
ClusterSpec defines the desired state of Cluster
func (*ClusterSpec) DeepCopy ¶
func (in *ClusterSpec) DeepCopy() *ClusterSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
func (*ClusterSpec) DeepCopyInto ¶
func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ClusterStatus ¶
type ClusterStatus struct { v1.ResourceStatus `json:",inline"` AtProvider ClusterObservation `json:"atProvider,omitempty"` }
ClusterStatus defines the observed state of Cluster.
func (*ClusterStatus) DeepCopy ¶
func (in *ClusterStatus) DeepCopy() *ClusterStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
func (*ClusterStatus) DeepCopyInto ¶
func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ConnectionStringsObservation ¶
type ConnectionStringsObservation struct { // Private-endpoint-aware mongodb://connection strings for each interface VPC endpoint you configured to connect to this cluster. Returned only if you created an AWS PrivateLink connection to this cluster. DEPRECATED Use connection_strings.private_endpoint[n].connection_string instead. AwsPrivateLink map[string]*string `json:"awsPrivateLink,omitempty" tf:"aws_private_link,omitempty"` // Private-endpoint-aware mongodb+srv://connection strings for each interface VPC endpoint you configured to connect to this cluster. Returned only if you created an AWS PrivateLink connection to this cluster. Use this URI format if your driver supports it. If it doesn’t, use connectionStrings.awsPrivateLink. DEPRECATED Use connection_strings.private_endpoint[n].srv_connection_string instead. AwsPrivateLinkSrv map[string]*string `json:"awsPrivateLinkSrv,omitempty" tf:"aws_private_link_srv,omitempty"` // Network-peering-endpoint-aware mongodb://connection strings for each interface VPC endpoint you configured to connect to this cluster. Returned only if you created a network peering connection to this cluster. Private *string `json:"private,omitempty" tf:"private,omitempty"` // Private endpoint connection strings. Each object describes the connection strings you can use to connect to this cluster through a private endpoint. Atlas returns this parameter only if you deployed a private endpoint to all regions to which you deployed this cluster's nodes. PrivateEndpoint []PrivateEndpointObservation `json:"privateEndpoint,omitempty" tf:"private_endpoint,omitempty"` // Network-peering-endpoint-aware mongodb+srv://connection strings for each interface VPC endpoint you configured to connect to this cluster. Returned only if you created a network peering connection to this cluster. PrivateSrv *string `json:"privateSrv,omitempty" tf:"private_srv,omitempty"` // Public mongodb:// connection string for this cluster. 
Standard *string `json:"standard,omitempty" tf:"standard,omitempty"` // Public mongodb+srv:// connection string for this cluster. The mongodb+srv protocol tells the driver to look up the seed list of hosts in DNS. Atlas synchronizes this list with the nodes in a cluster. If the connection string uses this URI format, you don’t need to append the seed list or change the URI if the nodes change. Use this URI format if your driver supports it. If it doesn’t, use connectionStrings.standard. StandardSrv *string `json:"standardSrv,omitempty" tf:"standard_srv,omitempty"` }
func (*ConnectionStringsObservation) DeepCopy ¶
func (in *ConnectionStringsObservation) DeepCopy() *ConnectionStringsObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStringsObservation.
func (*ConnectionStringsObservation) DeepCopyInto ¶
func (in *ConnectionStringsObservation) DeepCopyInto(out *ConnectionStringsObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ConnectionStringsParameters ¶
type ConnectionStringsParameters struct { }
func (*ConnectionStringsParameters) DeepCopy ¶
func (in *ConnectionStringsParameters) DeepCopy() *ConnectionStringsParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionStringsParameters.
func (*ConnectionStringsParameters) DeepCopyInto ¶
func (in *ConnectionStringsParameters) DeepCopyInto(out *ConnectionStringsParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type EndpointsObservation ¶
type EndpointsObservation struct { // Unique identifier of the private endpoint. EndpointID *string `json:"endpointId,omitempty" tf:"endpoint_id,omitempty"` // Cloud service provider on which the servers are provisioned. ProviderName *string `json:"providerName,omitempty" tf:"provider_name,omitempty"` // Region to which you deployed the private endpoint. Region *string `json:"region,omitempty" tf:"region,omitempty"` }
func (*EndpointsObservation) DeepCopy ¶
func (in *EndpointsObservation) DeepCopy() *EndpointsObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsObservation.
func (*EndpointsObservation) DeepCopyInto ¶
func (in *EndpointsObservation) DeepCopyInto(out *EndpointsObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type EndpointsParameters ¶
type EndpointsParameters struct { }
func (*EndpointsParameters) DeepCopy ¶
func (in *EndpointsParameters) DeepCopy() *EndpointsParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsParameters.
func (*EndpointsParameters) DeepCopyInto ¶
func (in *EndpointsParameters) DeepCopyInto(out *EndpointsParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LabelsObservation ¶
type LabelsObservation struct { // The key that you want to write. Key *string `json:"key,omitempty" tf:"key,omitempty"` // The value that you want to write. Value *string `json:"value,omitempty" tf:"value,omitempty"` }
func (*LabelsObservation) DeepCopy ¶
func (in *LabelsObservation) DeepCopy() *LabelsObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsObservation.
func (*LabelsObservation) DeepCopyInto ¶
func (in *LabelsObservation) DeepCopyInto(out *LabelsObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type LabelsParameters ¶
type LabelsParameters struct { // The key that you want to write. // +kubebuilder:validation:Optional Key *string `json:"key,omitempty" tf:"key,omitempty"` // The value that you want to write. // +kubebuilder:validation:Optional Value *string `json:"value,omitempty" tf:"value,omitempty"` }
func (*LabelsParameters) DeepCopy ¶
func (in *LabelsParameters) DeepCopy() *LabelsParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsParameters.
func (*LabelsParameters) DeepCopyInto ¶
func (in *LabelsParameters) DeepCopyInto(out *LabelsParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type OutageFiltersObservation ¶
type OutageFiltersObservation struct { // The cloud provider of the region that undergoes the outage simulation. Following values are supported: CloudProvider *string `json:"cloudProvider,omitempty" tf:"cloud_provider,omitempty"` // The Atlas name of the region to undergo an outage simulation. RegionName *string `json:"regionName,omitempty" tf:"region_name,omitempty"` // The type of cluster outage simulation. Following values are supported: Type *string `json:"type,omitempty" tf:"type,omitempty"` }
func (*OutageFiltersObservation) DeepCopy ¶
func (in *OutageFiltersObservation) DeepCopy() *OutageFiltersObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutageFiltersObservation.
func (*OutageFiltersObservation) DeepCopyInto ¶
func (in *OutageFiltersObservation) DeepCopyInto(out *OutageFiltersObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type OutageFiltersParameters ¶
type OutageFiltersParameters struct { // The cloud provider of the region that undergoes the outage simulation. Following values are supported: // +kubebuilder:validation:Required CloudProvider *string `json:"cloudProvider" tf:"cloud_provider,omitempty"` // The Atlas name of the region to undergo an outage simulation. // +kubebuilder:validation:Required RegionName *string `json:"regionName" tf:"region_name,omitempty"` }
func (*OutageFiltersParameters) DeepCopy ¶
func (in *OutageFiltersParameters) DeepCopy() *OutageFiltersParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutageFiltersParameters.
func (*OutageFiltersParameters) DeepCopyInto ¶
func (in *OutageFiltersParameters) DeepCopyInto(out *OutageFiltersParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type OutageSimulation ¶
type OutageSimulation struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.clusterName)",message="clusterName is a required parameter" // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.outageFilters)",message="outageFilters is a required parameter" // +kubebuilder:validation:XValidation:rule="self.managementPolicy == 'ObserveOnly' || has(self.forProvider.projectId)",message="projectId is a required parameter" Spec OutageSimulationSpec `json:"spec"` Status OutageSimulationStatus `json:"status,omitempty"` }
OutageSimulation is the Schema for the OutageSimulations API. Provides a Cluster Outage Simulation resource. +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status" +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status" +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name" +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +kubebuilder:subresource:status +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,mongodbatlas}
func (*OutageSimulation) DeepCopy ¶
func (in *OutageSimulation) DeepCopy() *OutageSimulation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutageSimulation.
func (*OutageSimulation) DeepCopyInto ¶
func (in *OutageSimulation) DeepCopyInto(out *OutageSimulation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*OutageSimulation) DeepCopyObject ¶
func (in *OutageSimulation) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (*OutageSimulation) GetCondition ¶
func (mg *OutageSimulation) GetCondition(ct xpv1.ConditionType) xpv1.Condition
GetCondition of this OutageSimulation.
func (*OutageSimulation) GetConnectionDetailsMapping ¶
func (tr *OutageSimulation) GetConnectionDetailsMapping() map[string]string
GetConnectionDetailsMapping for this OutageSimulation
func (*OutageSimulation) GetDeletionPolicy ¶
func (mg *OutageSimulation) GetDeletionPolicy() xpv1.DeletionPolicy
GetDeletionPolicy of this OutageSimulation.
func (*OutageSimulation) GetID ¶
func (tr *OutageSimulation) GetID() string
GetID returns ID of underlying Terraform resource of this OutageSimulation
func (*OutageSimulation) GetManagementPolicy ¶
func (mg *OutageSimulation) GetManagementPolicy() xpv1.ManagementPolicy
GetManagementPolicy of this OutageSimulation.
func (*OutageSimulation) GetObservation ¶
func (tr *OutageSimulation) GetObservation() (map[string]any, error)
GetObservation of this OutageSimulation
func (*OutageSimulation) GetParameters ¶
func (tr *OutageSimulation) GetParameters() (map[string]any, error)
GetParameters of this OutageSimulation
func (*OutageSimulation) GetProviderConfigReference ¶
func (mg *OutageSimulation) GetProviderConfigReference() *xpv1.Reference
GetProviderConfigReference of this OutageSimulation.
func (*OutageSimulation) GetProviderReference ¶
func (mg *OutageSimulation) GetProviderReference() *xpv1.Reference
GetProviderReference of this OutageSimulation. Deprecated: Use GetProviderConfigReference.
func (*OutageSimulation) GetPublishConnectionDetailsTo ¶
func (mg *OutageSimulation) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo
GetPublishConnectionDetailsTo of this OutageSimulation.
func (*OutageSimulation) GetTerraformResourceType ¶
func (mg *OutageSimulation) GetTerraformResourceType() string
GetTerraformResourceType returns Terraform resource type for this OutageSimulation
func (*OutageSimulation) GetTerraformSchemaVersion ¶
func (tr *OutageSimulation) GetTerraformSchemaVersion() int
GetTerraformSchemaVersion returns the associated Terraform schema version
func (*OutageSimulation) GetWriteConnectionSecretToReference ¶
func (mg *OutageSimulation) GetWriteConnectionSecretToReference() *xpv1.SecretReference
GetWriteConnectionSecretToReference of this OutageSimulation.
func (*OutageSimulation) LateInitialize ¶
func (tr *OutageSimulation) LateInitialize(attrs []byte) (bool, error)
LateInitialize this OutageSimulation using its observed tfState. returns True if there are any spec changes for the resource.
func (*OutageSimulation) SetConditions ¶
func (mg *OutageSimulation) SetConditions(c ...xpv1.Condition)
SetConditions of this OutageSimulation.
func (*OutageSimulation) SetDeletionPolicy ¶
func (mg *OutageSimulation) SetDeletionPolicy(r xpv1.DeletionPolicy)
SetDeletionPolicy of this OutageSimulation.
func (*OutageSimulation) SetManagementPolicy ¶
func (mg *OutageSimulation) SetManagementPolicy(r xpv1.ManagementPolicy)
SetManagementPolicy of this OutageSimulation.
func (*OutageSimulation) SetObservation ¶
func (tr *OutageSimulation) SetObservation(obs map[string]any) error
SetObservation for this OutageSimulation
func (*OutageSimulation) SetParameters ¶
func (tr *OutageSimulation) SetParameters(params map[string]any) error
SetParameters for this OutageSimulation
func (*OutageSimulation) SetProviderConfigReference ¶
func (mg *OutageSimulation) SetProviderConfigReference(r *xpv1.Reference)
SetProviderConfigReference of this OutageSimulation.
func (*OutageSimulation) SetProviderReference ¶
func (mg *OutageSimulation) SetProviderReference(r *xpv1.Reference)
SetProviderReference of this OutageSimulation. Deprecated: Use SetProviderConfigReference.
func (*OutageSimulation) SetPublishConnectionDetailsTo ¶
func (mg *OutageSimulation) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo)
SetPublishConnectionDetailsTo of this OutageSimulation.
func (*OutageSimulation) SetWriteConnectionSecretToReference ¶
func (mg *OutageSimulation) SetWriteConnectionSecretToReference(r *xpv1.SecretReference)
SetWriteConnectionSecretToReference of this OutageSimulation.
type OutageSimulationList ¶
type OutageSimulationList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []OutageSimulation `json:"items"` }
OutageSimulationList contains a list of OutageSimulations
func (*OutageSimulationList) DeepCopy ¶
func (in *OutageSimulationList) DeepCopy() *OutageSimulationList
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutageSimulationList.
func (*OutageSimulationList) DeepCopyInto ¶
func (in *OutageSimulationList) DeepCopyInto(out *OutageSimulationList)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (*OutageSimulationList) DeepCopyObject ¶
func (in *OutageSimulationList) DeepCopyObject() runtime.Object
DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (*OutageSimulationList) GetItems ¶
func (l *OutageSimulationList) GetItems() []resource.Managed
GetItems of this OutageSimulationList.
type OutageSimulationObservation ¶
type OutageSimulationObservation struct { // Name of the Atlas Cluster that is or will be undergoing outage simulation. ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` ID *string `json:"id,omitempty" tf:"id,omitempty"` // (Minimum one required) List of settings that specify the type of cluster outage simulation. OutageFilters []OutageFiltersObservation `json:"outageFilters,omitempty" tf:"outage_filters,omitempty"` // The unique ID for the project that contains the cluster that is or will be undergoing outage simulation. ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` // Unique 24-hexadecimal character string that identifies the outage simulation. SimulationID *string `json:"simulationId,omitempty" tf:"simulation_id,omitempty"` // Date and time when MongoDB Cloud started the regional outage simulation. StartRequestDate *string `json:"startRequestDate,omitempty" tf:"start_request_date,omitempty"` // Current phase of the outage simulation: State *string `json:"state,omitempty" tf:"state,omitempty"` }
func (*OutageSimulationObservation) DeepCopy ¶
func (in *OutageSimulationObservation) DeepCopy() *OutageSimulationObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutageSimulationObservation.
func (*OutageSimulationObservation) DeepCopyInto ¶
func (in *OutageSimulationObservation) DeepCopyInto(out *OutageSimulationObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type OutageSimulationParameters ¶
type OutageSimulationParameters struct { // Name of the Atlas Cluster that is or will be undergoing outage simulation. // +kubebuilder:validation:Optional ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"` // (Minimum one required) List of settings that specify the type of cluster outage simulation. // +kubebuilder:validation:Optional OutageFilters []OutageFiltersParameters `json:"outageFilters,omitempty" tf:"outage_filters,omitempty"` // The unique ID for the project that contains the cluster that is or will be undergoing outage simulation. // +kubebuilder:validation:Optional ProjectID *string `json:"projectId,omitempty" tf:"project_id,omitempty"` }
func (*OutageSimulationParameters) DeepCopy ¶
func (in *OutageSimulationParameters) DeepCopy() *OutageSimulationParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutageSimulationParameters.
func (*OutageSimulationParameters) DeepCopyInto ¶
func (in *OutageSimulationParameters) DeepCopyInto(out *OutageSimulationParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type OutageSimulationSpec ¶
// OutageSimulationSpec defines the desired state of OutageSimulation.
type OutageSimulationSpec struct {
	v1.ResourceSpec `json:",inline"`
	ForProvider     OutageSimulationParameters `json:"forProvider"`
}
OutageSimulationSpec defines the desired state of OutageSimulation
func (*OutageSimulationSpec) DeepCopy ¶
func (in *OutageSimulationSpec) DeepCopy() *OutageSimulationSpec
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutageSimulationSpec.
func (*OutageSimulationSpec) DeepCopyInto ¶
func (in *OutageSimulationSpec) DeepCopyInto(out *OutageSimulationSpec)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type OutageSimulationStatus ¶
// OutageSimulationStatus defines the observed state of OutageSimulation.
type OutageSimulationStatus struct {
	v1.ResourceStatus `json:",inline"`
	AtProvider        OutageSimulationObservation `json:"atProvider,omitempty"`
}
OutageSimulationStatus defines the observed state of OutageSimulation.
func (*OutageSimulationStatus) DeepCopy ¶
func (in *OutageSimulationStatus) DeepCopy() *OutageSimulationStatus
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutageSimulationStatus.
func (*OutageSimulationStatus) DeepCopyInto ¶
func (in *OutageSimulationStatus) DeepCopyInto(out *OutageSimulationStatus)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type PoliciesObservation ¶
// PoliciesObservation records an observed backup policy attached to a
// cluster's snapshot backup policy.
type PoliciesObservation struct {
	// Unique identifier for this policy item.
	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// A list of specifications for a policy.
	PolicyItem []PolicyItemObservation `json:"policyItem,omitempty" tf:"policy_item,omitempty"`
}
func (*PoliciesObservation) DeepCopy ¶
func (in *PoliciesObservation) DeepCopy() *PoliciesObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoliciesObservation.
func (*PoliciesObservation) DeepCopyInto ¶
func (in *PoliciesObservation) DeepCopyInto(out *PoliciesObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type PoliciesParameters ¶
// PoliciesParameters is intentionally empty: backup policies on this resource
// are observation-only (computed by Atlas), so there is nothing for the user
// to configure here.
type PoliciesParameters struct {
}
func (*PoliciesParameters) DeepCopy ¶
func (in *PoliciesParameters) DeepCopy() *PoliciesParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoliciesParameters.
func (*PoliciesParameters) DeepCopyInto ¶
func (in *PoliciesParameters) DeepCopyInto(out *PoliciesParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type PolicyItemObservation ¶
// PolicyItemObservation records an observed snapshot-schedule entry within a
// backup policy.
type PolicyItemObservation struct {
	// The frequency interval for a set of snapshots.
	FrequencyInterval *float64 `json:"frequencyInterval,omitempty" tf:"frequency_interval,omitempty"`

	// A type of frequency (hourly, daily, weekly, monthly).
	FrequencyType *string `json:"frequencyType,omitempty" tf:"frequency_type,omitempty"`

	// Unique identifier for this policy item.
	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// The unit of time in which snapshot retention is measured (days, weeks, months).
	RetentionUnit *string `json:"retentionUnit,omitempty" tf:"retention_unit,omitempty"`

	// The number of days, weeks, or months the snapshot is retained.
	RetentionValue *float64 `json:"retentionValue,omitempty" tf:"retention_value,omitempty"`
}
func (*PolicyItemObservation) DeepCopy ¶
func (in *PolicyItemObservation) DeepCopy() *PolicyItemObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyItemObservation.
func (*PolicyItemObservation) DeepCopyInto ¶
func (in *PolicyItemObservation) DeepCopyInto(out *PolicyItemObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type PolicyItemParameters ¶
// PolicyItemParameters is intentionally empty: policy items on this resource
// are observation-only (computed by Atlas), so there is nothing for the user
// to configure here.
type PolicyItemParameters struct {
}
func (*PolicyItemParameters) DeepCopy ¶
func (in *PolicyItemParameters) DeepCopy() *PolicyItemParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyItemParameters.
func (*PolicyItemParameters) DeepCopyInto ¶
func (in *PolicyItemParameters) DeepCopyInto(out *PolicyItemParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type PrivateEndpointObservation ¶
// PrivateEndpointObservation records the private-endpoint-aware connection
// strings Atlas exposes for a cluster.
type PrivateEndpointObservation struct {
	// Private-endpoint-aware mongodb:// connection string for this private endpoint.
	ConnectionString *string `json:"connectionString,omitempty" tf:"connection_string,omitempty"`

	// Private endpoint through which you connect to Atlas when you use
	// connection_strings.private_endpoint[n].connection_string or
	// connection_strings.private_endpoint[n].srv_connection_string.
	Endpoints []EndpointsObservation `json:"endpoints,omitempty" tf:"endpoints,omitempty"`

	// Private-endpoint-aware mongodb+srv:// connection string for this private
	// endpoint. The mongodb+srv protocol tells the driver to look up the seed
	// list of hosts in DNS; Atlas synchronizes this list with the nodes in a
	// cluster. With this URI format you don't need to append the seed list or
	// change the URI when the nodes change. Use this format if your driver
	// supports it; otherwise use
	// connection_strings.private_endpoint[n].connection_string.
	SrvConnectionString *string `json:"srvConnectionString,omitempty" tf:"srv_connection_string,omitempty"`

	// Private-endpoint-aware connection string optimized for sharded clusters
	// that uses the mongodb+srv:// protocol to connect to MongoDB Cloud through
	// a private endpoint. With this URI format you don't need to change the URI
	// when the nodes change. Use it if your application and Atlas cluster
	// support it; otherwise use
	// connectionStrings.privateEndpoint[n].srvConnectionString.
	SrvShardOptimizedConnectionString *string `json:"srvShardOptimizedConnectionString,omitempty" tf:"srv_shard_optimized_connection_string,omitempty"`

	// Type of MongoDB process that you connect to with the connection strings.
	// Atlas returns MONGOD for replica sets, or MONGOS for sharded clusters.
	Type *string `json:"type,omitempty" tf:"type,omitempty"`
}
func (*PrivateEndpointObservation) DeepCopy ¶
func (in *PrivateEndpointObservation) DeepCopy() *PrivateEndpointObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointObservation.
func (*PrivateEndpointObservation) DeepCopyInto ¶
func (in *PrivateEndpointObservation) DeepCopyInto(out *PrivateEndpointObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type PrivateEndpointParameters ¶
// PrivateEndpointParameters is intentionally empty: private-endpoint
// connection strings are observation-only (computed by Atlas), so there is
// nothing for the user to configure here.
type PrivateEndpointParameters struct {
}
func (*PrivateEndpointParameters) DeepCopy ¶
func (in *PrivateEndpointParameters) DeepCopy() *PrivateEndpointParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateEndpointParameters.
func (*PrivateEndpointParameters) DeepCopyInto ¶
func (in *PrivateEndpointParameters) DeepCopyInto(out *PrivateEndpointParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type RegionsConfigObservation ¶
// RegionsConfigObservation records the observed per-region node layout of a
// replication spec.
type RegionsConfigObservation struct {
	// The number of analytics nodes for Atlas to deploy to the region.
	// Analytics nodes are useful for handling analytic data such as reporting
	// queries from BI Connector for Atlas. Analytics nodes are read-only, and
	// can never become the primary. If you do not specify this option, no
	// analytics nodes are deployed to the region.
	AnalyticsNodes *float64 `json:"analyticsNodes,omitempty" tf:"analytics_nodes,omitempty"`

	// Number of electable nodes for Atlas to deploy to the region. Electable
	// nodes can become the primary and can facilitate local reads.
	ElectableNodes *float64 `json:"electableNodes,omitempty" tf:"electable_nodes,omitempty"`

	// Election priority of the region; the lowest possible value is 0.
	// NOTE(review): the upstream comment was truncated to "is 0." — verify the
	// full wording against the Atlas provider documentation.
	Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`

	// Number of read-only nodes for Atlas to deploy to the region. Read-only
	// nodes can never become the primary, but can facilitate local-reads.
	// Specify 0 if you do not want any read-only nodes in the region.
	ReadOnlyNodes *float64 `json:"readOnlyNodes,omitempty" tf:"read_only_nodes,omitempty"`

	// Physical location of your MongoDB cluster. The region you choose can
	// affect network latency for clients accessing your databases. Requires
	// the Atlas region name; see the reference list for AWS, GCP, Azure.
	RegionName *string `json:"regionName,omitempty" tf:"region_name,omitempty"`
}
func (*RegionsConfigObservation) DeepCopy ¶
func (in *RegionsConfigObservation) DeepCopy() *RegionsConfigObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionsConfigObservation.
func (*RegionsConfigObservation) DeepCopyInto ¶
func (in *RegionsConfigObservation) DeepCopyInto(out *RegionsConfigObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type RegionsConfigParameters ¶
// RegionsConfigParameters defines the desired per-region node layout of a
// replication spec. Only RegionName is required; node counts default to
// Atlas-side behavior when omitted.
type RegionsConfigParameters struct {
	// The number of analytics nodes for Atlas to deploy to the region.
	// Analytics nodes are useful for handling analytic data such as reporting
	// queries from BI Connector for Atlas. Analytics nodes are read-only, and
	// can never become the primary. If you do not specify this option, no
	// analytics nodes are deployed to the region.
	// +kubebuilder:validation:Optional
	AnalyticsNodes *float64 `json:"analyticsNodes,omitempty" tf:"analytics_nodes,omitempty"`

	// Number of electable nodes for Atlas to deploy to the region. Electable
	// nodes can become the primary and can facilitate local reads.
	// +kubebuilder:validation:Optional
	ElectableNodes *float64 `json:"electableNodes,omitempty" tf:"electable_nodes,omitempty"`

	// Election priority of the region; the lowest possible value is 0.
	// NOTE(review): the upstream comment was truncated to "is 0." — verify the
	// full wording against the Atlas provider documentation.
	// +kubebuilder:validation:Optional
	Priority *float64 `json:"priority,omitempty" tf:"priority,omitempty"`

	// Number of read-only nodes for Atlas to deploy to the region. Read-only
	// nodes can never become the primary, but can facilitate local-reads.
	// Specify 0 if you do not want any read-only nodes in the region.
	// +kubebuilder:validation:Optional
	ReadOnlyNodes *float64 `json:"readOnlyNodes,omitempty" tf:"read_only_nodes,omitempty"`

	// Physical location of your MongoDB cluster. The region you choose can
	// affect network latency for clients accessing your databases. Requires
	// the Atlas region name; see the reference list for AWS, GCP, Azure.
	// +kubebuilder:validation:Required
	RegionName *string `json:"regionName" tf:"region_name,omitempty"`
}
func (*RegionsConfigParameters) DeepCopy ¶
func (in *RegionsConfigParameters) DeepCopy() *RegionsConfigParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegionsConfigParameters.
func (*RegionsConfigParameters) DeepCopyInto ¶
func (in *RegionsConfigParameters) DeepCopyInto(out *RegionsConfigParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ReplicationSpecsObservation ¶
// ReplicationSpecsObservation records the observed replication configuration
// of the cluster.
type ReplicationSpecsObservation struct {
	// Unique identifier. NOTE(review): the upstream comment read "Unique
	// identifier for this policy item", which looks like a generator
	// copy/paste — this field presumably identifies the replication spec;
	// verify against the Atlas provider docs.
	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// Number of shards to deploy in the specified zone, minimum 1.
	NumShards *float64 `json:"numShards,omitempty" tf:"num_shards,omitempty"`

	// Physical location of the region. Each regionsConfig document describes
	// the region's priority in elections and the number and type of MongoDB
	// nodes Atlas deploys to the region. You must order each regionsConfigs
	// document by regionsConfig.priority, descending. See Region Config below
	// for more details.
	RegionsConfig []RegionsConfigObservation `json:"regionsConfig,omitempty" tf:"regions_config,omitempty"`

	// Name for the zone in a Global Cluster.
	ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"`
}
func (*ReplicationSpecsObservation) DeepCopy ¶
func (in *ReplicationSpecsObservation) DeepCopy() *ReplicationSpecsObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationSpecsObservation.
func (*ReplicationSpecsObservation) DeepCopyInto ¶
func (in *ReplicationSpecsObservation) DeepCopyInto(out *ReplicationSpecsObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type ReplicationSpecsParameters ¶
// ReplicationSpecsParameters defines the desired replication configuration of
// the cluster.
type ReplicationSpecsParameters struct {
	// Unique identifier. NOTE(review): the upstream comment read "Unique
	// identifier for this policy item", which looks like a generator
	// copy/paste — this field presumably identifies the replication spec;
	// verify against the Atlas provider docs.
	// +kubebuilder:validation:Optional
	ID *string `json:"id,omitempty" tf:"id,omitempty"`

	// Number of shards to deploy in the specified zone, minimum 1.
	// +kubebuilder:validation:Required
	NumShards *float64 `json:"numShards" tf:"num_shards,omitempty"`

	// Physical location of the region. Each regionsConfig document describes
	// the region's priority in elections and the number and type of MongoDB
	// nodes Atlas deploys to the region. You must order each regionsConfigs
	// document by regionsConfig.priority, descending. See Region Config below
	// for more details.
	// +kubebuilder:validation:Optional
	RegionsConfig []RegionsConfigParameters `json:"regionsConfig,omitempty" tf:"regions_config,omitempty"`

	// Name for the zone in a Global Cluster.
	// +kubebuilder:validation:Optional
	ZoneName *string `json:"zoneName,omitempty" tf:"zone_name,omitempty"`
}
func (*ReplicationSpecsParameters) DeepCopy ¶
func (in *ReplicationSpecsParameters) DeepCopy() *ReplicationSpecsParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationSpecsParameters.
func (*ReplicationSpecsParameters) DeepCopyInto ¶
func (in *ReplicationSpecsParameters) DeepCopyInto(out *ReplicationSpecsParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type SnapshotBackupPolicyObservation ¶
// SnapshotBackupPolicyObservation records the observed cloud-provider
// snapshot backup policy of the cluster.
type SnapshotBackupPolicyObservation struct {
	// Unique identifier of the Atlas cluster.
	ClusterID *string `json:"clusterId,omitempty" tf:"cluster_id,omitempty"`

	// Name of the Atlas cluster that contains the snapshot backup policy.
	ClusterName *string `json:"clusterName,omitempty" tf:"cluster_name,omitempty"`

	// UTC ISO 8601 formatted point in time when Atlas will take the next snapshot.
	NextSnapshot *string `json:"nextSnapshot,omitempty" tf:"next_snapshot,omitempty"`

	// A list of policy definitions for the cluster.
	Policies []PoliciesObservation `json:"policies,omitempty" tf:"policies,omitempty"`

	// UTC hour of day, between 0 and 23, at which Atlas takes a snapshot.
	ReferenceHourOfDay *float64 `json:"referenceHourOfDay,omitempty" tf:"reference_hour_of_day,omitempty"`

	// UTC minute, between 0 and 59, within referenceHourOfDay at which Atlas
	// takes the snapshot.
	ReferenceMinuteOfHour *float64 `json:"referenceMinuteOfHour,omitempty" tf:"reference_minute_of_hour,omitempty"`

	// Specifies a restore window in days for the cloud provider backup to maintain.
	RestoreWindowDays *float64 `json:"restoreWindowDays,omitempty" tf:"restore_window_days,omitempty"`

	// When true, Atlas applies the retention changes in the updated backup
	// policy to snapshots it took previously.
	UpdateSnapshots *bool `json:"updateSnapshots,omitempty" tf:"update_snapshots,omitempty"`
}
func (*SnapshotBackupPolicyObservation) DeepCopy ¶
func (in *SnapshotBackupPolicyObservation) DeepCopy() *SnapshotBackupPolicyObservation
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotBackupPolicyObservation.
func (*SnapshotBackupPolicyObservation) DeepCopyInto ¶
func (in *SnapshotBackupPolicyObservation) DeepCopyInto(out *SnapshotBackupPolicyObservation)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
type SnapshotBackupPolicyParameters ¶
// SnapshotBackupPolicyParameters is intentionally empty: the snapshot backup
// policy on this resource is observation-only (computed by Atlas), so there
// is nothing for the user to configure here.
type SnapshotBackupPolicyParameters struct {
}
func (*SnapshotBackupPolicyParameters) DeepCopy ¶
func (in *SnapshotBackupPolicyParameters) DeepCopy() *SnapshotBackupPolicyParameters
DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SnapshotBackupPolicyParameters.
func (*SnapshotBackupPolicyParameters) DeepCopyInto ¶
func (in *SnapshotBackupPolicyParameters) DeepCopyInto(out *SnapshotBackupPolicyParameters)
DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.