Documentation
¶
Index ¶
- type AppProfile
- func (r *AppProfile) AppProfileId() pulumi.StringOutput
- func (r *AppProfile) Description() pulumi.StringOutput
- func (r *AppProfile) ID() pulumi.IDOutput
- func (r *AppProfile) IgnoreWarnings() pulumi.BoolOutput
- func (r *AppProfile) Instance() pulumi.StringOutput
- func (r *AppProfile) MultiClusterRoutingUseAny() pulumi.BoolOutput
- func (r *AppProfile) Name() pulumi.StringOutput
- func (r *AppProfile) Project() pulumi.StringOutput
- func (r *AppProfile) SingleClusterRouting() pulumi.Output
- func (r *AppProfile) URN() pulumi.URNOutput
- type AppProfileArgs
- type AppProfileState
- type DataTransferConfig
- func (r *DataTransferConfig) DataRefreshWindowDays() pulumi.IntOutput
- func (r *DataTransferConfig) DataSourceId() pulumi.StringOutput
- func (r *DataTransferConfig) DestinationDatasetId() pulumi.StringOutput
- func (r *DataTransferConfig) Disabled() pulumi.BoolOutput
- func (r *DataTransferConfig) DisplayName() pulumi.StringOutput
- func (r *DataTransferConfig) ID() pulumi.IDOutput
- func (r *DataTransferConfig) Location() pulumi.StringOutput
- func (r *DataTransferConfig) Name() pulumi.StringOutput
- func (r *DataTransferConfig) Params() pulumi.MapOutput
- func (r *DataTransferConfig) Project() pulumi.StringOutput
- func (r *DataTransferConfig) Schedule() pulumi.StringOutput
- func (r *DataTransferConfig) URN() pulumi.URNOutput
- type DataTransferConfigArgs
- type DataTransferConfigState
- type Dataset
- func (r *Dataset) Accesses() pulumi.ArrayOutput
- func (r *Dataset) CreationTime() pulumi.IntOutput
- func (r *Dataset) DatasetId() pulumi.StringOutput
- func (r *Dataset) DefaultEncryptionConfiguration() pulumi.Output
- func (r *Dataset) DefaultPartitionExpirationMs() pulumi.IntOutput
- func (r *Dataset) DefaultTableExpirationMs() pulumi.IntOutput
- func (r *Dataset) DeleteContentsOnDestroy() pulumi.BoolOutput
- func (r *Dataset) Description() pulumi.StringOutput
- func (r *Dataset) Etag() pulumi.StringOutput
- func (r *Dataset) FriendlyName() pulumi.StringOutput
- func (r *Dataset) ID() pulumi.IDOutput
- func (r *Dataset) Labels() pulumi.MapOutput
- func (r *Dataset) LastModifiedTime() pulumi.IntOutput
- func (r *Dataset) Location() pulumi.StringOutput
- func (r *Dataset) Project() pulumi.StringOutput
- func (r *Dataset) SelfLink() pulumi.StringOutput
- func (r *Dataset) URN() pulumi.URNOutput
- type DatasetArgs
- type DatasetState
- type Table
- func (r *Table) Clusterings() pulumi.ArrayOutput
- func (r *Table) CreationTime() pulumi.IntOutput
- func (r *Table) DatasetId() pulumi.StringOutput
- func (r *Table) Description() pulumi.StringOutput
- func (r *Table) Etag() pulumi.StringOutput
- func (r *Table) ExpirationTime() pulumi.IntOutput
- func (r *Table) ExternalDataConfiguration() pulumi.Output
- func (r *Table) FriendlyName() pulumi.StringOutput
- func (r *Table) ID() pulumi.IDOutput
- func (r *Table) Labels() pulumi.MapOutput
- func (r *Table) LastModifiedTime() pulumi.IntOutput
- func (r *Table) Location() pulumi.StringOutput
- func (r *Table) NumBytes() pulumi.IntOutput
- func (r *Table) NumLongTermBytes() pulumi.IntOutput
- func (r *Table) NumRows() pulumi.IntOutput
- func (r *Table) Project() pulumi.StringOutput
- func (r *Table) Schema() pulumi.StringOutput
- func (r *Table) SelfLink() pulumi.StringOutput
- func (r *Table) TableId() pulumi.StringOutput
- func (r *Table) TimePartitioning() pulumi.Output
- func (r *Table) Type() pulumi.StringOutput
- func (r *Table) URN() pulumi.URNOutput
- func (r *Table) View() pulumi.Output
- type TableArgs
- type TableState
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type AppProfile ¶ added in v1.1.0
type AppProfile struct {
// contains filtered or unexported fields
}
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/bigtable_app_profile.html.markdown.
func GetAppProfile ¶ added in v1.1.0
func GetAppProfile(ctx *pulumi.Context, name string, id pulumi.ID, state *AppProfileState, opts ...pulumi.ResourceOpt) (*AppProfile, error)
GetAppProfile gets an existing AppProfile resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
func NewAppProfile ¶ added in v1.1.0
func NewAppProfile(ctx *pulumi.Context, name string, args *AppProfileArgs, opts ...pulumi.ResourceOpt) (*AppProfile, error)
NewAppProfile registers a new resource with the given unique name, arguments, and options.
func (*AppProfile) AppProfileId ¶ added in v1.1.0
func (r *AppProfile) AppProfileId() pulumi.StringOutput
The unique name of the app profile in the form '[_a-zA-Z0-9][-_.a-zA-Z0-9]*'.
func (*AppProfile) Description ¶ added in v1.1.0
func (r *AppProfile) Description() pulumi.StringOutput
Long form description of the use case for this app profile.
func (*AppProfile) ID ¶ added in v1.1.0
func (r *AppProfile) ID() pulumi.IDOutput
ID is this resource's unique identifier assigned by its provider.
func (*AppProfile) IgnoreWarnings ¶ added in v1.1.0
func (r *AppProfile) IgnoreWarnings() pulumi.BoolOutput
If true, ignore safety checks when deleting/updating the app profile.
func (*AppProfile) Instance ¶ added in v1.1.0
func (r *AppProfile) Instance() pulumi.StringOutput
The name of the instance to create the app profile within.
func (*AppProfile) MultiClusterRoutingUseAny ¶ added in v1.1.0
func (r *AppProfile) MultiClusterRoutingUseAny() pulumi.BoolOutput
If true, read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest cluster that is available in the event of transient errors or delays. Clusters in a region are considered equidistant. Choosing this option sacrifices read-your-writes consistency to improve availability.
func (*AppProfile) Name ¶ added in v1.1.0
func (r *AppProfile) Name() pulumi.StringOutput
The unique name of the requested app profile. Values are of the form 'projects/<project>/instances/<instance>/appProfiles/<appProfileId>'.
func (*AppProfile) Project ¶ added in v1.1.0
func (r *AppProfile) Project() pulumi.StringOutput
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
func (*AppProfile) SingleClusterRouting ¶ added in v1.1.0
func (r *AppProfile) SingleClusterRouting() pulumi.Output
Use a single-cluster routing policy.
func (*AppProfile) URN ¶ added in v1.1.0
func (r *AppProfile) URN() pulumi.URNOutput
URN is this resource's unique name assigned by Pulumi.
type AppProfileArgs ¶ added in v1.1.0
type AppProfileArgs struct { // The unique name of the app profile in the form '[_a-zA-Z0-9][-_.a-zA-Z0-9]*'. AppProfileId interface{} // Long form description of the use case for this app profile. Description interface{} // If true, ignore safety checks when deleting/updating the app profile. IgnoreWarnings interface{} // The name of the instance to create the app profile within. Instance interface{} // If true, read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest // cluster that is available in the event of transient errors or delays. Clusters in a region are considered equidistant. // Choosing this option sacrifices read-your-writes consistency to improve availability. MultiClusterRoutingUseAny interface{} // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project interface{} // Use a single-cluster routing policy. SingleClusterRouting interface{} }
The set of arguments for constructing an AppProfile resource.
type AppProfileState ¶ added in v1.1.0
type AppProfileState struct { // The unique name of the app profile in the form '[_a-zA-Z0-9][-_.a-zA-Z0-9]*'. AppProfileId interface{} // Long form description of the use case for this app profile. Description interface{} // If true, ignore safety checks when deleting/updating the app profile. IgnoreWarnings interface{} // The name of the instance to create the app profile within. Instance interface{} // If true, read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest // cluster that is available in the event of transient errors or delays. Clusters in a region are considered equidistant. // Choosing this option sacrifices read-your-writes consistency to improve availability. MultiClusterRoutingUseAny interface{} // The unique name of the requested app profile. Values are of the form // 'projects/<project>/instances/<instance>/appProfiles/<appProfileId>'. Name interface{} // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project interface{} // Use a single-cluster routing policy. SingleClusterRouting interface{} }
Input properties used for looking up and filtering AppProfile resources.
type DataTransferConfig ¶ added in v1.1.0
type DataTransferConfig struct {
// contains filtered or unexported fields
}
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/bigquery_data_transfer_config.html.markdown.
func GetDataTransferConfig ¶ added in v1.1.0
func GetDataTransferConfig(ctx *pulumi.Context, name string, id pulumi.ID, state *DataTransferConfigState, opts ...pulumi.ResourceOpt) (*DataTransferConfig, error)
GetDataTransferConfig gets an existing DataTransferConfig resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
func NewDataTransferConfig ¶ added in v1.1.0
func NewDataTransferConfig(ctx *pulumi.Context, name string, args *DataTransferConfigArgs, opts ...pulumi.ResourceOpt) (*DataTransferConfig, error)
NewDataTransferConfig registers a new resource with the given unique name, arguments, and options.
func (*DataTransferConfig) DataRefreshWindowDays ¶ added in v1.1.0
func (r *DataTransferConfig) DataRefreshWindowDays() pulumi.IntOutput
The number of days to look back to automatically refresh the data. For example, if dataRefreshWindowDays = 10, then every day BigQuery reingests data for [today-10, today-1], rather than ingesting data for just [today-1]. Only valid if the data source supports the feature. Set the value to 0 to use the default value.
func (*DataTransferConfig) DataSourceId ¶ added in v1.1.0
func (r *DataTransferConfig) DataSourceId() pulumi.StringOutput
The data source id. Cannot be changed once the transfer config is created.
func (*DataTransferConfig) DestinationDatasetId ¶ added in v1.1.0
func (r *DataTransferConfig) DestinationDatasetId() pulumi.StringOutput
The BigQuery target dataset id.
func (*DataTransferConfig) Disabled ¶ added in v1.1.0
func (r *DataTransferConfig) Disabled() pulumi.BoolOutput
When set to true, no runs are scheduled for a given transfer.
func (*DataTransferConfig) DisplayName ¶ added in v1.1.0
func (r *DataTransferConfig) DisplayName() pulumi.StringOutput
The user specified display name for the transfer config.
func (*DataTransferConfig) ID ¶ added in v1.1.0
func (r *DataTransferConfig) ID() pulumi.IDOutput
ID is this resource's unique identifier assigned by its provider.
func (*DataTransferConfig) Location ¶ added in v1.1.0
func (r *DataTransferConfig) Location() pulumi.StringOutput
The geographic location where the transfer config should reside. Examples: US, EU, asia-northeast1. The default value is US.
func (*DataTransferConfig) Name ¶ added in v1.1.0
func (r *DataTransferConfig) Name() pulumi.StringOutput
The resource name of the transfer config. Transfer config names have the form projects/{projectId}/locations/{location}/transferConfigs/{configId}. Where configId is usually a uuid, but this is not required. The name is ignored when creating a transfer config.
func (*DataTransferConfig) Params ¶ added in v1.1.0
func (r *DataTransferConfig) Params() pulumi.MapOutput
These parameters are specific to each data source.
func (*DataTransferConfig) Project ¶ added in v1.1.0
func (r *DataTransferConfig) Project() pulumi.StringOutput
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
func (*DataTransferConfig) Schedule ¶ added in v1.1.0
func (r *DataTransferConfig) Schedule() pulumi.StringOutput
Data transfer schedule. If the data source does not support a custom schedule, this should be empty. If it is empty, the default value for the data source will be used. The specified times are in UTC. Examples of valid format: 1st,3rd monday of month 15:30, every wed,fri of jan, jun 13:15, and first sunday of quarter 00:00. See more explanation about the format here: https://cloud.google.com/appengine/docs/flexible/python/scheduling-jobs-with-cron-yaml#the_schedule_format NOTE: the granularity should be at least 8 hours, or less frequent.
func (*DataTransferConfig) URN ¶ added in v1.1.0
func (r *DataTransferConfig) URN() pulumi.URNOutput
URN is this resource's unique name assigned by Pulumi.
type DataTransferConfigArgs ¶ added in v1.1.0
type DataTransferConfigArgs struct { // The number of days to look back to automatically refresh the data. For example, if dataRefreshWindowDays = 10, then // every day BigQuery reingests data for [today-10, today-1], rather than ingesting data for just [today-1]. Only valid if // the data source supports the feature. Set the value to 0 to use the default value. DataRefreshWindowDays interface{} // The data source id. Cannot be changed once the transfer config is created. DataSourceId interface{} // The BigQuery target dataset id. DestinationDatasetId interface{} // When set to true, no runs are scheduled for a given transfer. Disabled interface{} // The user specified display name for the transfer config. DisplayName interface{} // The geographic location where the transfer config should reside. Examples: US, EU, asia-northeast1. The default value // is US. Location interface{} // These parameters are specific to each data source. Params interface{} // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project interface{} // Data transfer schedule. If the data source does not support a custom schedule, this should be empty. If it is empty, // the default value for the data source will be used. The specified times are in UTC. Examples of valid format: 1st,3rd // monday of month 15:30, every wed,fri of jan, jun 13:15, and first sunday of quarter 00:00. See more explanation about // the format here: // https://cloud.google.com/appengine/docs/flexible/python/scheduling-jobs-with-cron-yaml#the_schedule_format NOTE: the // granularity should be at least 8 hours, or less frequent. Schedule interface{} }
The set of arguments for constructing a DataTransferConfig resource.
type DataTransferConfigState ¶ added in v1.1.0
type DataTransferConfigState struct { // The number of days to look back to automatically refresh the data. For example, if dataRefreshWindowDays = 10, then // every day BigQuery reingests data for [today-10, today-1], rather than ingesting data for just [today-1]. Only valid if // the data source supports the feature. Set the value to 0 to use the default value. DataRefreshWindowDays interface{} // The data source id. Cannot be changed once the transfer config is created. DataSourceId interface{} // The BigQuery target dataset id. DestinationDatasetId interface{} // When set to true, no runs are scheduled for a given transfer. Disabled interface{} // The user specified display name for the transfer config. DisplayName interface{} // The geographic location where the transfer config should reside. Examples: US, EU, asia-northeast1. The default value // is US. Location interface{} // The resource name of the transfer config. Transfer config names have the form // projects/{projectId}/locations/{location}/transferConfigs/{configId}. Where configId is usually a uuid, but this is not // required. The name is ignored when creating a transfer config. Name interface{} // These parameters are specific to each data source. Params interface{} // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project interface{} // Data transfer schedule. If the data source does not support a custom schedule, this should be empty. If it is empty, // the default value for the data source will be used. The specified times are in UTC. Examples of valid format: 1st,3rd // monday of month 15:30, every wed,fri of jan, jun 13:15, and first sunday of quarter 00:00. See more explanation about // the format here: // https://cloud.google.com/appengine/docs/flexible/python/scheduling-jobs-with-cron-yaml#the_schedule_format NOTE: the // granularity should be at least 8 hours, or less frequent. Schedule interface{} }
Input properties used for looking up and filtering DataTransferConfig resources.
type Dataset ¶
type Dataset struct {
// contains filtered or unexported fields
}
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/bigquery_dataset.html.markdown.
func GetDataset ¶
func GetDataset(ctx *pulumi.Context, name string, id pulumi.ID, state *DatasetState, opts ...pulumi.ResourceOpt) (*Dataset, error)
GetDataset gets an existing Dataset resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
func NewDataset ¶
func NewDataset(ctx *pulumi.Context, name string, args *DatasetArgs, opts ...pulumi.ResourceOpt) (*Dataset, error)
NewDataset registers a new resource with the given unique name, arguments, and options.
func (*Dataset) Accesses ¶ added in v0.16.0
func (r *Dataset) Accesses() pulumi.ArrayOutput
An array of objects that define dataset access for one or more entities.
func (*Dataset) CreationTime ¶
The time when this dataset was created, in milliseconds since the epoch.
func (*Dataset) DatasetId ¶
func (r *Dataset) DatasetId() pulumi.StringOutput
A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters.
func (*Dataset) DefaultEncryptionConfiguration ¶ added in v1.3.0
The default encryption key for all tables in the dataset. Once this property is set, all newly-created partitioned tables in the dataset will have encryption key set to this value, unless table creation request (or query) overrides the key.
func (*Dataset) DefaultPartitionExpirationMs ¶ added in v0.18.0
The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, all newly-created partitioned tables in the dataset will have an 'expirationMs' property in the 'timePartitioning' settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a partition will have an expiration time of its partition time plus this value. Setting this property overrides the use of 'defaultTableExpirationMs' for partitioned tables: only one of 'defaultTableExpirationMs' and 'defaultPartitionExpirationMs' will be used for any new partitioned table. If you provide an explicit 'timePartitioning.expirationMs' when creating or updating a partitioned table, that value takes precedence over the default partition expiration time indicated by this property.
func (*Dataset) DefaultTableExpirationMs ¶
The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one hour). Once this property is set, all newly-created tables in the dataset will have an 'expirationTime' property set to the creation time plus the value in this property, and changing the value will only affect new tables, not existing ones. When the 'expirationTime' for a given table is reached, that table will be deleted automatically. If a table's 'expirationTime' is modified or removed before the table expires, or if you provide an explicit 'expirationTime' when creating a table, that value takes precedence over the default expiration time indicated by this property.
func (*Dataset) DeleteContentsOnDestroy ¶ added in v0.18.0
func (r *Dataset) DeleteContentsOnDestroy() pulumi.BoolOutput
If set to `true`, delete all the tables in the dataset when destroying the resource; otherwise, destroying the resource will fail if tables are present.
func (*Dataset) Description ¶
func (r *Dataset) Description() pulumi.StringOutput
A user-friendly description of the dataset
func (*Dataset) FriendlyName ¶
func (r *Dataset) FriendlyName() pulumi.StringOutput
A descriptive name for the dataset
func (*Dataset) Labels ¶
The labels associated with this dataset. You can use these to organize and group your datasets
func (*Dataset) LastModifiedTime ¶
The date when this dataset or any of its tables was last modified, in milliseconds since the epoch.
func (*Dataset) Location ¶
func (r *Dataset) Location() pulumi.StringOutput
The geographic location where the dataset should reside. See [official docs](https://cloud.google.com/bigquery/docs/dataset-locations). There are two types of locations, regional or multi-regional. A regional location is a specific geographic place, such as Tokyo, and a multi-regional location is a large geographic area, such as the United States, that contains at least two geographic places. Possible regional values include: 'asia-east1', 'asia-northeast1', 'asia-southeast1', 'australia-southeast1', 'europe-north1', 'europe-west2' and 'us-east4'. Possible multi-regional values: 'EU' and 'US'. The default value is multi-regional location 'US'. Changing this forces a new resource to be created.
func (*Dataset) Project ¶
func (r *Dataset) Project() pulumi.StringOutput
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
func (*Dataset) SelfLink ¶
func (r *Dataset) SelfLink() pulumi.StringOutput
The URI of the created resource.
type DatasetArgs ¶
type DatasetArgs struct { // An array of objects that define dataset access for one or more entities. Accesses interface{} // A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or // underscores (_). The maximum length is 1,024 characters. DatasetId interface{} // The default encryption key for all tables in the dataset. Once this property is set, all newly-created partitioned // tables in the dataset will have encryption key set to this value, unless table creation request (or query) overrides // the key. DefaultEncryptionConfiguration interface{} // The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, // all newly-created partitioned tables in the dataset will have an 'expirationMs' property in the 'timePartitioning' // settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a // partition will have an expiration time of its partition time plus this value. Setting this property overrides the use // of 'defaultTableExpirationMs' for partitioned tables: only one of 'defaultTableExpirationMs' and // 'defaultPartitionExpirationMs' will be used for any new partitioned table. If you provide an explicit // 'timePartitioning.expirationMs' when creating or updating a partitioned table, that value takes precedence over the // default partition expiration time indicated by this property. DefaultPartitionExpirationMs interface{} // The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one // hour). Once this property is set, all newly-created tables in the dataset will have an 'expirationTime' property set to // the creation time plus the value in this property, and changing the value will only affect new tables, not existing // ones. When the 'expirationTime' for a given table is reached, that table will be deleted automatically. 
If a table's // 'expirationTime' is modified or removed before the table expires, or if you provide an explicit 'expirationTime' when // creating a table, that value takes precedence over the default expiration time indicated by this property. DefaultTableExpirationMs interface{} // If set to `true`, delete all the tables in the // dataset when destroying the resource; otherwise, // destroying the resource will fail if tables are present. DeleteContentsOnDestroy interface{} // A user-friendly description of the dataset Description interface{} // A descriptive name for the dataset FriendlyName interface{} // The labels associated with this dataset. You can use these to organize and group your datasets Labels interface{} // The geographic location where the dataset should reside. See [official // docs](https://cloud.google.com/bigquery/docs/dataset-locations). There are two types of locations, regional or // multi-regional. A regional location is a specific geographic place, such as Tokyo, and a multi-regional location is a // large geographic area, such as the United States, that contains at least two geographic places. Possible regional // values include: 'asia-east1', 'asia-northeast1', 'asia-southeast1', 'australia-southeast1', 'europe-north1', // 'europe-west2' and 'us-east4'. Possible multi-regional values: 'EU' and 'US'. The default value is multi-regional // location 'US'. Changing this forces a new resource to be created. Location interface{} // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project interface{} }
The set of arguments for constructing a Dataset resource.
type DatasetState ¶
type DatasetState struct { // An array of objects that define dataset access for one or more entities. Accesses interface{} // The time when this dataset was created, in milliseconds since the epoch. CreationTime interface{} // A unique ID for this dataset, without the project name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or // underscores (_). The maximum length is 1,024 characters. DatasetId interface{} // The default encryption key for all tables in the dataset. Once this property is set, all newly-created partitioned // tables in the dataset will have encryption key set to this value, unless table creation request (or query) overrides // the key. DefaultEncryptionConfiguration interface{} // The default partition expiration for all partitioned tables in the dataset, in milliseconds. Once this property is set, // all newly-created partitioned tables in the dataset will have an 'expirationMs' property in the 'timePartitioning' // settings set to this value, and changing the value will only affect new tables, not existing ones. The storage in a // partition will have an expiration time of its partition time plus this value. Setting this property overrides the use // of 'defaultTableExpirationMs' for partitioned tables: only one of 'defaultTableExpirationMs' and // 'defaultPartitionExpirationMs' will be used for any new partitioned table. If you provide an explicit // 'timePartitioning.expirationMs' when creating or updating a partitioned table, that value takes precedence over the // default partition expiration time indicated by this property. DefaultPartitionExpirationMs interface{} // The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one // hour). 
Once this property is set, all newly-created tables in the dataset will have an 'expirationTime' property set to // the creation time plus the value in this property, and changing the value will only affect new tables, not existing // ones. When the 'expirationTime' for a given table is reached, that table will be deleted automatically. If a table's // 'expirationTime' is modified or removed before the table expires, or if you provide an explicit 'expirationTime' when // creating a table, that value takes precedence over the default expiration time indicated by this property. DefaultTableExpirationMs interface{} // If set to `true`, delete all the tables in the // dataset when destroying the resource; otherwise, // destroying the resource will fail if tables are present. DeleteContentsOnDestroy interface{} // A user-friendly description of the dataset Description interface{} // A hash of the resource. Etag interface{} // A descriptive name for the dataset FriendlyName interface{} // The labels associated with this dataset. You can use these to organize and group your datasets Labels interface{} // The date when this dataset or any of its tables was last modified, in milliseconds since the epoch. LastModifiedTime interface{} // The geographic location where the dataset should reside. See [official // docs](https://cloud.google.com/bigquery/docs/dataset-locations). There are two types of locations, regional or // multi-regional. A regional location is a specific geographic place, such as Tokyo, and a multi-regional location is a // large geographic area, such as the United States, that contains at least two geographic places. Possible regional // values include: 'asia-east1', 'asia-northeast1', 'asia-southeast1', 'australia-southeast1', 'europe-north1', // 'europe-west2' and 'us-east4'. Possible multi-regional values: 'EU' and 'US'. The default value is multi-regional // location 'US'. Changing this forces a new resource to be created. 
Location interface{} // The ID of the project in which the resource belongs. // If it is not provided, the provider project is used. Project interface{} // The URI of the created resource. SelfLink interface{} }
Input properties used for looking up and filtering Dataset resources.
type Table ¶
type Table struct {
// contains filtered or unexported fields
}
Creates a table resource in a dataset for Google BigQuery. For more information see [the official documentation](https://cloud.google.com/bigquery/docs/) and [API](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables).
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/bigquery_table.html.markdown.
func GetTable ¶
func GetTable(ctx *pulumi.Context, name string, id pulumi.ID, state *TableState, opts ...pulumi.ResourceOpt) (*Table, error)
GetTable gets an existing Table resource's state with the given name, ID, and optional state properties that are used to uniquely qualify the lookup (nil if not required).
func NewTable ¶
func NewTable(ctx *pulumi.Context, name string, args *TableArgs, opts ...pulumi.ResourceOpt) (*Table, error)
NewTable registers a new resource with the given unique name, arguments, and options.
func (*Table) Clusterings ¶ added in v1.1.0
func (r *Table) Clusterings() pulumi.ArrayOutput
Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.
func (*Table) CreationTime ¶
The time when this table was created, in milliseconds since the epoch.
func (*Table) DatasetId ¶
func (r *Table) DatasetId() pulumi.StringOutput
The dataset ID to create the table in. Changing this forces a new resource to be created.
func (*Table) Description ¶
func (r *Table) Description() pulumi.StringOutput
The field description.
func (*Table) ExpirationTime ¶
The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.
func (*Table) ExternalDataConfiguration ¶ added in v0.18.13
Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table. Structure is documented below.
func (*Table) FriendlyName ¶
func (r *Table) FriendlyName() pulumi.StringOutput
A descriptive name for the table.
func (*Table) LastModifiedTime ¶
The time when this table was last modified, in milliseconds since the epoch.
func (*Table) Location ¶
func (r *Table) Location() pulumi.StringOutput
The geographic location where the table resides. This value is inherited from the dataset.
func (*Table) NumBytes ¶
func (r *Table) NumBytes() pulumi.IntOutput
The size of this table in bytes, excluding any data in the streaming buffer.
func (*Table) NumLongTermBytes ¶
func (r *Table) NumLongTermBytes() pulumi.IntOutput
The number of bytes in the table that are considered "long-term storage".
func (*Table) NumRows ¶
func (r *Table) NumRows() pulumi.IntOutput
The number of rows of data in this table, excluding any data in the streaming buffer.
func (*Table) Project ¶
func (r *Table) Project() pulumi.StringOutput
The ID of the project in which the resource belongs. If it is not provided, the provider project is used.
func (*Table) Schema ¶
func (r *Table) Schema() pulumi.StringOutput
A JSON schema for the table. Schema is required for CSV and JSON formats and is disallowed for Google Cloud Bigtable, Cloud Datastore backups, and Avro formats when using external tables. For more information see the [BigQuery API documentation](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource).
func (*Table) SelfLink ¶
func (r *Table) SelfLink() pulumi.StringOutput
The URI of the created resource.
func (*Table) TableId ¶
func (r *Table) TableId() pulumi.StringOutput
A unique ID for the resource. Changing this forces a new resource to be created.
func (*Table) TimePartitioning ¶
func (r *Table) TimePartitioning() pulumi.Output
If specified, configures time-based partitioning for this table. Structure is documented below.
type TableArgs ¶
type TableArgs struct { // Specifies column names to use for data clustering. // Up to four top-level columns are allowed, and should be specified in // descending priority order. Clusterings interface{} // The dataset ID to create the table in. // Changing this forces a new resource to be created. DatasetId interface{} // The field description. Description interface{} // The time when this table expires, in // milliseconds since the epoch. If not present, the table will persist // indefinitely. Expired tables will be deleted and their storage // reclaimed. ExpirationTime interface{} // Describes the data format, // location, and other properties of a table stored outside of BigQuery. // By defining these properties, the data source can then be queried as // if it were a standard BigQuery table. Structure is documented below. ExternalDataConfiguration interface{} // A descriptive name for the table. FriendlyName interface{} // A mapping of labels to assign to the resource. Labels interface{} // The ID of the project in which the resource belongs. If it // is not provided, the provider project is used. Project interface{} // A JSON schema for the table. Schema is required // for CSV and JSON formats and is disallowed for Google Cloud // Bigtable, Cloud Datastore backups, and Avro formats when using // external tables. For more information see the // [BigQuery API documentation](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource). Schema interface{} // A unique ID for the resource. // Changing this forces a new resource to be created. TableId interface{} // If specified, configures time-based // partitioning for this table. Structure is documented below. TimePartitioning interface{} // If specified, configures this table as a view. // Structure is documented below. View interface{} }
The set of arguments for constructing a Table resource.
type TableState ¶
type TableState struct { // Specifies column names to use for data clustering. // Up to four top-level columns are allowed, and should be specified in // descending priority order. Clusterings interface{} // The time when this table was created, in milliseconds since the epoch. CreationTime interface{} // The dataset ID to create the table in. // Changing this forces a new resource to be created. DatasetId interface{} // The field description. Description interface{} // A hash of the resource. Etag interface{} // The time when this table expires, in // milliseconds since the epoch. If not present, the table will persist // indefinitely. Expired tables will be deleted and their storage // reclaimed. ExpirationTime interface{} // Describes the data format, // location, and other properties of a table stored outside of BigQuery. // By defining these properties, the data source can then be queried as // if it were a standard BigQuery table. Structure is documented below. ExternalDataConfiguration interface{} // A descriptive name for the table. FriendlyName interface{} // A mapping of labels to assign to the resource. Labels interface{} // The time when this table was last modified, in milliseconds since the epoch. LastModifiedTime interface{} // The geographic location where the table resides. This value is inherited from the dataset. Location interface{} // The size of this table in bytes, excluding any data in the streaming buffer. NumBytes interface{} // The number of bytes in the table that are considered "long-term storage". NumLongTermBytes interface{} // The number of rows of data in this table, excluding any data in the streaming buffer. NumRows interface{} // The ID of the project in which the resource belongs. If it // is not provided, the provider project is used. Project interface{} // A JSON schema for the table. Schema is required // for CSV and JSON formats and is disallowed for Google Cloud // Bigtable, Cloud Datastore backups, and Avro formats when using // external tables. For more information see the // [BigQuery API documentation](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource). Schema interface{} // The URI of the created resource. SelfLink interface{} // A unique ID for the resource. // Changing this forces a new resource to be created. TableId interface{} // If specified, configures time-based // partitioning for this table. Structure is documented below. TimePartitioning interface{} // Describes the table type. Type interface{} // If specified, configures this table as a view. // Structure is documented below. View interface{} }
Input properties used for looking up and filtering Table resources.