Documentation ¶
Overview ¶
Package bigquery provides a client for the BigQuery service.
Note: This package is a work-in-progress. Backwards-incompatible changes should be expected.
Index ¶
- Constants
- type Client
- func (c *Client) Close() error
- func (c *Client) Copy(ctx context.Context, dst Destination, src Source, options ...Option) (*Job, error)deprecated
- func (c *Client) CreateTable(ctx context.Context, projectID, datasetID, tableID string, ...) (*Table, error)deprecated
- func (c *Client) Dataset(id string) *Dataset
- func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset
- func (c *Client) Datasets(ctx context.Context) *DatasetIterator
- func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator
- func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error)
- func (c *Client) NewGCSReference(uri ...string) *GCSReference
- func (c *Client) OpenTable(projectID, datasetID, tableID string) *Tabledeprecated
- func (c *Client) Query(q string) *Query
- func (c *Client) Read(ctx context.Context, src ReadSource, options ...ReadOption) (*Iterator, error)deprecated
- func (c *Client) Table(projectID, datasetID, tableID string) *Tabledeprecated
- type Compression
- type Copier
- type CopyConfig
- type CreateTableOption
- type DataFormat
- type Dataset
- type DatasetIterator
- type Destination
- type Encoding
- type Error
- type ExternalData
- type ExtractConfig
- type Extractor
- type FieldSchema
- type FieldType
- type GCSReference
- type Iterator
- type Job
- type JobStatus
- type LoadConfig
- type Loader
- type MultiError
- type Option
- func AllowJaggedRows() Optiondeprecated
- func AllowLargeResults() Option
- func AllowQuotedNewlines() Optiondeprecated
- func CreateDisposition(disp TableCreateDisposition) Option
- func DestinationSchema(schema Schema) Optiondeprecated
- func DisableFlattenedResults() Option
- func DisableHeader() Optiondeprecated
- func DisableQueryCache() Optiondeprecated
- func IgnoreUnknownValues() Optiondeprecated
- func JobID(ID string) Option
- func JobPriority(priority string) Option
- func MaxBadRecords(n int64) Optiondeprecated
- func MaxBillingTier(tier int) Option
- func MaxBytesBilled(bytes int64) Option
- func QueryUseStandardSQL() Option
- func WriteDisposition(disp TableWriteDisposition) Option
- type PutMultiError
- type Query
- type QueryConfig
- type QueryPriority
- type ReadOption
- type ReadSource
- type RowInsertionError
- type RowIterator
- type Schema
- type Source
- type State
- type Table
- func (t *Table) CopierFrom(srcs ...*Table) *Copier
- func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error
- func (t *Table) Delete(ctx context.Context) error
- func (t *Table) ExtractorTo(dst *GCSReference) *Extractor
- func (t *Table) FullyQualifiedName() string
- func (t *Table) LoaderFrom(src *GCSReference) *Loader
- func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error)
- func (t *Table) NewUploader(opts ...UploadOption) *Uploader
- func (t *Table) Patch() *TableMetadataPatchdeprecated
- func (t *Table) Read(_ context.Context, options ...ReadOption) (*Iterator, error)
- func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMetadata, error)
- type TableCreateDisposition
- type TableIterator
- type TableMetadata
- type TableMetadataPatch
- type TableMetadataToUpdate
- type TableType
- type TableWriteDisposition
- type Tables
- type UploadOption
- type Uploader
- type Value
- type ValueList
- type ValueLoader
- type ValueSaver
- type ValuesSaver
Examples ¶
Constants ¶
const Scope = "https://www.googleapis.com/auth/bigquery"
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Client ¶
type Client struct {
// contains filtered or unexported fields
}
Client may be used to perform BigQuery operations.
func NewClient ¶
NewClient constructs a new Client which can perform BigQuery operations. Operations performed via the client are billed to the specified GCP project.
Example ¶
package main import ( "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } _ = client // TODO: Use client. }
Output:
func (*Client) Close ¶ added in v0.2.0
Close closes any resources held by the client. Close should be called when the client is no longer needed. It need not be called at program exit.
func (*Client) Copy
deprecated
func (*Client) CreateTable
deprecated
func (*Client) Dataset ¶
Dataset creates a handle to a BigQuery dataset in the client's project.
Example ¶
package main import ( "fmt" "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } ds := client.Dataset("my-dataset") fmt.Println(ds) }
Output:
func (*Client) DatasetInProject ¶ added in v0.2.0
DatasetInProject creates a handle to a BigQuery dataset in the specified project.
Example ¶
package main import ( "fmt" "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } ds := client.DatasetInProject("their-project-id", "their-dataset") fmt.Println(ds) }
Output:
func (*Client) Datasets ¶ added in v0.2.0
func (c *Client) Datasets(ctx context.Context) *DatasetIterator
Datasets returns an iterator over the datasets in the Client's project.
Example ¶
package main import ( "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.Datasets(ctx) _ = it // TODO: iterate using Next or iterator.Pager. }
Output:
func (*Client) DatasetsInProject ¶ added in v0.2.0
func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator
DatasetsInProject returns an iterator over the datasets in the provided project.
Example ¶
package main import ( "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.DatasetsInProject(ctx, "their-project-id") _ = it // TODO: iterate using Next or iterator.Pager. }
Output:
func (*Client) JobFromID ¶
JobFromID creates a Job which refers to an existing BigQuery job. The job need not have been created by this package. For example, the job may have been created in the BigQuery console.
Example ¶
package main import ( "fmt" "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func getJobID() string { return "" } func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere. job, err := client.JobFromID(ctx, jobID) if err != nil { // TODO: Handle error. } fmt.Println(job) }
Output:
func (*Client) NewGCSReference ¶
func (c *Client) NewGCSReference(uri ...string) *GCSReference
NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination. In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object. Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided. Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name. For more information about the treatment of wildcards and multiple URIs, see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
func (*Client) Query ¶ added in v0.2.0
Query creates a query with string q. The returned Query may optionally be further configured before its Run method is called.
func (*Client) Read
deprecated
func (c *Client) Read(ctx context.Context, src ReadSource, options ...ReadOption) (*Iterator, error)
Read fetches data from a ReadSource and returns the data via an Iterator.
Deprecated: use Query.Read, Job.Read or Table.Read instead.
type Compression ¶
type Compression string
Compression is the type of compression to apply when writing data to Google Cloud Storage.
const ( None Compression = "NONE" Gzip Compression = "GZIP" )
type Copier ¶ added in v0.3.0
type Copier struct { CopyConfig // contains filtered or unexported fields }
A Copier copies data into a BigQuery table from one or more BigQuery tables.
type CopyConfig ¶ added in v0.3.0
type CopyConfig struct { // JobID is the ID to use for the copy job. If unset, a job ID will be automatically created. JobID string // Srcs are the tables from which data will be copied. Srcs []*Table // Dst is the table into which the data will be copied. Dst *Table // CreateDisposition specifies the circumstances under which the destination table will be created. // The default is CreateIfNeeded. CreateDisposition TableCreateDisposition // WriteDisposition specifies how existing data in the destination table is treated. // The default is WriteAppend. WriteDisposition TableWriteDisposition }
CopyConfig holds the configuration for a copy job.
type CreateTableOption ¶
type CreateTableOption interface {
// contains filtered or unexported methods
}
A CreateTableOption is an optional argument to CreateTable.
func TableExpiration ¶
func TableExpiration(exp time.Time) CreateTableOption
TableExpiration returns a CreateTableOption that will cause the created table to be deleted after the expiration time.
func UseStandardSQL ¶ added in v0.2.0
func UseStandardSQL() CreateTableOption
UseStandardSQL returns a CreateTableOption to set the table to use standard SQL. The default setting is false (using legacy SQL).
func ViewQuery ¶
func ViewQuery(query string) CreateTableOption
ViewQuery returns a CreateTableOption that causes the created table to be a virtual table defined by the supplied query. For more information see: https://cloud.google.com/bigquery/querying-data#views
type DataFormat ¶
type DataFormat string
const ( CSV DataFormat = "CSV" Avro DataFormat = "AVRO" JSON DataFormat = "NEWLINE_DELIMITED_JSON" DatastoreBackup DataFormat = "DATASTORE_BACKUP" )
type Dataset ¶
type Dataset struct {
// contains filtered or unexported fields
}
Dataset is a reference to a BigQuery dataset.
func (*Dataset) Create ¶
Create creates a dataset in the BigQuery service. An error will be returned if the dataset already exists.
Example ¶
package main import ( "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } if err := client.Dataset("new-dataset").Create(ctx); err != nil { // TODO: Handle error. } }
Output:
func (*Dataset) Table ¶ added in v0.2.0
Table creates a handle to a BigQuery table in the dataset. To determine if a table exists, call Table.Metadata. If the table does not already exist, use Table.Create to create it.
Example ¶
package main import ( "fmt" "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } // Table creates a reference to the table. It does not create the actual // table in BigQuery; to do so, use Table.Create. t := client.Dataset("my-dataset").Table("my-table") fmt.Println(t) }
Output:
func (*Dataset) Tables ¶ added in v0.2.0
func (d *Dataset) Tables(ctx context.Context) *TableIterator
Tables returns an iterator over the tables in the Dataset.
Example ¶
package main import ( "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.Dataset("my-dataset").Tables(ctx) _ = it // TODO: iterate using Next or iterator.Pager. }
Output:
type DatasetIterator ¶ added in v0.2.0
type DatasetIterator struct { // ListHidden causes hidden datasets to be listed when set to true. ListHidden bool // Filter restricts the datasets returned by label. The filter syntax is described in // https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels Filter string // contains filtered or unexported fields }
DatasetIterator iterates over the datasets in a project.
func (*DatasetIterator) Next ¶ added in v0.2.0
func (it *DatasetIterator) Next() (*Dataset, error)
Example ¶
package main import ( "fmt" "cloud.google.com/go/bigquery" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.Datasets(ctx) for { ds, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(ds) } }
Output:
func (*DatasetIterator) PageInfo ¶ added in v0.2.0
func (it *DatasetIterator) PageInfo() *iterator.PageInfo
PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
type Destination ¶
type Destination interface {
// contains filtered or unexported methods
}
A Destination is a destination of data for the Copy function.
type Encoding ¶
type Encoding string
Encoding specifies the character encoding of data to be loaded into BigQuery. See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding for more details about how this is used.
type Error ¶
type Error struct {
// Mirrors bq.ErrorProto, but drops DebugInfo
Location, Message, Reason string
}
An Error contains detailed information about a failed bigquery operation.
type ExternalData ¶ added in v0.3.0
type ExternalData interface {
// contains filtered or unexported methods
}
ExternalData is a table which is stored outside of BigQuery. It is implemented by GCSReference.
type ExtractConfig ¶ added in v0.3.0
type ExtractConfig struct { // JobID is the ID to use for the extract job. If empty, a job ID will be automatically created. JobID string // Src is the table from which data will be extracted. Src *Table // Dst is the destination into which the data will be extracted. Dst *GCSReference // DisableHeader disables the printing of a header row in exported data. DisableHeader bool }
ExtractConfig holds the configuration for an extract job.
type Extractor ¶ added in v0.3.0
type Extractor struct { ExtractConfig // contains filtered or unexported fields }
An Extractor extracts data from a BigQuery table into Google Cloud Storage.
type FieldSchema ¶
type FieldSchema struct { // The field name. // Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), // and must start with a letter or underscore. // The maximum length is 128 characters. Name string // A description of the field. The maximum length is 16,384 characters. Description string // Whether the field may contain multiple values. Repeated bool // Whether the field is required. Ignored if Repeated is true. Required bool // The field data type. If Type is Record, then this field contains a nested schema, // which is described by Schema. Type FieldType // Describes the nested schema if Type is set to Record. Schema Schema }
type GCSReference ¶
type GCSReference struct { // FieldDelimiter is the separator for fields in a CSV file, used when reading or exporting data. // The default is ",". FieldDelimiter string // The number of rows at the top of a CSV file that BigQuery will skip when reading data. SkipLeadingRows int64 // SourceFormat is the format of the GCS data to be read. // Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV. SourceFormat DataFormat // AllowJaggedRows causes missing trailing optional columns to be tolerated when reading CSV data. Missing values are treated as nulls. AllowJaggedRows bool // AllowQuotedNewlines sets whether quoted data sections containing newlines are allowed when reading CSV data. AllowQuotedNewlines bool // Encoding is the character encoding of data to be read. Encoding Encoding // MaxBadRecords is the maximum number of bad records that will be ignored when reading data. MaxBadRecords int64 // IgnoreUnknownValues causes values not matching the schema to be tolerated. // Unknown values are ignored. For CSV this ignores extra values at the end of a line. // For JSON this ignores named values that do not match any column name. // If this field is not set, records containing unknown values are treated as bad records. // The MaxBadRecords field can be used to customize how bad records are handled. IgnoreUnknownValues bool // Schema describes the data. It is required when reading CSV or JSON data, unless the data is being loaded into a table that already exists. Schema Schema // Quote is the value used to quote data sections in a CSV file. // The default quotation character is the double quote ("), which is used if both Quote and ForceZeroQuote are unset. // To specify that no character should be interpreted as a quotation character, set ForceZeroQuote to true. // Only used when reading data. Quote string ForceZeroQuote bool // DestinationFormat is the format to use when writing exported files. // Allowed values are: CSV, Avro, JSON. 
The default is CSV. // CSV is not supported for tables with nested or repeated fields. DestinationFormat DataFormat // Compression specifies the type of compression to apply when writing data to Google Cloud Storage, // or using this GCSReference as an ExternalData source with CSV or JSON SourceFormat. // Default is None. Compression Compression // contains filtered or unexported fields }
GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute an input or output to a BigQuery operation.
type Iterator ¶
type Iterator struct {
// contains filtered or unexported fields
}
Iterator provides access to the result of a BigQuery lookup. Next must be called before the first call to Get.
func (*Iterator) Next ¶
Next advances the Iterator to the next row, making that row available via the Get method. Next must be called before the first call to Get or Schema, and blocks until data is available. Next returns false when there are no more rows available, either because the end of the output was reached, or because there was an error (consult the Err method to determine which).
type Job ¶
type Job struct {
// contains filtered or unexported fields
}
A Job represents an operation which has been submitted to BigQuery for processing.
func (*Job) Cancel ¶ added in v0.2.0
Cancel requests that a job be cancelled. This method returns without waiting for cancellation to take effect. To check whether the job has terminated, use Job.Status. Cancelled jobs may still incur costs.
type JobStatus ¶
type JobStatus struct { State State // All errors encountered during the running of the job. // Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful. Errors []*Error // contains filtered or unexported fields }
JobStatus contains the current State of a job, and errors encountered while processing that job.
type LoadConfig ¶ added in v0.3.0
type LoadConfig struct { // JobID is the ID to use for the load job. If unset, a job ID will be automatically created. JobID string // Src is the source from which data will be loaded. Src *GCSReference // Dst is the table into which the data will be loaded. Dst *Table // CreateDisposition specifies the circumstances under which the destination table will be created. // The default is CreateIfNeeded. CreateDisposition TableCreateDisposition // WriteDisposition specifies how existing data in the destination table is treated. // The default is WriteAppend. WriteDisposition TableWriteDisposition }
LoadConfig holds the configuration for a load job.
type Loader ¶ added in v0.3.0
type Loader struct { LoadConfig // contains filtered or unexported fields }
A Loader loads data from Google Cloud Storage into a BigQuery table.
type MultiError ¶
type MultiError []error
A MultiError contains multiple related errors.
func (MultiError) Error ¶
func (m MultiError) Error() string
type Option ¶
type Option interface {
// contains filtered or unexported methods
}
An Option is an optional argument to Copy.
func AllowJaggedRows
deprecated
func AllowJaggedRows() Option
AllowJaggedRows returns an Option that causes missing trailing optional columns to be tolerated in CSV data. Missing values are treated as nulls.
Deprecated: use GCSReference.AllowJaggedRows instead.
func AllowLargeResults ¶
func AllowLargeResults() Option
AllowLargeResults returns an Option that allows the query to produce arbitrarily large result tables. The destination must be a table. When using this option, queries will take longer to execute, even if the result set is small. For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults Deprecated: use Query.AllowLargeResults instead.
func AllowQuotedNewlines
deprecated
func AllowQuotedNewlines() Option
AllowQuotedNewlines returns an Option that allows quoted data sections containing newlines in CSV data.
Deprecated: use GCSReference.AllowQuotedNewlines instead.
func CreateDisposition ¶
func CreateDisposition(disp TableCreateDisposition) Option
CreateDisposition returns an Option that specifies the TableCreateDisposition to use. Deprecated: use the CreateDisposition field in Query, CopyConfig or LoadConfig instead.
func DestinationSchema
deprecated
DestinationSchema returns an Option that specifies the schema to use when loading data into a new table. A DestinationSchema Option must be supplied when loading data from Google Cloud Storage into a non-existent table. Caveat: DestinationSchema is not required if the data being loaded is a datastore backup. schema must not be nil.
Deprecated: use GCSReference.Schema instead.
func DisableFlattenedResults ¶
func DisableFlattenedResults() Option
DisableFlattenedResults returns an Option that prevents results being flattened. If this Option is not used, results from nested and repeated fields are flattened. DisableFlattenedResults implies AllowLargeResults. For more information, see https://cloud.google.com/bigquery/docs/data#nested Deprecated: use Query.DisableFlattenedResults instead.
func DisableHeader
deprecated
func DisableHeader() Option
DisableHeader returns an Option that disables the printing of a header row in exported data.
Deprecated: use Extractor.DisableHeader instead.
func DisableQueryCache
deprecated
func DisableQueryCache() Option
DisableQueryCache returns an Option that prevents results being fetched from the query cache. If this Option is not used, results are fetched from the cache if they are available. The query cache is a best-effort cache that is flushed whenever tables in the query are modified. Cached results are only available when TableID is unspecified in the query's destination Table. For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
Deprecated: use Query.DisableQueryCache instead.
func IgnoreUnknownValues
deprecated
func IgnoreUnknownValues() Option
IgnoreUnknownValues returns an Option that causes values not matching the schema to be tolerated. Unknown values are ignored. For CSV this ignores extra values at the end of a line. For JSON this ignores named values that do not match any column name. If this Option is not used, records containing unknown values are treated as bad records. The MaxBadRecords Option can be used to customize how bad records are handled.
Deprecated: use GCSReference.IgnoreUnknownValues instead.
func JobID ¶
JobID returns an Option that sets the job ID of a BigQuery job. If this Option is not used, a job ID is generated automatically.
func JobPriority ¶
JobPriority returns an Option that causes a query to be scheduled with the specified priority. The default priority is InteractivePriority. For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries Deprecated: use Query.Priority instead.
func MaxBadRecords
deprecated
func MaxBillingTier ¶ added in v0.2.0
MaxBillingTier returns an Option that sets the maximum billing tier for a Query. Queries that have resource usage beyond this tier will fail (without incurring a charge). If this Option is not used, the project default will be used. Deprecated: use Query.MaxBillingTier instead.
func MaxBytesBilled ¶ added in v0.2.0
MaxBytesBilled returns an Option that limits the number of bytes billed for this job. Queries that would exceed this limit will fail (without incurring a charge). If this Option is not used, or bytes is < 1, the project default will be used. Deprecated: use Query.MaxBytesBilled instead.
func QueryUseStandardSQL ¶ added in v0.2.0
func QueryUseStandardSQL() Option
QueryUseStandardSQL returns an Option that sets the query to use standard SQL. The default setting is false (using legacy SQL). Deprecated: use Query.UseStandardSQL instead.
func WriteDisposition ¶
func WriteDisposition(disp TableWriteDisposition) Option
WriteDisposition returns an Option that specifies the TableWriteDisposition to use. Deprecated: use the WriteDisposition field in Query, CopyConfig or LoadConfig instead.
type PutMultiError ¶
type PutMultiError []RowInsertionError
PutMultiError contains an error for each row which was not successfully inserted into a BigQuery table.
func (PutMultiError) Error ¶
func (pme PutMultiError) Error() string
type Query ¶
type Query struct { QueryConfig // The query to execute. See https://cloud.google.com/bigquery/query-reference for details. // // Deprecated: use QueryConfig.Q instead. Q string // DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query. // If DefaultProjectID is set, DefaultDatasetID must also be set. DefaultProjectID string // Deprecated: use QueryConfig.DefaultProjectID instead. DefaultDatasetID string // Deprecated: use QueryConfig.DefaultDatasetID instead. // TableDefinitions describes data sources outside of BigQuery. // The map keys may be used as table names in the query string. // // Deprecated: use QueryConfig.TableDefinitions instead. TableDefinitions map[string]ExternalData // contains filtered or unexported fields }
A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type QueryConfig ¶ added in v0.3.0
type QueryConfig struct { // JobID is the ID to use for the query job. If this field is empty, a job ID // will be automatically created. JobID string // Dst is the table into which the results of the query will be written. // If this field is nil, a temporary table will be created. Dst *Table // The query to execute. See https://cloud.google.com/bigquery/query-reference for details. Q string // DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query. // If DefaultProjectID is set, DefaultDatasetID must also be set. DefaultProjectID string DefaultDatasetID string // TableDefinitions describes data sources outside of BigQuery. // The map keys may be used as table names in the query string. TableDefinitions map[string]ExternalData // CreateDisposition specifies the circumstances under which the destination table will be created. // The default is CreateIfNeeded. CreateDisposition TableCreateDisposition // WriteDisposition specifies how existing data in the destination table is treated. // The default is WriteAppend. WriteDisposition TableWriteDisposition // DisableQueryCache prevents results being fetched from the query cache. // If this field is false, results are fetched from the cache if they are available. // The query cache is a best-effort cache that is flushed whenever tables in the query are modified. // Cached results are only available when TableID is unspecified in the query's destination Table. // For more information, see https://cloud.google.com/bigquery/querying-data#querycaching DisableQueryCache bool // DisableFlattenedResults prevents results being flattened. // If this field is false, results from nested and repeated fields are flattened. // DisableFlattenedResults implies AllowLargeResults // For more information, see https://cloud.google.com/bigquery/docs/data#nested DisableFlattenedResults bool // AllowLargeResults allows the query to produce arbitrarily large result tables. 
// The destination must be a table. // When using this option, queries will take longer to execute, even if the result set is small. // For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults AllowLargeResults bool // Priority specifies the priority with which to schedule the query. // The default priority is InteractivePriority. // For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries Priority QueryPriority // MaxBillingTier sets the maximum billing tier for a Query. // Queries that have resource usage beyond this tier will fail (without // incurring a charge). If this field is zero, the project default will be used. MaxBillingTier int // MaxBytesBilled limits the number of bytes billed for // this job. Queries that would exceed this limit will fail (without incurring // a charge). // If this field is less than 1, the project default will be // used. MaxBytesBilled int64 // UseStandardSQL causes the query to use standard SQL. // The default is false (using legacy SQL). UseStandardSQL bool }
QueryConfig holds the configuration for a query job.
type QueryPriority ¶ added in v0.3.0
type QueryPriority string
QueryPriority specifies a priority with which a query is to be executed.
const ( BatchPriority QueryPriority = "BATCH" InteractivePriority QueryPriority = "INTERACTIVE" )
type ReadOption ¶
type ReadOption interface {
// contains filtered or unexported methods
}
A ReadOption is an optional argument to Read.
func RecordsPerRequest ¶
func RecordsPerRequest(n int64) ReadOption
RecordsPerRequest returns a ReadOption that sets the number of records to fetch per request when streaming data from BigQuery.
func StartIndex ¶
func StartIndex(i uint64) ReadOption
StartIndex returns a ReadOption that sets the zero-based index of the row to start reading from.
type ReadSource ¶
type ReadSource interface {
// contains filtered or unexported methods
}
A ReadSource is a source of data for the Read function.
type RowInsertionError ¶
type RowInsertionError struct { InsertID string // The InsertID associated with the affected row. RowIndex int // The 0-based index of the affected row in the batch of rows being inserted. Errors MultiError }
RowInsertionError contains all errors that occurred when attempting to insert a row.
func (*RowInsertionError) Error ¶
func (e *RowInsertionError) Error() string
type RowIterator ¶ added in v0.3.0
type RowIterator struct { // StartIndex can be set before the first call to Next. If PageInfo().PageToken // is also set, StartIndex is ignored. StartIndex uint64 // contains filtered or unexported fields }
A RowIterator provides access to the result of a BigQuery lookup.
func (*RowIterator) Next ¶ added in v0.3.0
func (it *RowIterator) Next(dst ValueLoader) error
Next loads the next row into dst. Its return value is iterator.Done if there are no more results. Once Next returns iterator.Done, all subsequent calls will return iterator.Done.
func (*RowIterator) PageInfo ¶ added in v0.3.0
func (it *RowIterator) PageInfo() *iterator.PageInfo
PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (*RowIterator) Schema ¶ added in v0.3.0
func (it *RowIterator) Schema() (Schema, error)
Schema returns the schema of the result rows.
type Schema ¶
type Schema []*FieldSchema
Schema describes the fields in a table or query result.
func InferSchema ¶
InferSchema tries to derive a BigQuery schema from the supplied struct value. NOTE: All fields in the returned Schema are configured to be required, unless the corresponding field in the supplied struct is a slice or array. It is considered an error if the struct (including nested structs) contains any exported fields that are pointers or one of the following types: map, interface, complex64, complex128, func, chan. In these cases, an error will be returned. Future versions may handle these cases without error.
Example ¶
package main import ( "fmt" "cloud.google.com/go/bigquery" ) func main() { type Item struct { Name string Size float64 Count int } schema, err := bigquery.InferSchema(Item{}) if err != nil { fmt.Println(err) // TODO: Handle error. } for _, fs := range schema { fmt.Println(fs.Name, fs.Type) } }
Output: Name STRING Size FLOAT Count INTEGER
type Source ¶
type Source interface {
// contains filtered or unexported methods
}
A Source is a source of data for the Copy function.
type State ¶
type State int
State is one of a sequence of states that a Job progresses through as it is processed.
type Table ¶
type Table struct { // ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query. // In this case the result will be stored in an ephemeral table. ProjectID string DatasetID string // TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). // The maximum length is 1,024 characters. TableID string // contains filtered or unexported fields }
A Table is a reference to a BigQuery table.
func (*Table) CopierFrom ¶ added in v0.3.0
CopierFrom returns a Copier which can be used to copy data into a BigQuery table from one or more BigQuery tables. The returned Copier may optionally be further configured before its Run method is called.
func (*Table) Create ¶ added in v0.2.0
func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error
Create creates a table in the BigQuery service.
Example ¶
package main import ( "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } t := client.Dataset("my-dataset").Table("new-table") if err := t.Create(ctx); err != nil { // TODO: Handle error. } }
Output:
func (*Table) Delete ¶
Delete deletes the table.
Example ¶
package main import ( "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } if err := client.Dataset("my-dataset").Table("my-table").Delete(ctx); err != nil { // TODO: Handle error. } }
Output:
func (*Table) ExtractorTo ¶ added in v0.3.0
func (t *Table) ExtractorTo(dst *GCSReference) *Extractor
ExtractorTo returns an Extractor which can be used to extract data from a BigQuery table into Google Cloud Storage. The returned Extractor may optionally be further configured before its Run method is called.
func (*Table) FullyQualifiedName ¶
FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format.
func (*Table) LoaderFrom ¶ added in v0.3.0
func (t *Table) LoaderFrom(src *GCSReference) *Loader
LoaderFrom returns a Loader which can be used to load data from Google Cloud Storage into a BigQuery table. The returned Loader may optionally be further configured before its Run method is called.
func (*Table) Metadata ¶
func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error)
Metadata fetches the metadata for the table.
Example ¶
package main import ( "fmt" "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } md, err := client.Dataset("my-dataset").Table("my-table").Metadata(ctx) if err != nil { // TODO: Handle error. } fmt.Println(md) }
Output:
func (*Table) NewUploader ¶
func (t *Table) NewUploader(opts ...UploadOption) *Uploader
NewUploader returns an *Uploader that can be used to append rows to t.
Example ¶
package main import ( "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } u := client.Dataset("my-dataset").Table("my-table").NewUploader() _ = u // TODO: Use u. }
Output:
func (*Table) Patch
deprecated
func (t *Table) Patch() *TableMetadataPatch
Patch returns a *TableMetadataPatch, which can be used to modify specific Table metadata fields. In order to apply the changes, the TableMetadataPatch's Apply method must be called.
Deprecated: use Table.Update instead.
func (*Table) Update ¶ added in v0.3.0
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMetadata, error)
Update modifies specific Table metadata fields.
type TableCreateDisposition ¶
type TableCreateDisposition string
TableCreateDisposition specifies the circumstances under which the destination table will be created. The default is CreateIfNeeded.
const ( // The table will be created if it does not already exist. Tables are created atomically on successful completion of a job. CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED" // The table must already exist and will not be automatically created. CreateNever TableCreateDisposition = "CREATE_NEVER" )
type TableIterator ¶ added in v0.2.0
type TableIterator struct {
// contains filtered or unexported fields
}
A TableIterator is an iterator over Tables.
func (*TableIterator) Next ¶ added in v0.2.0
func (it *TableIterator) Next() (*Table, error)
Next returns the next result. Its second return value is iterator.Done if there are no more results. Once Next returns iterator.Done, all subsequent calls will return iterator.Done.
Example ¶
package main import ( "fmt" "cloud.google.com/go/bigquery" "golang.org/x/net/context" "google.golang.org/api/iterator" ) func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } it := client.Dataset("my-dataset").Tables(ctx) for { t, err := it.Next() if err == iterator.Done { break } if err != nil { // TODO: Handle error. } fmt.Println(t) } }
Output:
func (*TableIterator) PageInfo ¶ added in v0.2.0
func (it *TableIterator) PageInfo() *iterator.PageInfo
PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
type TableMetadata ¶
type TableMetadata struct { Description string // The user-friendly description of this table. Name string // The user-friendly name for this table. Schema Schema View string ID string // An opaque ID uniquely identifying the table. Type TableType // The time when this table expires. If not set, the table will persist // indefinitely. Expired tables will be deleted and their storage reclaimed. ExpirationTime time.Time CreationTime time.Time LastModifiedTime time.Time // The size of the table in bytes. // This does not include data that is being buffered during a streaming insert. NumBytes int64 // The number of rows of data in this table. // This does not include data that is being buffered during a streaming insert. NumRows uint64 }
TableMetadata contains information about a BigQuery table.
type TableMetadataPatch ¶
type TableMetadataPatch struct {
// contains filtered or unexported fields
}
TableMetadataPatch represents a set of changes to a table's metadata.
func (*TableMetadataPatch) Apply
deprecated
func (p *TableMetadataPatch) Apply(ctx context.Context) (*TableMetadata, error)
Apply applies the patch operation.
Deprecated: use Table.Update instead.
func (*TableMetadataPatch) Description
deprecated
func (p *TableMetadataPatch) Description(desc string)
Description sets the table description.
Deprecated: use Table.Update instead.
func (*TableMetadataPatch) Name
deprecated
func (p *TableMetadataPatch) Name(name string)
Name sets the table name.
Deprecated: use Table.Update instead.
type TableMetadataToUpdate ¶ added in v0.3.0
type TableMetadataToUpdate struct { // Description is the user-friendly description of this table. Description optional.String // Name is the user-friendly name for this table. Name optional.String }
TableMetadataToUpdate is used when updating a table's metadata. Only non-nil fields will be updated.
type TableWriteDisposition ¶
type TableWriteDisposition string
TableWriteDisposition specifies how existing data in a destination table is treated. The default is WriteAppend.
const ( // Data will be appended to any existing data in the destination table. // Data is appended atomically on successful completion of a job. WriteAppend TableWriteDisposition = "WRITE_APPEND" // Existing data in the destination table will be overwritten. // Data is overwritten atomically on successful completion of a job. WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE" // Writes will fail if the destination table already contains data. WriteEmpty TableWriteDisposition = "WRITE_EMPTY" )
type Tables ¶
type Tables []*Table
Tables is a group of tables. The tables may belong to differing projects or datasets.
type UploadOption ¶
type UploadOption interface {
// contains filtered or unexported methods
}
An UploadOption is an optional argument to NewUploader.
func SkipInvalidRows ¶
func SkipInvalidRows() UploadOption
SkipInvalidRows returns an UploadOption that causes rows containing invalid data to be silently ignored. The default value is false, which causes the entire request to fail if there is an attempt to insert an invalid row.
func TableTemplateSuffix ¶
func TableTemplateSuffix(suffix string) UploadOption
TableTemplateSuffix returns an UploadOption that allows Uploaders to create tables automatically.
Experimental: this option is experimental and may be modified or removed in future versions, regardless of any other documented package stability guarantees.
When you specify a suffix, the table you upload data to will be used as a template for creating a new table, with the same schema, called <table> + <suffix>.
More information is available at https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
func UploadIgnoreUnknownValues ¶
func UploadIgnoreUnknownValues() UploadOption
UploadIgnoreUnknownValues returns an UploadOption that causes values not matching the schema to be ignored. If this option is not used, records containing such values are treated as invalid records.
type Uploader ¶
type Uploader struct {
// contains filtered or unexported fields
}
An Uploader does streaming inserts into a BigQuery table. It is safe for concurrent use.
func (*Uploader) Put ¶
Put uploads one or more rows to the BigQuery service. src must implement ValueSaver or be a slice of ValueSavers. Put returns a PutMultiError if one or more rows failed to be uploaded. The PutMultiError contains a RowInsertionError for each failed row.
Example ¶
package main import ( "cloud.google.com/go/bigquery" "golang.org/x/net/context" ) type Item struct { Name string Size float64 Count int } // Save implements the ValueSaver interface. func (i *Item) Save() (map[string]bigquery.Value, string, error) { return map[string]bigquery.Value{ "Name": i.Name, "Size": i.Size, "Count": i.Count, }, "", nil } func main() { ctx := context.Background() client, err := bigquery.NewClient(ctx, "project-id") if err != nil { // TODO: Handle error. } u := client.Dataset("my-dataset").Table("my-table").NewUploader() // Item implements the ValueSaver interface. items := []*Item{ {Name: "n1", Size: 32.6, Count: 7}, {Name: "n2", Size: 4, Count: 2}, {Name: "n3", Size: 101.5, Count: 1}, } if err := u.Put(ctx, items); err != nil { // TODO: Handle error. } }
Output:
type Value ¶
type Value interface{}
Value stores the contents of a single cell from a BigQuery result.
type ValueLoader ¶
ValueLoader stores a slice of Values representing a result row from a Read operation. See Iterator.Get for more information.
type ValueSaver ¶
type ValueSaver interface { // Save returns a row to be inserted into a BigQuery table, represented // as a map from field name to Value. // If insertID is non-empty, BigQuery will use it to de-duplicate // insertions of this row on a best-effort basis. Save() (row map[string]Value, insertID string, err error) }
A ValueSaver returns a row of data to be inserted into a table.
type ValuesSaver ¶
type ValuesSaver struct { Schema Schema // If non-empty, BigQuery will use InsertID to de-duplicate insertions // of this row on a best-effort basis. InsertID string Row []Value }
ValuesSaver implements ValueSaver for a slice of Values.