Documentation ¶
Index ¶
- Variables
- func RegisterBigQueryReadServer(s *grpc.Server, srv BigQueryReadServer)
- func RegisterBigQueryWriteServer(s *grpc.Server, srv BigQueryWriteServer)
- type AppendRowsRequest
- func (*AppendRowsRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *AppendRowsRequest) GetMissingValueInterpretations() map[string]AppendRowsRequest_MissingValueInterpretation
- func (x *AppendRowsRequest) GetOffset() *wrapperspb.Int64Value
- func (x *AppendRowsRequest) GetProtoRows() *AppendRowsRequest_ProtoData
- func (m *AppendRowsRequest) GetRows() isAppendRowsRequest_Rows
- func (x *AppendRowsRequest) GetTraceId() string
- func (x *AppendRowsRequest) GetWriteStream() string
- func (*AppendRowsRequest) ProtoMessage()
- func (x *AppendRowsRequest) ProtoReflect() protoreflect.Message
- func (x *AppendRowsRequest) Reset()
- func (x *AppendRowsRequest) String() string
- type AppendRowsRequest_MissingValueInterpretation
- func (AppendRowsRequest_MissingValueInterpretation) Descriptor() protoreflect.EnumDescriptor
- func (x AppendRowsRequest_MissingValueInterpretation) Enum() *AppendRowsRequest_MissingValueInterpretation
- func (AppendRowsRequest_MissingValueInterpretation) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x AppendRowsRequest_MissingValueInterpretation) Number() protoreflect.EnumNumber
- func (x AppendRowsRequest_MissingValueInterpretation) String() string
- func (AppendRowsRequest_MissingValueInterpretation) Type() protoreflect.EnumType
- type AppendRowsRequest_ProtoData
- func (*AppendRowsRequest_ProtoData) Descriptor() ([]byte, []int) (deprecated)
- func (x *AppendRowsRequest_ProtoData) GetRows() *ProtoRows
- func (x *AppendRowsRequest_ProtoData) GetWriterSchema() *ProtoSchema
- func (*AppendRowsRequest_ProtoData) ProtoMessage()
- func (x *AppendRowsRequest_ProtoData) ProtoReflect() protoreflect.Message
- func (x *AppendRowsRequest_ProtoData) Reset()
- func (x *AppendRowsRequest_ProtoData) String() string
- type AppendRowsRequest_ProtoRows
- type AppendRowsResponse
- func (*AppendRowsResponse) Descriptor() ([]byte, []int) (deprecated)
- func (x *AppendRowsResponse) GetAppendResult() *AppendRowsResponse_AppendResult
- func (x *AppendRowsResponse) GetError() *status.Status
- func (m *AppendRowsResponse) GetResponse() isAppendRowsResponse_Response
- func (x *AppendRowsResponse) GetRowErrors() []*RowError
- func (x *AppendRowsResponse) GetUpdatedSchema() *TableSchema
- func (x *AppendRowsResponse) GetWriteStream() string
- func (*AppendRowsResponse) ProtoMessage()
- func (x *AppendRowsResponse) ProtoReflect() protoreflect.Message
- func (x *AppendRowsResponse) Reset()
- func (x *AppendRowsResponse) String() string
- type AppendRowsResponse_AppendResult
- func (*AppendRowsResponse_AppendResult) Descriptor() ([]byte, []int) (deprecated)
- func (x *AppendRowsResponse_AppendResult) GetOffset() *wrapperspb.Int64Value
- func (*AppendRowsResponse_AppendResult) ProtoMessage()
- func (x *AppendRowsResponse_AppendResult) ProtoReflect() protoreflect.Message
- func (x *AppendRowsResponse_AppendResult) Reset()
- func (x *AppendRowsResponse_AppendResult) String() string
- type AppendRowsResponse_AppendResult_
- type AppendRowsResponse_Error
- type ArrowRecordBatch
- func (*ArrowRecordBatch) Descriptor() ([]byte, []int) (deprecated)
- func (x *ArrowRecordBatch) GetRowCount() int64 (deprecated)
- func (x *ArrowRecordBatch) GetSerializedRecordBatch() []byte
- func (*ArrowRecordBatch) ProtoMessage()
- func (x *ArrowRecordBatch) ProtoReflect() protoreflect.Message
- func (x *ArrowRecordBatch) Reset()
- func (x *ArrowRecordBatch) String() string
- type ArrowSchema
- type ArrowSerializationOptions
- func (*ArrowSerializationOptions) Descriptor() ([]byte, []int) (deprecated)
- func (x *ArrowSerializationOptions) GetBufferCompression() ArrowSerializationOptions_CompressionCodec
- func (*ArrowSerializationOptions) ProtoMessage()
- func (x *ArrowSerializationOptions) ProtoReflect() protoreflect.Message
- func (x *ArrowSerializationOptions) Reset()
- func (x *ArrowSerializationOptions) String() string
- type ArrowSerializationOptions_CompressionCodec
- func (ArrowSerializationOptions_CompressionCodec) Descriptor() protoreflect.EnumDescriptor
- func (x ArrowSerializationOptions_CompressionCodec) Enum() *ArrowSerializationOptions_CompressionCodec
- func (ArrowSerializationOptions_CompressionCodec) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x ArrowSerializationOptions_CompressionCodec) Number() protoreflect.EnumNumber
- func (x ArrowSerializationOptions_CompressionCodec) String() string
- func (ArrowSerializationOptions_CompressionCodec) Type() protoreflect.EnumType
- type AvroRows
- func (*AvroRows) Descriptor() ([]byte, []int) (deprecated)
- func (x *AvroRows) GetRowCount() int64 (deprecated)
- func (x *AvroRows) GetSerializedBinaryRows() []byte
- func (*AvroRows) ProtoMessage()
- func (x *AvroRows) ProtoReflect() protoreflect.Message
- func (x *AvroRows) Reset()
- func (x *AvroRows) String() string
- type AvroSchema
- type AvroSerializationOptions
- func (*AvroSerializationOptions) Descriptor() ([]byte, []int) (deprecated)
- func (x *AvroSerializationOptions) GetEnableDisplayNameAttribute() bool
- func (*AvroSerializationOptions) ProtoMessage()
- func (x *AvroSerializationOptions) ProtoReflect() protoreflect.Message
- func (x *AvroSerializationOptions) Reset()
- func (x *AvroSerializationOptions) String() string
- type BatchCommitWriteStreamsRequest
- func (*BatchCommitWriteStreamsRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *BatchCommitWriteStreamsRequest) GetParent() string
- func (x *BatchCommitWriteStreamsRequest) GetWriteStreams() []string
- func (*BatchCommitWriteStreamsRequest) ProtoMessage()
- func (x *BatchCommitWriteStreamsRequest) ProtoReflect() protoreflect.Message
- func (x *BatchCommitWriteStreamsRequest) Reset()
- func (x *BatchCommitWriteStreamsRequest) String() string
- type BatchCommitWriteStreamsResponse
- func (*BatchCommitWriteStreamsResponse) Descriptor() ([]byte, []int) (deprecated)
- func (x *BatchCommitWriteStreamsResponse) GetCommitTime() *timestamppb.Timestamp
- func (x *BatchCommitWriteStreamsResponse) GetStreamErrors() []*StorageError
- func (*BatchCommitWriteStreamsResponse) ProtoMessage()
- func (x *BatchCommitWriteStreamsResponse) ProtoReflect() protoreflect.Message
- func (x *BatchCommitWriteStreamsResponse) Reset()
- func (x *BatchCommitWriteStreamsResponse) String() string
- type BigQueryReadClient
- type BigQueryReadServer
- type BigQueryRead_ReadRowsClient
- type BigQueryRead_ReadRowsServer
- type BigQueryWriteClient
- type BigQueryWriteServer
- type BigQueryWrite_AppendRowsClient
- type BigQueryWrite_AppendRowsServer
- type CreateReadSessionRequest
- func (*CreateReadSessionRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *CreateReadSessionRequest) GetMaxStreamCount() int32
- func (x *CreateReadSessionRequest) GetParent() string
- func (x *CreateReadSessionRequest) GetPreferredMinStreamCount() int32
- func (x *CreateReadSessionRequest) GetReadSession() *ReadSession
- func (*CreateReadSessionRequest) ProtoMessage()
- func (x *CreateReadSessionRequest) ProtoReflect() protoreflect.Message
- func (x *CreateReadSessionRequest) Reset()
- func (x *CreateReadSessionRequest) String() string
- type CreateWriteStreamRequest
- func (*CreateWriteStreamRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *CreateWriteStreamRequest) GetParent() string
- func (x *CreateWriteStreamRequest) GetWriteStream() *WriteStream
- func (*CreateWriteStreamRequest) ProtoMessage()
- func (x *CreateWriteStreamRequest) ProtoReflect() protoreflect.Message
- func (x *CreateWriteStreamRequest) Reset()
- func (x *CreateWriteStreamRequest) String() string
- type DataFormat
- type FinalizeWriteStreamRequest
- func (*FinalizeWriteStreamRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *FinalizeWriteStreamRequest) GetName() string
- func (*FinalizeWriteStreamRequest) ProtoMessage()
- func (x *FinalizeWriteStreamRequest) ProtoReflect() protoreflect.Message
- func (x *FinalizeWriteStreamRequest) Reset()
- func (x *FinalizeWriteStreamRequest) String() string
- type FinalizeWriteStreamResponse
- func (*FinalizeWriteStreamResponse) Descriptor() ([]byte, []int) (deprecated)
- func (x *FinalizeWriteStreamResponse) GetRowCount() int64
- func (*FinalizeWriteStreamResponse) ProtoMessage()
- func (x *FinalizeWriteStreamResponse) ProtoReflect() protoreflect.Message
- func (x *FinalizeWriteStreamResponse) Reset()
- func (x *FinalizeWriteStreamResponse) String() string
- type FlushRowsRequest
- func (*FlushRowsRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *FlushRowsRequest) GetOffset() *wrapperspb.Int64Value
- func (x *FlushRowsRequest) GetWriteStream() string
- func (*FlushRowsRequest) ProtoMessage()
- func (x *FlushRowsRequest) ProtoReflect() protoreflect.Message
- func (x *FlushRowsRequest) Reset()
- func (x *FlushRowsRequest) String() string
- type FlushRowsResponse
- type GetWriteStreamRequest
- func (*GetWriteStreamRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *GetWriteStreamRequest) GetName() string
- func (x *GetWriteStreamRequest) GetView() WriteStreamView
- func (*GetWriteStreamRequest) ProtoMessage()
- func (x *GetWriteStreamRequest) ProtoReflect() protoreflect.Message
- func (x *GetWriteStreamRequest) Reset()
- func (x *GetWriteStreamRequest) String() string
- type ProtoRows
- type ProtoSchema
- type ReadRowsRequest
- func (*ReadRowsRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *ReadRowsRequest) GetOffset() int64
- func (x *ReadRowsRequest) GetReadStream() string
- func (*ReadRowsRequest) ProtoMessage()
- func (x *ReadRowsRequest) ProtoReflect() protoreflect.Message
- func (x *ReadRowsRequest) Reset()
- func (x *ReadRowsRequest) String() string
- type ReadRowsResponse
- func (*ReadRowsResponse) Descriptor() ([]byte, []int) (deprecated)
- func (x *ReadRowsResponse) GetArrowRecordBatch() *ArrowRecordBatch
- func (x *ReadRowsResponse) GetArrowSchema() *ArrowSchema
- func (x *ReadRowsResponse) GetAvroRows() *AvroRows
- func (x *ReadRowsResponse) GetAvroSchema() *AvroSchema
- func (x *ReadRowsResponse) GetRowCount() int64
- func (m *ReadRowsResponse) GetRows() isReadRowsResponse_Rows
- func (m *ReadRowsResponse) GetSchema() isReadRowsResponse_Schema
- func (x *ReadRowsResponse) GetStats() *StreamStats
- func (x *ReadRowsResponse) GetThrottleState() *ThrottleState
- func (*ReadRowsResponse) ProtoMessage()
- func (x *ReadRowsResponse) ProtoReflect() protoreflect.Message
- func (x *ReadRowsResponse) Reset()
- func (x *ReadRowsResponse) String() string
- type ReadRowsResponse_ArrowRecordBatch
- type ReadRowsResponse_ArrowSchema
- type ReadRowsResponse_AvroRows
- type ReadRowsResponse_AvroSchema
- type ReadSession
- func (*ReadSession) Descriptor() ([]byte, []int) (deprecated)
- func (x *ReadSession) GetArrowSchema() *ArrowSchema
- func (x *ReadSession) GetAvroSchema() *AvroSchema
- func (x *ReadSession) GetDataFormat() DataFormat
- func (x *ReadSession) GetEstimatedRowCount() int64
- func (x *ReadSession) GetEstimatedTotalBytesScanned() int64
- func (x *ReadSession) GetExpireTime() *timestamppb.Timestamp
- func (x *ReadSession) GetName() string
- func (x *ReadSession) GetReadOptions() *ReadSession_TableReadOptions
- func (m *ReadSession) GetSchema() isReadSession_Schema
- func (x *ReadSession) GetStreams() []*ReadStream
- func (x *ReadSession) GetTable() string
- func (x *ReadSession) GetTableModifiers() *ReadSession_TableModifiers
- func (x *ReadSession) GetTraceId() string
- func (*ReadSession) ProtoMessage()
- func (x *ReadSession) ProtoReflect() protoreflect.Message
- func (x *ReadSession) Reset()
- func (x *ReadSession) String() string
- type ReadSession_ArrowSchema
- type ReadSession_AvroSchema
- type ReadSession_TableModifiers
- func (*ReadSession_TableModifiers) Descriptor() ([]byte, []int) (deprecated)
- func (x *ReadSession_TableModifiers) GetSnapshotTime() *timestamppb.Timestamp
- func (*ReadSession_TableModifiers) ProtoMessage()
- func (x *ReadSession_TableModifiers) ProtoReflect() protoreflect.Message
- func (x *ReadSession_TableModifiers) Reset()
- func (x *ReadSession_TableModifiers) String() string
- type ReadSession_TableReadOptions
- func (*ReadSession_TableReadOptions) Descriptor() ([]byte, []int) (deprecated)
- func (x *ReadSession_TableReadOptions) GetArrowSerializationOptions() *ArrowSerializationOptions
- func (x *ReadSession_TableReadOptions) GetAvroSerializationOptions() *AvroSerializationOptions
- func (m *ReadSession_TableReadOptions) GetOutputFormatSerializationOptions() isReadSession_TableReadOptions_OutputFormatSerializationOptions
- func (x *ReadSession_TableReadOptions) GetRowRestriction() string
- func (x *ReadSession_TableReadOptions) GetSelectedFields() []string
- func (*ReadSession_TableReadOptions) ProtoMessage()
- func (x *ReadSession_TableReadOptions) ProtoReflect() protoreflect.Message
- func (x *ReadSession_TableReadOptions) Reset()
- func (x *ReadSession_TableReadOptions) String() string
- type ReadSession_TableReadOptions_ArrowSerializationOptions
- type ReadSession_TableReadOptions_AvroSerializationOptions
- type ReadStream
- type RowError
- func (*RowError) Descriptor() ([]byte, []int) (deprecated)
- func (x *RowError) GetCode() RowError_RowErrorCode
- func (x *RowError) GetIndex() int64
- func (x *RowError) GetMessage() string
- func (*RowError) ProtoMessage()
- func (x *RowError) ProtoReflect() protoreflect.Message
- func (x *RowError) Reset()
- func (x *RowError) String() string
- type RowError_RowErrorCode
- func (RowError_RowErrorCode) Descriptor() protoreflect.EnumDescriptor
- func (x RowError_RowErrorCode) Enum() *RowError_RowErrorCode
- func (RowError_RowErrorCode) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x RowError_RowErrorCode) Number() protoreflect.EnumNumber
- func (x RowError_RowErrorCode) String() string
- func (RowError_RowErrorCode) Type() protoreflect.EnumType
- type SplitReadStreamRequest
- func (*SplitReadStreamRequest) Descriptor() ([]byte, []int) (deprecated)
- func (x *SplitReadStreamRequest) GetFraction() float64
- func (x *SplitReadStreamRequest) GetName() string
- func (*SplitReadStreamRequest) ProtoMessage()
- func (x *SplitReadStreamRequest) ProtoReflect() protoreflect.Message
- func (x *SplitReadStreamRequest) Reset()
- func (x *SplitReadStreamRequest) String() string
- type SplitReadStreamResponse
- func (*SplitReadStreamResponse) Descriptor() ([]byte, []int) (deprecated)
- func (x *SplitReadStreamResponse) GetPrimaryStream() *ReadStream
- func (x *SplitReadStreamResponse) GetRemainderStream() *ReadStream
- func (*SplitReadStreamResponse) ProtoMessage()
- func (x *SplitReadStreamResponse) ProtoReflect() protoreflect.Message
- func (x *SplitReadStreamResponse) Reset()
- func (x *SplitReadStreamResponse) String() string
- type StorageError
- func (*StorageError) Descriptor() ([]byte, []int) (deprecated)
- func (x *StorageError) GetCode() StorageError_StorageErrorCode
- func (x *StorageError) GetEntity() string
- func (x *StorageError) GetErrorMessage() string
- func (*StorageError) ProtoMessage()
- func (x *StorageError) ProtoReflect() protoreflect.Message
- func (x *StorageError) Reset()
- func (x *StorageError) String() string
- type StorageError_StorageErrorCode
- func (StorageError_StorageErrorCode) Descriptor() protoreflect.EnumDescriptor
- func (x StorageError_StorageErrorCode) Enum() *StorageError_StorageErrorCode
- func (StorageError_StorageErrorCode) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x StorageError_StorageErrorCode) Number() protoreflect.EnumNumber
- func (x StorageError_StorageErrorCode) String() string
- func (StorageError_StorageErrorCode) Type() protoreflect.EnumType
- type StreamStats
- type StreamStats_Progress
- func (*StreamStats_Progress) Descriptor() ([]byte, []int) (deprecated)
- func (x *StreamStats_Progress) GetAtResponseEnd() float64
- func (x *StreamStats_Progress) GetAtResponseStart() float64
- func (*StreamStats_Progress) ProtoMessage()
- func (x *StreamStats_Progress) ProtoReflect() protoreflect.Message
- func (x *StreamStats_Progress) Reset()
- func (x *StreamStats_Progress) String() string
- type TableFieldSchema
- func (*TableFieldSchema) Descriptor() ([]byte, []int) (deprecated)
- func (x *TableFieldSchema) GetDefaultValueExpression() string
- func (x *TableFieldSchema) GetDescription() string
- func (x *TableFieldSchema) GetFields() []*TableFieldSchema
- func (x *TableFieldSchema) GetMaxLength() int64
- func (x *TableFieldSchema) GetMode() TableFieldSchema_Mode
- func (x *TableFieldSchema) GetName() string
- func (x *TableFieldSchema) GetPrecision() int64
- func (x *TableFieldSchema) GetScale() int64
- func (x *TableFieldSchema) GetType() TableFieldSchema_Type
- func (*TableFieldSchema) ProtoMessage()
- func (x *TableFieldSchema) ProtoReflect() protoreflect.Message
- func (x *TableFieldSchema) Reset()
- func (x *TableFieldSchema) String() string
- type TableFieldSchema_Mode
- func (TableFieldSchema_Mode) Descriptor() protoreflect.EnumDescriptor
- func (x TableFieldSchema_Mode) Enum() *TableFieldSchema_Mode
- func (TableFieldSchema_Mode) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x TableFieldSchema_Mode) Number() protoreflect.EnumNumber
- func (x TableFieldSchema_Mode) String() string
- func (TableFieldSchema_Mode) Type() protoreflect.EnumType
- type TableFieldSchema_Type
- func (TableFieldSchema_Type) Descriptor() protoreflect.EnumDescriptor
- func (x TableFieldSchema_Type) Enum() *TableFieldSchema_Type
- func (TableFieldSchema_Type) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x TableFieldSchema_Type) Number() protoreflect.EnumNumber
- func (x TableFieldSchema_Type) String() string
- func (TableFieldSchema_Type) Type() protoreflect.EnumType
- type TableSchema
- type ThrottleState
- type UnimplementedBigQueryReadServer
- func (*UnimplementedBigQueryReadServer) CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
- func (*UnimplementedBigQueryReadServer) ReadRows(*ReadRowsRequest, BigQueryRead_ReadRowsServer) error
- func (*UnimplementedBigQueryReadServer) SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
- type UnimplementedBigQueryWriteServer
- func (*UnimplementedBigQueryWriteServer) AppendRows(BigQueryWrite_AppendRowsServer) error
- func (*UnimplementedBigQueryWriteServer) BatchCommitWriteStreams(context.Context, *BatchCommitWriteStreamsRequest) (*BatchCommitWriteStreamsResponse, error)
- func (*UnimplementedBigQueryWriteServer) CreateWriteStream(context.Context, *CreateWriteStreamRequest) (*WriteStream, error)
- func (*UnimplementedBigQueryWriteServer) FinalizeWriteStream(context.Context, *FinalizeWriteStreamRequest) (*FinalizeWriteStreamResponse, error)
- func (*UnimplementedBigQueryWriteServer) FlushRows(context.Context, *FlushRowsRequest) (*FlushRowsResponse, error)
- func (*UnimplementedBigQueryWriteServer) GetWriteStream(context.Context, *GetWriteStreamRequest) (*WriteStream, error)
- type WriteStream
- func (*WriteStream) Descriptor() ([]byte, []int) (deprecated)
- func (x *WriteStream) GetCommitTime() *timestamppb.Timestamp
- func (x *WriteStream) GetCreateTime() *timestamppb.Timestamp
- func (x *WriteStream) GetLocation() string
- func (x *WriteStream) GetName() string
- func (x *WriteStream) GetTableSchema() *TableSchema
- func (x *WriteStream) GetType() WriteStream_Type
- func (x *WriteStream) GetWriteMode() WriteStream_WriteMode
- func (*WriteStream) ProtoMessage()
- func (x *WriteStream) ProtoReflect() protoreflect.Message
- func (x *WriteStream) Reset()
- func (x *WriteStream) String() string
- type WriteStreamView
- func (WriteStreamView) Descriptor() protoreflect.EnumDescriptor
- func (x WriteStreamView) Enum() *WriteStreamView
- func (WriteStreamView) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x WriteStreamView) Number() protoreflect.EnumNumber
- func (x WriteStreamView) String() string
- func (WriteStreamView) Type() protoreflect.EnumType
- type WriteStream_Type
- func (WriteStream_Type) Descriptor() protoreflect.EnumDescriptor
- func (x WriteStream_Type) Enum() *WriteStream_Type
- func (WriteStream_Type) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x WriteStream_Type) Number() protoreflect.EnumNumber
- func (x WriteStream_Type) String() string
- func (WriteStream_Type) Type() protoreflect.EnumType
- type WriteStream_WriteMode
- func (WriteStream_WriteMode) Descriptor() protoreflect.EnumDescriptor
- func (x WriteStream_WriteMode) Enum() *WriteStream_WriteMode
- func (WriteStream_WriteMode) EnumDescriptor() ([]byte, []int) (deprecated)
- func (x WriteStream_WriteMode) Number() protoreflect.EnumNumber
- func (x WriteStream_WriteMode) String() string
- func (WriteStream_WriteMode) Type() protoreflect.EnumType
Constants ¶
This section is empty.
Variables ¶
var (
	ArrowSerializationOptions_CompressionCodec_name = map[int32]string{
		0: "COMPRESSION_UNSPECIFIED",
		1: "LZ4_FRAME",
		2: "ZSTD",
	}
	ArrowSerializationOptions_CompressionCodec_value = map[string]int32{
		"COMPRESSION_UNSPECIFIED": 0,
		"LZ4_FRAME":               1,
		"ZSTD":                    2,
	}
)
Enum value maps for ArrowSerializationOptions_CompressionCodec.
var (
	AppendRowsRequest_MissingValueInterpretation_name = map[int32]string{
		0: "MISSING_VALUE_INTERPRETATION_UNSPECIFIED",
		1: "NULL_VALUE",
		2: "DEFAULT_VALUE",
	}
	AppendRowsRequest_MissingValueInterpretation_value = map[string]int32{
		"MISSING_VALUE_INTERPRETATION_UNSPECIFIED": 0,
		"NULL_VALUE":    1,
		"DEFAULT_VALUE": 2,
	}
)
Enum value maps for AppendRowsRequest_MissingValueInterpretation.
var (
	StorageError_StorageErrorCode_name = map[int32]string{
		0: "STORAGE_ERROR_CODE_UNSPECIFIED",
		1: "TABLE_NOT_FOUND",
		2: "STREAM_ALREADY_COMMITTED",
		3: "STREAM_NOT_FOUND",
		4: "INVALID_STREAM_TYPE",
		5: "INVALID_STREAM_STATE",
		6: "STREAM_FINALIZED",
		7: "SCHEMA_MISMATCH_EXTRA_FIELDS",
		8: "OFFSET_ALREADY_EXISTS",
		9: "OFFSET_OUT_OF_RANGE",
	}
	StorageError_StorageErrorCode_value = map[string]int32{
		"STORAGE_ERROR_CODE_UNSPECIFIED": 0,
		"TABLE_NOT_FOUND":                1,
		"STREAM_ALREADY_COMMITTED":       2,
		"STREAM_NOT_FOUND":               3,
		"INVALID_STREAM_TYPE":            4,
		"INVALID_STREAM_STATE":           5,
		"STREAM_FINALIZED":               6,
		"SCHEMA_MISMATCH_EXTRA_FIELDS":   7,
		"OFFSET_ALREADY_EXISTS":          8,
		"OFFSET_OUT_OF_RANGE":            9,
	}
)
Enum value maps for StorageError_StorageErrorCode.
var (
	RowError_RowErrorCode_name = map[int32]string{
		0: "ROW_ERROR_CODE_UNSPECIFIED",
		1: "FIELDS_ERROR",
	}
	RowError_RowErrorCode_value = map[string]int32{
		"ROW_ERROR_CODE_UNSPECIFIED": 0,
		"FIELDS_ERROR":               1,
	}
)
Enum value maps for RowError_RowErrorCode.
var (
	DataFormat_name = map[int32]string{
		0: "DATA_FORMAT_UNSPECIFIED",
		1: "AVRO",
		2: "ARROW",
	}
	DataFormat_value = map[string]int32{
		"DATA_FORMAT_UNSPECIFIED": 0,
		"AVRO":                    1,
		"ARROW":                   2,
	}
)
Enum value maps for DataFormat.
var (
	WriteStreamView_name = map[int32]string{
		0: "WRITE_STREAM_VIEW_UNSPECIFIED",
		1: "BASIC",
		2: "FULL",
	}
	WriteStreamView_value = map[string]int32{
		"WRITE_STREAM_VIEW_UNSPECIFIED": 0,
		"BASIC":                         1,
		"FULL":                          2,
	}
)
Enum value maps for WriteStreamView.
var (
	WriteStream_Type_name = map[int32]string{
		0: "TYPE_UNSPECIFIED",
		1: "COMMITTED",
		2: "PENDING",
		3: "BUFFERED",
	}
	WriteStream_Type_value = map[string]int32{
		"TYPE_UNSPECIFIED": 0,
		"COMMITTED":        1,
		"PENDING":          2,
		"BUFFERED":         3,
	}
)
Enum value maps for WriteStream_Type.
var (
	WriteStream_WriteMode_name = map[int32]string{
		0: "WRITE_MODE_UNSPECIFIED",
		1: "INSERT",
	}
	WriteStream_WriteMode_value = map[string]int32{
		"WRITE_MODE_UNSPECIFIED": 0,
		"INSERT":                 1,
	}
)
Enum value maps for WriteStream_WriteMode.
var (
	TableFieldSchema_Type_name = map[int32]string{
		0:  "TYPE_UNSPECIFIED",
		1:  "STRING",
		2:  "INT64",
		3:  "DOUBLE",
		4:  "STRUCT",
		5:  "BYTES",
		6:  "BOOL",
		7:  "TIMESTAMP",
		8:  "DATE",
		9:  "TIME",
		10: "DATETIME",
		11: "GEOGRAPHY",
		12: "NUMERIC",
		13: "BIGNUMERIC",
		14: "INTERVAL",
		15: "JSON",
	}
	TableFieldSchema_Type_value = map[string]int32{
		"TYPE_UNSPECIFIED": 0,
		"STRING":           1,
		"INT64":            2,
		"DOUBLE":           3,
		"STRUCT":           4,
		"BYTES":            5,
		"BOOL":             6,
		"TIMESTAMP":        7,
		"DATE":             8,
		"TIME":             9,
		"DATETIME":         10,
		"GEOGRAPHY":        11,
		"NUMERIC":          12,
		"BIGNUMERIC":       13,
		"INTERVAL":         14,
		"JSON":             15,
	}
)
Enum value maps for TableFieldSchema_Type.
var (
	TableFieldSchema_Mode_name = map[int32]string{
		0: "MODE_UNSPECIFIED",
		1: "NULLABLE",
		2: "REQUIRED",
		3: "REPEATED",
	}
	TableFieldSchema_Mode_value = map[string]int32{
		"MODE_UNSPECIFIED": 0,
		"NULLABLE":         1,
		"REQUIRED":         2,
		"REPEATED":         3,
	}
)
Enum value maps for TableFieldSchema_Mode.
var (
	// Setting the column_name extension allows users to reference
	// bigquery column independently of the field name in the protocol buffer
	// message.
	//
	// The intended use of this annotation is to reference a destination column
	// named using characters unavailable for protobuf field names (e.g. unicode
	// characters).
	//
	// More details about BigQuery naming limitations can be found here:
	// https://cloud.google.com/bigquery/docs/schemas#column_names
	//
	// This extension is currently experimental.
	//
	// optional string column_name = 454943157;
	E_ColumnName = &file_google_cloud_bigquery_storage_v1_annotations_proto_extTypes[0]
)
Extension fields to descriptorpb.FieldOptions.
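The extension is read from a field's descriptor options at runtime. Below is a minimal sketch of doing so, assuming this package is imported as storagepb (the import path shown is an assumption) and using a hypothetical generated message type examplepb.Row with a field named price.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"

	storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb" // assumed import path for this package
	examplepb "example.com/project/examplepb"                        // hypothetical generated package
)

func main() {
	// Look up the descriptor of the hypothetical "price" field and read its options.
	fd := (&examplepb.Row{}).ProtoReflect().Descriptor().Fields().ByName("price")
	opts, _ := fd.Options().(*descriptorpb.FieldOptions)
	if proto.HasExtension(opts, storagepb.E_ColumnName) {
		// The annotation carries the destination BigQuery column name.
		name := proto.GetExtension(opts, storagepb.E_ColumnName).(string)
		fmt.Println("destination column:", name)
	}
}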
var File_google_cloud_bigquery_storage_v1_annotations_proto protoreflect.FileDescriptor
var File_google_cloud_bigquery_storage_v1_arrow_proto protoreflect.FileDescriptor
var File_google_cloud_bigquery_storage_v1_avro_proto protoreflect.FileDescriptor
var File_google_cloud_bigquery_storage_v1_protobuf_proto protoreflect.FileDescriptor
var File_google_cloud_bigquery_storage_v1_storage_proto protoreflect.FileDescriptor
var File_google_cloud_bigquery_storage_v1_stream_proto protoreflect.FileDescriptor
var File_google_cloud_bigquery_storage_v1_table_proto protoreflect.FileDescriptor
Functions ¶
func RegisterBigQueryReadServer ¶
func RegisterBigQueryReadServer(s *grpc.Server, srv BigQueryReadServer)
func RegisterBigQueryWriteServer ¶
func RegisterBigQueryWriteServer(s *grpc.Server, srv BigQueryWriteServer)
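As a hedged illustration of how these registration functions are used, the sketch below wires the Unimplemented* servers into a grpc.Server, which is mainly useful as a test stub; a real service would supply its own implementations. The import path for this package is an assumption.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"

	storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb" // assumed import path for this package
)

func main() {
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	// Every RPC on these stubs returns codes.Unimplemented until overridden.
	storagepb.RegisterBigQueryReadServer(s, &storagepb.UnimplementedBigQueryReadServer{})
	storagepb.RegisterBigQueryWriteServer(s, &storagepb.UnimplementedBigQueryWriteServer{})
	log.Println("serving on", lis.Addr())
	log.Fatal(s.Serve(lis))
}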
Types ¶
type AppendRowsRequest ¶
type AppendRowsRequest struct { // Required. The write_stream identifies the target of the append operation, // and only needs to be specified as part of the first request on the gRPC // connection. If provided for subsequent requests, it must match the value of // the first request. // // For explicitly created write streams, the format is: // // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{id}` // // For the special default stream, the format is: // // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`. WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"` // If present, the write is only performed if the next append offset is same // as the provided value. If not present, the write is performed at the // current end of stream. Specifying a value for this field is not allowed // when calling AppendRows for the '_default' stream. Offset *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"` // Input rows. The `writer_schema` field must be specified at the initial // request and currently, it will be ignored if specified in following // requests. Following requests must have data in the same format as the // initial request. // // Types that are assignable to Rows: // *AppendRowsRequest_ProtoRows Rows isAppendRowsRequest_Rows `protobuf_oneof:"rows"` // Id set by client to annotate its identity. Only initial request setting is // respected. TraceId string `protobuf:"bytes,6,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` // A map to indicate how to interpret missing value for some fields. Missing // values are fields present in user schema but missing in rows. The key is // the field name. The value is the interpretation of missing values for the // field. // // For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} means all // missing values in field foo are interpreted as NULL, all missing values in // field bar are interpreted as the default value of field bar in table // schema. // // If a field is not in this map and has missing values, the missing values // in this field are interpreted as NULL. // // This field only applies to the current request, it won't affect other // requests on the connection. // // Currently, field name can only be top-level column name, can't be a struct // field path like 'foo.bar'. MissingValueInterpretations map[string]AppendRowsRequest_MissingValueInterpretation `` /* 316-byte string literal not displayed */ // contains filtered or unexported fields }
Request message for `AppendRows`.
Due to the nature of AppendRows being a bidirectional streaming RPC, certain parts of the AppendRowsRequest need only be specified for the first request sent each time the gRPC network connection is opened/reopened.
A single AppendRowsRequest must be less than 10 MB in size. Requests larger than this return an error, typically `INVALID_ARGUMENT`.
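A minimal sketch of the first request sent on a new AppendRows connection, assuming this package is imported as storagepb (assumed import path): write_stream, writer_schema, offset, and trace_id are populated here, while later requests on the same connection only need to carry rows. The descriptor and serializedRows arguments are assumed to be produced by the caller.

package samples

import (
	storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb" // assumed import path for this package
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func firstAppendRequest(writeStream string, descriptor *descriptorpb.DescriptorProto, serializedRows [][]byte, offset int64) *storagepb.AppendRowsRequest {
	return &storagepb.AppendRowsRequest{
		WriteStream: writeStream,
		Offset:      wrapperspb.Int64(offset), // omit for the '_default' stream
		TraceId:     "example-writer",         // hypothetical identifier
		Rows: &storagepb.AppendRowsRequest_ProtoRows{
			ProtoRows: &storagepb.AppendRowsRequest_ProtoData{
				// Sent once per connection; ignored on subsequent requests.
				WriterSchema: &storagepb.ProtoSchema{ProtoDescriptor: descriptor},
				Rows:         &storagepb.ProtoRows{SerializedRows: serializedRows},
			},
		},
	}
}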
func (*AppendRowsRequest) Descriptor (deprecated) ¶
func (*AppendRowsRequest) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsRequest.ProtoReflect.Descriptor instead.
func (*AppendRowsRequest) GetMissingValueInterpretations ¶
func (x *AppendRowsRequest) GetMissingValueInterpretations() map[string]AppendRowsRequest_MissingValueInterpretation
func (*AppendRowsRequest) GetOffset ¶
func (x *AppendRowsRequest) GetOffset() *wrapperspb.Int64Value
func (*AppendRowsRequest) GetProtoRows ¶
func (x *AppendRowsRequest) GetProtoRows() *AppendRowsRequest_ProtoData
func (*AppendRowsRequest) GetRows ¶
func (m *AppendRowsRequest) GetRows() isAppendRowsRequest_Rows
func (*AppendRowsRequest) GetTraceId ¶
func (x *AppendRowsRequest) GetTraceId() string
func (*AppendRowsRequest) GetWriteStream ¶
func (x *AppendRowsRequest) GetWriteStream() string
func (*AppendRowsRequest) ProtoMessage ¶
func (*AppendRowsRequest) ProtoMessage()
func (*AppendRowsRequest) ProtoReflect ¶
func (x *AppendRowsRequest) ProtoReflect() protoreflect.Message
func (*AppendRowsRequest) Reset ¶
func (x *AppendRowsRequest) Reset()
func (*AppendRowsRequest) String ¶
func (x *AppendRowsRequest) String() string
type AppendRowsRequest_MissingValueInterpretation ¶
type AppendRowsRequest_MissingValueInterpretation int32
An enum to indicate how to interpret missing values. Missing values are fields present in the user schema but missing in the rows. A missing value can represent a NULL or a column default value defined in the BigQuery table schema.
const (
	// Invalid missing value interpretation. Requests with this value will be
	// rejected.
	AppendRowsRequest_MISSING_VALUE_INTERPRETATION_UNSPECIFIED AppendRowsRequest_MissingValueInterpretation = 0
	// Missing value is interpreted as NULL.
	AppendRowsRequest_NULL_VALUE AppendRowsRequest_MissingValueInterpretation = 1
	// Missing value is interpreted as column default value if declared in the
	// table schema, NULL otherwise.
	AppendRowsRequest_DEFAULT_VALUE AppendRowsRequest_MissingValueInterpretation = 2
)
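A short sketch of using these values on a request, with the same assumed storagepb import as the earlier sketches: missing values in a hypothetical updated_at column fall back to the column default, while unlisted columns keep the implicit NULL interpretation.

package samples

import storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb" // assumed import path for this package

func withMissingValueDefaults(writeStream string) *storagepb.AppendRowsRequest {
	return &storagepb.AppendRowsRequest{
		WriteStream: writeStream,
		MissingValueInterpretations: map[string]storagepb.AppendRowsRequest_MissingValueInterpretation{
			// Hypothetical column: use its declared default when the value is missing.
			"updated_at": storagepb.AppendRowsRequest_DEFAULT_VALUE,
		},
	}
}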
func (AppendRowsRequest_MissingValueInterpretation) Descriptor ¶
func (AppendRowsRequest_MissingValueInterpretation) Descriptor() protoreflect.EnumDescriptor
func (AppendRowsRequest_MissingValueInterpretation) EnumDescriptor (deprecated) ¶
func (AppendRowsRequest_MissingValueInterpretation) EnumDescriptor() ([]byte, []int)
Deprecated: Use AppendRowsRequest_MissingValueInterpretation.Descriptor instead.
func (AppendRowsRequest_MissingValueInterpretation) Number ¶
func (x AppendRowsRequest_MissingValueInterpretation) Number() protoreflect.EnumNumber
func (AppendRowsRequest_MissingValueInterpretation) String ¶
func (x AppendRowsRequest_MissingValueInterpretation) String() string
func (AppendRowsRequest_MissingValueInterpretation) Type ¶
func (AppendRowsRequest_MissingValueInterpretation) Type() protoreflect.EnumType
type AppendRowsRequest_ProtoData ¶
type AppendRowsRequest_ProtoData struct { // Proto schema used to serialize the data. This value only needs to be // provided as part of the first request on a gRPC network connection, // and will be ignored for subsequent requests on the connection. WriterSchema *ProtoSchema `protobuf:"bytes,1,opt,name=writer_schema,json=writerSchema,proto3" json:"writer_schema,omitempty"` // Serialized row data in protobuf message format. // Currently, the backend expects the serialized rows to adhere to // proto2 semantics when appending rows, particularly with respect to // how default values are encoded. Rows *ProtoRows `protobuf:"bytes,2,opt,name=rows,proto3" json:"rows,omitempty"` // contains filtered or unexported fields }
ProtoData contains the data rows and schema when constructing append requests.
func (*AppendRowsRequest_ProtoData) Descriptor (deprecated) ¶
func (*AppendRowsRequest_ProtoData) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsRequest_ProtoData.ProtoReflect.Descriptor instead.
func (*AppendRowsRequest_ProtoData) GetRows ¶
func (x *AppendRowsRequest_ProtoData) GetRows() *ProtoRows
func (*AppendRowsRequest_ProtoData) GetWriterSchema ¶
func (x *AppendRowsRequest_ProtoData) GetWriterSchema() *ProtoSchema
func (*AppendRowsRequest_ProtoData) ProtoMessage ¶
func (*AppendRowsRequest_ProtoData) ProtoMessage()
func (*AppendRowsRequest_ProtoData) ProtoReflect ¶
func (x *AppendRowsRequest_ProtoData) ProtoReflect() protoreflect.Message
func (*AppendRowsRequest_ProtoData) Reset ¶
func (x *AppendRowsRequest_ProtoData) Reset()
func (*AppendRowsRequest_ProtoData) String ¶
func (x *AppendRowsRequest_ProtoData) String() string
type AppendRowsRequest_ProtoRows ¶
type AppendRowsRequest_ProtoRows struct { // Rows in proto format. ProtoRows *AppendRowsRequest_ProtoData `protobuf:"bytes,4,opt,name=proto_rows,json=protoRows,proto3,oneof"` }
type AppendRowsResponse ¶
type AppendRowsResponse struct { // Types that are assignable to Response: // *AppendRowsResponse_AppendResult_ // *AppendRowsResponse_Error Response isAppendRowsResponse_Response `protobuf_oneof:"response"` // If backend detects a schema update, pass it to user so that user can // use it to input new type of message. It will be empty when no schema // updates have occurred. UpdatedSchema *TableSchema `protobuf:"bytes,3,opt,name=updated_schema,json=updatedSchema,proto3" json:"updated_schema,omitempty"` // If a request failed due to corrupted rows, no rows in the batch will be // appended. The API will return row level error info, so that the caller can // remove the bad rows and retry the request. RowErrors []*RowError `protobuf:"bytes,4,rep,name=row_errors,json=rowErrors,proto3" json:"row_errors,omitempty"` // The target of the append operation. Matches the write_stream in the // corresponding request. WriteStream string `protobuf:"bytes,5,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"` // contains filtered or unexported fields }
Response message for `AppendRows`.
func (*AppendRowsResponse) Descriptor (deprecated) ¶
func (*AppendRowsResponse) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsResponse.ProtoReflect.Descriptor instead.
func (*AppendRowsResponse) GetAppendResult ¶
func (x *AppendRowsResponse) GetAppendResult() *AppendRowsResponse_AppendResult
func (*AppendRowsResponse) GetError ¶
func (x *AppendRowsResponse) GetError() *status.Status
func (*AppendRowsResponse) GetResponse ¶
func (m *AppendRowsResponse) GetResponse() isAppendRowsResponse_Response
func (*AppendRowsResponse) GetRowErrors ¶
func (x *AppendRowsResponse) GetRowErrors() []*RowError
func (*AppendRowsResponse) GetUpdatedSchema ¶
func (x *AppendRowsResponse) GetUpdatedSchema() *TableSchema
func (*AppendRowsResponse) GetWriteStream ¶
func (x *AppendRowsResponse) GetWriteStream() string
func (*AppendRowsResponse) ProtoMessage ¶
func (*AppendRowsResponse) ProtoMessage()
func (*AppendRowsResponse) ProtoReflect ¶
func (x *AppendRowsResponse) ProtoReflect() protoreflect.Message
func (*AppendRowsResponse) Reset ¶
func (x *AppendRowsResponse) Reset()
func (*AppendRowsResponse) String ¶
func (x *AppendRowsResponse) String() string
type AppendRowsResponse_AppendResult ¶
type AppendRowsResponse_AppendResult struct { // The row offset at which the last append occurred. The offset will not be // set if appending using default streams. Offset *wrapperspb.Int64Value `protobuf:"bytes,1,opt,name=offset,proto3" json:"offset,omitempty"` // contains filtered or unexported fields }
AppendResult is returned for successful append requests.
func (*AppendRowsResponse_AppendResult) Descriptor (deprecated) ¶
func (*AppendRowsResponse_AppendResult) Descriptor() ([]byte, []int)
Deprecated: Use AppendRowsResponse_AppendResult.ProtoReflect.Descriptor instead.
func (*AppendRowsResponse_AppendResult) GetOffset ¶
func (x *AppendRowsResponse_AppendResult) GetOffset() *wrapperspb.Int64Value
func (*AppendRowsResponse_AppendResult) ProtoMessage ¶
func (*AppendRowsResponse_AppendResult) ProtoMessage()
func (*AppendRowsResponse_AppendResult) ProtoReflect ¶
func (x *AppendRowsResponse_AppendResult) ProtoReflect() protoreflect.Message
func (*AppendRowsResponse_AppendResult) Reset ¶
func (x *AppendRowsResponse_AppendResult) Reset()
func (*AppendRowsResponse_AppendResult) String ¶
func (x *AppendRowsResponse_AppendResult) String() string
type AppendRowsResponse_AppendResult_ ¶
type AppendRowsResponse_AppendResult_ struct { // Result if the append is successful. AppendResult *AppendRowsResponse_AppendResult `protobuf:"bytes,1,opt,name=append_result,json=appendResult,proto3,oneof"` }
type AppendRowsResponse_Error ¶
type AppendRowsResponse_Error struct { // Error returned when problems were encountered. If present, // it indicates rows were not accepted into the system. // Users can retry or continue with other append requests within the // same connection. // // Additional information about error signalling: // // ALREADY_EXISTS: Happens when an append specified an offset, and the // backend already has received data at this offset. Typically encountered // in retry scenarios, and can be ignored. // // OUT_OF_RANGE: Returned when the specified offset in the stream is beyond // the current end of the stream. // // INVALID_ARGUMENT: Indicates a malformed request or data. // // ABORTED: Request processing is aborted because of prior failures. The // request can be retried if previous failure is addressed. // // INTERNAL: Indicates server side error(s) that can be retried. Error *status.Status `protobuf:"bytes,2,opt,name=error,proto3,oneof"` }
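A hedged sketch of consuming responses from an AppendRows stream and branching on the response oneof; the stream is a BigQueryWrite_AppendRowsClient obtained elsewhere, and the storagepb import path is an assumption.

package samples

import (
	"fmt"
	"io"

	storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb" // assumed import path for this package
)

func drainResponses(stream storagepb.BigQueryWrite_AppendRowsClient) error {
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // server closed the stream
		}
		if err != nil {
			return err
		}
		if result := resp.GetAppendResult(); result != nil {
			// Offset is unset for appends to the '_default' stream.
			fmt.Println("appended at offset:", result.GetOffset().GetValue())
			continue
		}
		if st := resp.GetError(); st != nil {
			// Row-level problems, if any, arrive alongside the error.
			for _, re := range resp.GetRowErrors() {
				fmt.Printf("row %d: %s (%s)\n", re.GetIndex(), re.GetMessage(), re.GetCode())
			}
			return fmt.Errorf("append failed: %s", st.GetMessage())
		}
	}
}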
type ArrowRecordBatch ¶
type ArrowRecordBatch struct { // IPC-serialized Arrow RecordBatch. SerializedRecordBatch []byte `` /* 126-byte string literal not displayed */ // [Deprecated] The count of rows in `serialized_record_batch`. // Please use the format-independent ReadRowsResponse.row_count instead. // // Deprecated: Do not use. RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` // contains filtered or unexported fields }
Arrow RecordBatch.
func (*ArrowRecordBatch) Descriptor (deprecated) ¶
func (*ArrowRecordBatch) Descriptor() ([]byte, []int)
Deprecated: Use ArrowRecordBatch.ProtoReflect.Descriptor instead.
func (*ArrowRecordBatch) GetRowCount (deprecated) ¶
func (x *ArrowRecordBatch) GetRowCount() int64
Deprecated: Do not use.
func (*ArrowRecordBatch) GetSerializedRecordBatch ¶
func (x *ArrowRecordBatch) GetSerializedRecordBatch() []byte
func (*ArrowRecordBatch) ProtoMessage ¶
func (*ArrowRecordBatch) ProtoMessage()
func (*ArrowRecordBatch) ProtoReflect ¶
func (x *ArrowRecordBatch) ProtoReflect() protoreflect.Message
func (*ArrowRecordBatch) Reset ¶
func (x *ArrowRecordBatch) Reset()
func (*ArrowRecordBatch) String ¶
func (x *ArrowRecordBatch) String() string
type ArrowSchema ¶
type ArrowSchema struct { // IPC serialized Arrow schema. SerializedSchema []byte `protobuf:"bytes,1,opt,name=serialized_schema,json=serializedSchema,proto3" json:"serialized_schema,omitempty"` // contains filtered or unexported fields }
Arrow schema as specified in https://arrow.apache.org/docs/python/api/datatypes.html and serialized to bytes using IPC: https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc
See the code samples for how this message can be deserialized.
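One possible way to deserialize the schema together with ReadRows record batches, assuming the Apache Arrow Go module is available (the github.com/apache/arrow/go/v12 path used here is an assumption): the serialized schema followed by a serialized record batch forms an IPC stream that ipc.NewReader can decode.

package samples

import (
	"bytes"
	"fmt"

	"github.com/apache/arrow/go/v12/arrow/ipc" // assumed Arrow module version

	storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb" // assumed import path for this package
)

func decodeArrowBatch(schema *storagepb.ArrowSchema, batch *storagepb.ArrowRecordBatch) error {
	// Concatenate the IPC-serialized schema and record batch into one stream.
	var buf bytes.Buffer
	buf.Write(schema.GetSerializedSchema())
	buf.Write(batch.GetSerializedRecordBatch())
	r, err := ipc.NewReader(&buf)
	if err != nil {
		return err
	}
	defer r.Release()
	for r.Next() {
		rec := r.Record()
		fmt.Println("rows in record:", rec.NumRows())
	}
	return r.Err()
}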
func (*ArrowSchema) Descriptor (deprecated) ¶
func (*ArrowSchema) Descriptor() ([]byte, []int)
Deprecated: Use ArrowSchema.ProtoReflect.Descriptor instead.
func (*ArrowSchema) GetSerializedSchema ¶
func (x *ArrowSchema) GetSerializedSchema() []byte
func (*ArrowSchema) ProtoMessage ¶
func (*ArrowSchema) ProtoMessage()
func (*ArrowSchema) ProtoReflect ¶
func (x *ArrowSchema) ProtoReflect() protoreflect.Message
func (*ArrowSchema) Reset ¶
func (x *ArrowSchema) Reset()
func (*ArrowSchema) String ¶
func (x *ArrowSchema) String() string
type ArrowSerializationOptions ¶
type ArrowSerializationOptions struct { // The compression codec to use for Arrow buffers in serialized record // batches. BufferCompression ArrowSerializationOptions_CompressionCodec `` /* 194-byte string literal not displayed */ // contains filtered or unexported fields }
Contains options specific to Arrow Serialization.
func (*ArrowSerializationOptions) Descriptor (deprecated) ¶
func (*ArrowSerializationOptions) Descriptor() ([]byte, []int)
Deprecated: Use ArrowSerializationOptions.ProtoReflect.Descriptor instead.
func (*ArrowSerializationOptions) GetBufferCompression ¶
func (x *ArrowSerializationOptions) GetBufferCompression() ArrowSerializationOptions_CompressionCodec
func (*ArrowSerializationOptions) ProtoMessage ¶
func (*ArrowSerializationOptions) ProtoMessage()
func (*ArrowSerializationOptions) ProtoReflect ¶
func (x *ArrowSerializationOptions) ProtoReflect() protoreflect.Message
func (*ArrowSerializationOptions) Reset ¶
func (x *ArrowSerializationOptions) Reset()
func (*ArrowSerializationOptions) String ¶
func (x *ArrowSerializationOptions) String() string
type ArrowSerializationOptions_CompressionCodec ¶
type ArrowSerializationOptions_CompressionCodec int32
Compression codecs supported by Arrow.
const (
	// If unspecified no compression will be used.
	ArrowSerializationOptions_COMPRESSION_UNSPECIFIED ArrowSerializationOptions_CompressionCodec = 0
	// LZ4 Frame (https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md)
	ArrowSerializationOptions_LZ4_FRAME ArrowSerializationOptions_CompressionCodec = 1
	// Zstandard compression.
	ArrowSerializationOptions_ZSTD ArrowSerializationOptions_CompressionCodec = 2
)
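A minimal sketch of requesting LZ4-compressed Arrow buffers when creating a read session; the project and table values are placeholders and the storagepb import path is an assumption.

package samples

import storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb" // assumed import path for this package

func compressedArrowSession(project, table string) *storagepb.CreateReadSessionRequest {
	return &storagepb.CreateReadSessionRequest{
		Parent: "projects/" + project,
		ReadSession: &storagepb.ReadSession{
			Table:      table, // projects/{project}/datasets/{dataset}/tables/{table}
			DataFormat: storagepb.DataFormat_ARROW,
			ReadOptions: &storagepb.ReadSession_TableReadOptions{
				OutputFormatSerializationOptions: &storagepb.ReadSession_TableReadOptions_ArrowSerializationOptions{
					ArrowSerializationOptions: &storagepb.ArrowSerializationOptions{
						BufferCompression: storagepb.ArrowSerializationOptions_LZ4_FRAME,
					},
				},
			},
		},
	}
}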
func (ArrowSerializationOptions_CompressionCodec) Descriptor ¶
func (ArrowSerializationOptions_CompressionCodec) Descriptor() protoreflect.EnumDescriptor
func (ArrowSerializationOptions_CompressionCodec) EnumDescriptor (deprecated) ¶
func (ArrowSerializationOptions_CompressionCodec) EnumDescriptor() ([]byte, []int)
Deprecated: Use ArrowSerializationOptions_CompressionCodec.Descriptor instead.
func (ArrowSerializationOptions_CompressionCodec) Number ¶
func (x ArrowSerializationOptions_CompressionCodec) Number() protoreflect.EnumNumber
func (ArrowSerializationOptions_CompressionCodec) String ¶
func (x ArrowSerializationOptions_CompressionCodec) String() string
func (ArrowSerializationOptions_CompressionCodec) Type ¶
func (ArrowSerializationOptions_CompressionCodec) Type() protoreflect.EnumType
type AvroRows ¶
type AvroRows struct { // Binary serialized rows in a block. SerializedBinaryRows []byte `protobuf:"bytes,1,opt,name=serialized_binary_rows,json=serializedBinaryRows,proto3" json:"serialized_binary_rows,omitempty"` // [Deprecated] The count of rows in the returning block. // Please use the format-independent ReadRowsResponse.row_count instead. // // Deprecated: Do not use. RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` // contains filtered or unexported fields }
Avro rows.
func (*AvroRows) Descriptor (deprecated) ¶
func (*AvroRows) Descriptor() ([]byte, []int)
Deprecated: Use AvroRows.ProtoReflect.Descriptor instead.
func (*AvroRows) GetRowCount (deprecated) ¶
func (x *AvroRows) GetRowCount() int64
Deprecated: Do not use.
func (*AvroRows) GetSerializedBinaryRows ¶
func (x *AvroRows) GetSerializedBinaryRows() []byte
func (*AvroRows) ProtoMessage ¶
func (*AvroRows) ProtoMessage()
func (*AvroRows) ProtoReflect ¶
func (x *AvroRows) ProtoReflect() protoreflect.Message
func (*AvroRows) Reset ¶
func (x *AvroRows) Reset()
func (*AvroRows) String ¶
func (x *AvroRows) String() string
type AvroSchema ¶
type AvroSchema struct { // Json serialized schema, as described at // https://avro.apache.org/docs/1.8.1/spec.html. Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"` // contains filtered or unexported fields }
Avro schema.
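A sketch of decoding AvroRows against this schema with a third-party Avro codec; github.com/linkedin/goavro/v2 is assumed here, but any decoder that accepts the JSON schema follows the same pattern of consuming back-to-back binary records.

package samples

import (
	"fmt"

	"github.com/linkedin/goavro/v2" // assumed third-party Avro codec

	storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb" // assumed import path for this package
)

func decodeAvroRows(schema *storagepb.AvroSchema, rows *storagepb.AvroRows) error {
	codec, err := goavro.NewCodec(schema.GetSchema())
	if err != nil {
		return err
	}
	data := rows.GetSerializedBinaryRows()
	for len(data) > 0 {
		// Each block contains back-to-back Avro binary records.
		row, rest, err := codec.NativeFromBinary(data)
		if err != nil {
			return err
		}
		fmt.Println(row)
		data = rest
	}
	return nil
}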
func (*AvroSchema) Descriptor (deprecated) ¶
func (*AvroSchema) Descriptor() ([]byte, []int)
Deprecated: Use AvroSchema.ProtoReflect.Descriptor instead.
func (*AvroSchema) GetSchema ¶
func (x *AvroSchema) GetSchema() string
func (*AvroSchema) ProtoMessage ¶
func (*AvroSchema) ProtoMessage()
func (*AvroSchema) ProtoReflect ¶
func (x *AvroSchema) ProtoReflect() protoreflect.Message
func (*AvroSchema) Reset ¶
func (x *AvroSchema) Reset()
func (*AvroSchema) String ¶
func (x *AvroSchema) String() string
type AvroSerializationOptions ¶
type AvroSerializationOptions struct { // Enable displayName attribute in Avro schema. // // The Avro specification requires field names to be alphanumeric. By // default, in cases when column names do not conform to these requirements // (e.g. non-ascii unicode codepoints) and Avro is requested as an output // format, the CreateReadSession call will fail. // // Setting this field to true, populates avro field names with a placeholder // value and populates a "displayName" attribute for every avro field with the // original column name. EnableDisplayNameAttribute bool `` /* 144-byte string literal not displayed */ // contains filtered or unexported fields }
Contains options specific to Avro Serialization.
func (*AvroSerializationOptions) Descriptor (deprecated) ¶
func (*AvroSerializationOptions) Descriptor() ([]byte, []int)
Deprecated: Use AvroSerializationOptions.ProtoReflect.Descriptor instead.
func (*AvroSerializationOptions) GetEnableDisplayNameAttribute ¶
func (x *AvroSerializationOptions) GetEnableDisplayNameAttribute() bool
func (*AvroSerializationOptions) ProtoMessage ¶
func (*AvroSerializationOptions) ProtoMessage()
func (*AvroSerializationOptions) ProtoReflect ¶
func (x *AvroSerializationOptions) ProtoReflect() protoreflect.Message
func (*AvroSerializationOptions) Reset ¶
func (x *AvroSerializationOptions) Reset()
func (*AvroSerializationOptions) String ¶
func (x *AvroSerializationOptions) String() string
type BatchCommitWriteStreamsRequest ¶
type BatchCommitWriteStreamsRequest struct { // Required. Parent table that all the streams should belong to, in the form // of `projects/{project}/datasets/{dataset}/tables/{table}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Required. The group of streams that will be committed atomically. WriteStreams []string `protobuf:"bytes,2,rep,name=write_streams,json=writeStreams,proto3" json:"write_streams,omitempty"` // contains filtered or unexported fields }
Request message for `BatchCommitWriteStreams`.
func (*BatchCommitWriteStreamsRequest) Descriptor (deprecated) ¶
func (*BatchCommitWriteStreamsRequest) Descriptor() ([]byte, []int)
Deprecated: Use BatchCommitWriteStreamsRequest.ProtoReflect.Descriptor instead.
func (*BatchCommitWriteStreamsRequest) GetParent ¶
func (x *BatchCommitWriteStreamsRequest) GetParent() string
func (*BatchCommitWriteStreamsRequest) GetWriteStreams ¶
func (x *BatchCommitWriteStreamsRequest) GetWriteStreams() []string
func (*BatchCommitWriteStreamsRequest) ProtoMessage ¶
func (*BatchCommitWriteStreamsRequest) ProtoMessage()
func (*BatchCommitWriteStreamsRequest) ProtoReflect ¶
func (x *BatchCommitWriteStreamsRequest) ProtoReflect() protoreflect.Message
func (*BatchCommitWriteStreamsRequest) Reset ¶
func (x *BatchCommitWriteStreamsRequest) Reset()
func (*BatchCommitWriteStreamsRequest) String ¶
func (x *BatchCommitWriteStreamsRequest) String() string
type BatchCommitWriteStreamsResponse ¶
type BatchCommitWriteStreamsResponse struct { // The time at which streams were committed in microseconds granularity. // This field will only exist when there are no stream errors. // **Note** if this field is not set, it means the commit was not successful. CommitTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=commit_time,json=commitTime,proto3" json:"commit_time,omitempty"` // Stream level error if commit failed. Only streams with error will be in // the list. // If empty, there is no error and all streams are committed successfully. // If non empty, certain streams have errors and ZERO stream is committed due // to atomicity guarantee. StreamErrors []*StorageError `protobuf:"bytes,2,rep,name=stream_errors,json=streamErrors,proto3" json:"stream_errors,omitempty"` // contains filtered or unexported fields }
Response message for `BatchCommitWriteStreams`.
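A hedged sketch of issuing a batch commit and interpreting this response: the commit succeeded only when no stream errors are returned and CommitTime is set. The client, context, and table name are assumed to come from the caller.

package samples

import (
	"context"
	"fmt"

	storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb" // assumed import path for this package
)

func commitStreams(ctx context.Context, client storagepb.BigQueryWriteClient, table string, streams []string) error {
	resp, err := client.BatchCommitWriteStreams(ctx, &storagepb.BatchCommitWriteStreamsRequest{
		Parent:       table, // projects/{project}/datasets/{dataset}/tables/{table}
		WriteStreams: streams,
	})
	if err != nil {
		return err
	}
	if len(resp.GetStreamErrors()) > 0 {
		// Commits are atomic: any stream error means nothing was committed.
		for _, se := range resp.GetStreamErrors() {
			fmt.Printf("stream %s: %s (%s)\n", se.GetEntity(), se.GetErrorMessage(), se.GetCode())
		}
		return fmt.Errorf("batch commit failed; no streams were committed")
	}
	fmt.Println("committed at:", resp.GetCommitTime().AsTime())
	return nil
}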
func (*BatchCommitWriteStreamsResponse) Descriptor (deprecated) ¶
func (*BatchCommitWriteStreamsResponse) Descriptor() ([]byte, []int)
Deprecated: Use BatchCommitWriteStreamsResponse.ProtoReflect.Descriptor instead.
func (*BatchCommitWriteStreamsResponse) GetCommitTime ¶
func (x *BatchCommitWriteStreamsResponse) GetCommitTime() *timestamppb.Timestamp
func (*BatchCommitWriteStreamsResponse) GetStreamErrors ¶
func (x *BatchCommitWriteStreamsResponse) GetStreamErrors() []*StorageError
func (*BatchCommitWriteStreamsResponse) ProtoMessage ¶
func (*BatchCommitWriteStreamsResponse) ProtoMessage()
func (*BatchCommitWriteStreamsResponse) ProtoReflect ¶
func (x *BatchCommitWriteStreamsResponse) ProtoReflect() protoreflect.Message
func (*BatchCommitWriteStreamsResponse) Reset ¶
func (x *BatchCommitWriteStreamsResponse) Reset()
func (*BatchCommitWriteStreamsResponse) String ¶
func (x *BatchCommitWriteStreamsResponse) String() string
type BigQueryReadClient ¶
type BigQueryReadClient interface { // Creates a new read session. A read session divides the contents of a // BigQuery table into one or more streams, which can then be used to read // data from the table. The read session also specifies properties of the // data to be read, such as a list of columns or a push-down filter describing // the rows to be returned. // // A particular row can be read by at most one stream. When the caller has // reached the end of each stream in the session, then all the data in the // table has been read. // // Data is assigned to each stream such that roughly the same number of // rows can be read from each stream. Because the server-side unit for // assigning data is collections of rows, the API does not guarantee that // each stream will return the same number or rows. Additionally, the // limits are enforced based on the number of pre-filtered rows, so some // filters can lead to lopsided assignments. // // Read sessions automatically expire 6 hours after they are created and do // not require manual clean-up by the caller. CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error) // Reads rows from the stream in the format prescribed by the ReadSession. // Each response contains one or more table rows, up to a maximum of 100 MiB // per response; read requests which attempt to read individual rows larger // than 100 MiB will fail. // // Each request also returns a set of stream statistics reflecting the current // state of the stream. ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryRead_ReadRowsClient, error) // Splits a given `ReadStream` into two `ReadStream` objects. These // `ReadStream` objects are referred to as the primary and the residual // streams of the split. The original `ReadStream` can still be read from in // the same manner as before. Both of the returned `ReadStream` objects can // also be read from, and the rows returned by both child streams will be // the same as the rows read from the original stream. // // Moreover, the two child streams will be allocated back-to-back in the // original `ReadStream`. Concretely, it is guaranteed that for streams // original, primary, and residual, that original[0-j] = primary[0-j] and // original[j-n] = residual[0-m] once the streams have been read to // completion. SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error) }
BigQueryReadClient is the client API for BigQueryRead service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewBigQueryReadClient ¶
func NewBigQueryReadClient(cc grpc.ClientConnInterface) BigQueryReadClient
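A sketch of the read path against these stubs: create a session over a table, then read one of its streams to completion. Connection setup and authentication are elided, and most applications will prefer the higher-level cloud.google.com/go/bigquery/storage/apiv1 client; the import path used for this package is an assumption.

package samples

import (
	"context"
	"fmt"
	"io"

	"google.golang.org/grpc"

	storagepb "cloud.google.com/go/bigquery/storage/apiv1/storagepb" // assumed import path for this package
)

func readTable(ctx context.Context, conn grpc.ClientConnInterface, project, table string) error {
	client := storagepb.NewBigQueryReadClient(conn)
	session, err := client.CreateReadSession(ctx, &storagepb.CreateReadSessionRequest{
		Parent: "projects/" + project,
		ReadSession: &storagepb.ReadSession{
			Table:      table, // projects/{project}/datasets/{dataset}/tables/{table}
			DataFormat: storagepb.DataFormat_AVRO,
		},
		MaxStreamCount: 1,
	})
	if err != nil {
		return err
	}
	if len(session.GetStreams()) == 0 {
		return fmt.Errorf("read session contains no streams")
	}
	stream, err := client.ReadRows(ctx, &storagepb.ReadRowsRequest{
		ReadStream: session.GetStreams()[0].GetName(),
	})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // stream fully read
		}
		if err != nil {
			return err
		}
		fmt.Println("rows in response:", resp.GetRowCount())
	}
}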
type BigQueryReadServer ¶
type BigQueryReadServer interface { // Creates a new read session. A read session divides the contents of a // BigQuery table into one or more streams, which can then be used to read // data from the table. The read session also specifies properties of the // data to be read, such as a list of columns or a push-down filter describing // the rows to be returned. // // A particular row can be read by at most one stream. When the caller has // reached the end of each stream in the session, then all the data in the // table has been read. // // Data is assigned to each stream such that roughly the same number of // rows can be read from each stream. Because the server-side unit for // assigning data is collections of rows, the API does not guarantee that // each stream will return the same number or rows. Additionally, the // limits are enforced based on the number of pre-filtered rows, so some // filters can lead to lopsided assignments. // // Read sessions automatically expire 6 hours after they are created and do // not require manual clean-up by the caller. CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error) // Reads rows from the stream in the format prescribed by the ReadSession. // Each response contains one or more table rows, up to a maximum of 100 MiB // per response; read requests which attempt to read individual rows larger // than 100 MiB will fail. // // Each request also returns a set of stream statistics reflecting the current // state of the stream. ReadRows(*ReadRowsRequest, BigQueryRead_ReadRowsServer) error // Splits a given `ReadStream` into two `ReadStream` objects. These // `ReadStream` objects are referred to as the primary and the residual // streams of the split. The original `ReadStream` can still be read from in // the same manner as before. Both of the returned `ReadStream` objects can // also be read from, and the rows returned by both child streams will be // the same as the rows read from the original stream. // // Moreover, the two child streams will be allocated back-to-back in the // original `ReadStream`. Concretely, it is guaranteed that for streams // original, primary, and residual, that original[0-j] = primary[0-j] and // original[j-n] = residual[0-m] once the streams have been read to // completion. SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error) }
BigQueryReadServer is the server API for BigQueryRead service.
type BigQueryRead_ReadRowsClient ¶
type BigQueryRead_ReadRowsClient interface { Recv() (*ReadRowsResponse, error) grpc.ClientStream }
type BigQueryRead_ReadRowsServer ¶
type BigQueryRead_ReadRowsServer interface { Send(*ReadRowsResponse) error grpc.ServerStream }
type BigQueryWriteClient ¶
type BigQueryWriteClient interface { // Creates a write stream to the given table. // Additionally, every table has a special stream named '_default' // to which data can be written. This stream doesn't need to be created using // CreateWriteStream. It is a stream that can be used simultaneously by any // number of clients. Data written to this stream is considered committed as // soon as an acknowledgement is received. CreateWriteStream(ctx context.Context, in *CreateWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error) // Appends data to the given stream. // // If `offset` is specified, the `offset` is checked against the end of // stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an // attempt is made to append to an offset beyond the current end of the stream // or `ALREADY_EXISTS` if user provides an `offset` that has already been // written to. User can retry with adjusted offset within the same RPC // connection. If `offset` is not specified, append happens at the end of the // stream. // // The response contains an optional offset at which the append // happened. No offset information will be returned for appends to a // default stream. // // Responses are received in the same order in which requests are sent. // There will be one response for each successful inserted request. Responses // may optionally embed error information if the originating AppendRequest was // not successfully processed. // // The specifics of when successfully appended data is made visible to the // table are governed by the type of stream: // // * For COMMITTED streams (which includes the default stream), data is // visible immediately upon successful append. // // * For BUFFERED streams, data is made visible via a subsequent `FlushRows` // rpc which advances a cursor to a newer offset in the stream. // // * For PENDING streams, data is not made visible until the stream itself is // finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly // committed via the `BatchCommitWriteStreams` rpc. AppendRows(ctx context.Context, opts ...grpc.CallOption) (BigQueryWrite_AppendRowsClient, error) // Gets information about a write stream. GetWriteStream(ctx context.Context, in *GetWriteStreamRequest, opts ...grpc.CallOption) (*WriteStream, error) // Finalize a write stream so that no new data can be appended to the // stream. Finalize is not supported on the '_default' stream. FinalizeWriteStream(ctx context.Context, in *FinalizeWriteStreamRequest, opts ...grpc.CallOption) (*FinalizeWriteStreamResponse, error) // Atomically commits a group of `PENDING` streams that belong to the same // `parent` table. // // Streams must be finalized before commit and cannot be committed multiple // times. Once a stream is committed, data in the stream becomes available // for read operations. BatchCommitWriteStreams(ctx context.Context, in *BatchCommitWriteStreamsRequest, opts ...grpc.CallOption) (*BatchCommitWriteStreamsResponse, error) // Flushes rows to a BUFFERED stream. // // If users are appending rows to BUFFERED stream, flush operation is // required in order for the rows to become available for reading. A // Flush operation flushes up to any previously flushed offset in a BUFFERED // stream, to the offset specified in the request. // // Flush is not supported on the _default stream, since it is not BUFFERED. FlushRows(ctx context.Context, in *FlushRowsRequest, opts ...grpc.CallOption) (*FlushRowsResponse, error) }
BigQueryWriteClient is the client API for BigQueryWrite service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewBigQueryWriteClient ¶
func NewBigQueryWriteClient(cc grpc.ClientConnInterface) BigQueryWriteClient
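A minimal sketch of using the write client over the bidirectional AppendRows stream. It assumes an already-dialed, authenticated *grpc.ClientConn and that this package's generated types are imported under the alias storagepb (the import path is not shown here); it is not the canonical client wiring.

import (
    "context"
    "fmt"

    "google.golang.org/grpc"
    // plus this package, imported under the alias storagepb
)

func appendToStream(ctx context.Context, conn *grpc.ClientConn, writeStream string, schema *storagepb.ProtoSchema, rows *storagepb.ProtoRows) error {
    client := storagepb.NewBigQueryWriteClient(conn)

    // AppendRows is a bidirectional stream: requests go out with Send and
    // acknowledgements come back with Recv in the same order.
    stream, err := client.AppendRows(ctx)
    if err != nil {
        return err
    }

    // The first request on a connection names the write stream (for the
    // default stream this ends in ".../streams/_default") and carries the
    // writer schema; later requests on the same connection may omit both.
    if err := stream.Send(&storagepb.AppendRowsRequest{
        WriteStream: writeStream,
        Rows: &storagepb.AppendRowsRequest_ProtoRows{
            ProtoRows: &storagepb.AppendRowsRequest_ProtoData{
                WriterSchema: schema,
                Rows:         rows,
            },
        },
    }); err != nil {
        return err
    }

    // One response is returned per request, in order.
    resp, err := stream.Recv()
    if err != nil {
        return err
    }
    if e := resp.GetError(); e != nil {
        return fmt.Errorf("append failed: %s", e.GetMessage())
    }
    return stream.CloseSend()
}

Later sketches in this document reuse the same assumptions (storagepb alias, authenticated connection) and only mention additional imports where needed.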
type BigQueryWriteServer ¶
type BigQueryWriteServer interface { // Creates a write stream to the given table. // Additionally, every table has a special stream named '_default' // to which data can be written. This stream doesn't need to be created using // CreateWriteStream. It is a stream that can be used simultaneously by any // number of clients. Data written to this stream is considered committed as // soon as an acknowledgement is received. CreateWriteStream(context.Context, *CreateWriteStreamRequest) (*WriteStream, error) // Appends data to the given stream. // // If `offset` is specified, the `offset` is checked against the end of // stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an // attempt is made to append to an offset beyond the current end of the stream // or `ALREADY_EXISTS` if user provides an `offset` that has already been // written to. User can retry with adjusted offset within the same RPC // connection. If `offset` is not specified, append happens at the end of the // stream. // // The response contains an optional offset at which the append // happened. No offset information will be returned for appends to a // default stream. // // Responses are received in the same order in which requests are sent. // There will be one response for each successful inserted request. Responses // may optionally embed error information if the originating AppendRequest was // not successfully processed. // // The specifics of when successfully appended data is made visible to the // table are governed by the type of stream: // // * For COMMITTED streams (which includes the default stream), data is // visible immediately upon successful append. // // * For BUFFERED streams, data is made visible via a subsequent `FlushRows` // rpc which advances a cursor to a newer offset in the stream. // // * For PENDING streams, data is not made visible until the stream itself is // finalized (via the `FinalizeWriteStream` rpc), and the stream is explicitly // committed via the `BatchCommitWriteStreams` rpc. AppendRows(BigQueryWrite_AppendRowsServer) error // Gets information about a write stream. GetWriteStream(context.Context, *GetWriteStreamRequest) (*WriteStream, error) // Finalize a write stream so that no new data can be appended to the // stream. Finalize is not supported on the '_default' stream. FinalizeWriteStream(context.Context, *FinalizeWriteStreamRequest) (*FinalizeWriteStreamResponse, error) // Atomically commits a group of `PENDING` streams that belong to the same // `parent` table. // // Streams must be finalized before commit and cannot be committed multiple // times. Once a stream is committed, data in the stream becomes available // for read operations. BatchCommitWriteStreams(context.Context, *BatchCommitWriteStreamsRequest) (*BatchCommitWriteStreamsResponse, error) // Flushes rows to a BUFFERED stream. // // If users are appending rows to BUFFERED stream, flush operation is // required in order for the rows to become available for reading. A // Flush operation flushes up to any previously flushed offset in a BUFFERED // stream, to the offset specified in the request. // // Flush is not supported on the _default stream, since it is not BUFFERED. FlushRows(context.Context, *FlushRowsRequest) (*FlushRowsResponse, error) }
BigQueryWriteServer is the server API for BigQueryWrite service.
type BigQueryWrite_AppendRowsClient ¶
type BigQueryWrite_AppendRowsClient interface {
    Send(*AppendRowsRequest) error
    Recv() (*AppendRowsResponse, error)
    grpc.ClientStream
}
type BigQueryWrite_AppendRowsServer ¶
type BigQueryWrite_AppendRowsServer interface {
    Send(*AppendRowsResponse) error
    Recv() (*AppendRowsRequest, error)
    grpc.ServerStream
}
type CreateReadSessionRequest ¶
type CreateReadSessionRequest struct {
    // Required. The request project that owns the session, in the form of
    // `projects/{project_id}`.
    Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
    // Required. Session to be created.
    ReadSession *ReadSession `protobuf:"bytes,2,opt,name=read_session,json=readSession,proto3" json:"read_session,omitempty"`
    // Max initial number of streams. If unset or zero, the server will
    // provide a value of streams so as to produce reasonable throughput. Must be
    // non-negative. The number of streams may be lower than the requested number,
    // depending on the amount of parallelism that is reasonable for the table.
    // There is a default system max limit of 1,000.
    //
    // This must be greater than or equal to preferred_min_stream_count.
    // Typically, clients should either leave this unset to let the system
    // determine an upper bound OR set this to a size for the maximum "units of
    // work" it can gracefully handle.
    MaxStreamCount int32 `protobuf:"varint,3,opt,name=max_stream_count,json=maxStreamCount,proto3" json:"max_stream_count,omitempty"`
    // The minimum preferred stream count. This parameter can be used to inform
    // the service that there is a desired lower bound on the number of streams.
    // This is typically a target parallelism of the client (e.g. a Spark
    // cluster with N-workers would set this to a low multiple of N to ensure
    // good cluster utilization).
    //
    // The system will make a best effort to provide at least this number of
    // streams, but in some cases might provide less.
    PreferredMinStreamCount int32 `` /* 135-byte string literal not displayed */
    // contains filtered or unexported fields
}
Request message for `CreateReadSession`.
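A minimal sketch of building this request and creating a session, under the same assumptions as the earlier AppendRows sketch. The table path and MaxStreamCount value are illustrative.

func createArrowSession(ctx context.Context, conn *grpc.ClientConn, project, table string) (*storagepb.ReadSession, error) {
    client := storagepb.NewBigQueryReadClient(conn)
    return client.CreateReadSession(ctx, &storagepb.CreateReadSessionRequest{
        Parent: "projects/" + project,
        ReadSession: &storagepb.ReadSession{
            Table:      table, // "projects/{project_id}/datasets/{dataset_id}/tables/{table_id}"
            DataFormat: storagepb.DataFormat_ARROW,
        },
        // Bound the stream count by what the reader can consume in parallel;
        // leaving it zero lets the service pick a reasonable value.
        MaxStreamCount: 4,
    })
}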
func (*CreateReadSessionRequest) Descriptor
deprecated
func (*CreateReadSessionRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateReadSessionRequest.ProtoReflect.Descriptor instead.
func (*CreateReadSessionRequest) GetMaxStreamCount ¶
func (x *CreateReadSessionRequest) GetMaxStreamCount() int32
func (*CreateReadSessionRequest) GetParent ¶
func (x *CreateReadSessionRequest) GetParent() string
func (*CreateReadSessionRequest) GetPreferredMinStreamCount ¶
func (x *CreateReadSessionRequest) GetPreferredMinStreamCount() int32
func (*CreateReadSessionRequest) GetReadSession ¶
func (x *CreateReadSessionRequest) GetReadSession() *ReadSession
func (*CreateReadSessionRequest) ProtoMessage ¶
func (*CreateReadSessionRequest) ProtoMessage()
func (*CreateReadSessionRequest) ProtoReflect ¶
func (x *CreateReadSessionRequest) ProtoReflect() protoreflect.Message
func (*CreateReadSessionRequest) Reset ¶
func (x *CreateReadSessionRequest) Reset()
func (*CreateReadSessionRequest) String ¶
func (x *CreateReadSessionRequest) String() string
type CreateWriteStreamRequest ¶
type CreateWriteStreamRequest struct { // Required. Reference to the table to which the stream belongs, in the format // of `projects/{project}/datasets/{dataset}/tables/{table}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Required. Stream to be created. WriteStream *WriteStream `protobuf:"bytes,2,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"` // contains filtered or unexported fields }
Request message for `CreateWriteStream`.
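A minimal sketch of creating a PENDING stream on a table, under the same assumptions as the earlier sketches.

func createPendingStream(ctx context.Context, client storagepb.BigQueryWriteClient, table string) (*storagepb.WriteStream, error) {
    return client.CreateWriteStream(ctx, &storagepb.CreateWriteStreamRequest{
        Parent: table, // "projects/{project}/datasets/{dataset}/tables/{table}"
        WriteStream: &storagepb.WriteStream{
            // PENDING: appended data stays invisible until the stream is
            // finalized and batch-committed.
            Type: storagepb.WriteStream_PENDING,
        },
    })
}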
func (*CreateWriteStreamRequest) Descriptor
deprecated
func (*CreateWriteStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateWriteStreamRequest.ProtoReflect.Descriptor instead.
func (*CreateWriteStreamRequest) GetParent ¶
func (x *CreateWriteStreamRequest) GetParent() string
func (*CreateWriteStreamRequest) GetWriteStream ¶
func (x *CreateWriteStreamRequest) GetWriteStream() *WriteStream
func (*CreateWriteStreamRequest) ProtoMessage ¶
func (*CreateWriteStreamRequest) ProtoMessage()
func (*CreateWriteStreamRequest) ProtoReflect ¶
func (x *CreateWriteStreamRequest) ProtoReflect() protoreflect.Message
func (*CreateWriteStreamRequest) Reset ¶
func (x *CreateWriteStreamRequest) Reset()
func (*CreateWriteStreamRequest) String ¶
func (x *CreateWriteStreamRequest) String() string
type DataFormat ¶
type DataFormat int32
Data format for input or output data.
const ( // Data format is unspecified. DataFormat_DATA_FORMAT_UNSPECIFIED DataFormat = 0 // Avro is a standard open source row based file format. // See https://avro.apache.org/ for more details. DataFormat_AVRO DataFormat = 1 // Arrow is a standard open source column-based message format. // See https://arrow.apache.org/ for more details. DataFormat_ARROW DataFormat = 2 )
func (DataFormat) Descriptor ¶
func (DataFormat) Descriptor() protoreflect.EnumDescriptor
func (DataFormat) Enum ¶
func (x DataFormat) Enum() *DataFormat
func (DataFormat) EnumDescriptor
deprecated
func (DataFormat) EnumDescriptor() ([]byte, []int)
Deprecated: Use DataFormat.Descriptor instead.
func (DataFormat) Number ¶
func (x DataFormat) Number() protoreflect.EnumNumber
func (DataFormat) String ¶
func (x DataFormat) String() string
func (DataFormat) Type ¶
func (DataFormat) Type() protoreflect.EnumType
type FinalizeWriteStreamRequest ¶
type FinalizeWriteStreamRequest struct { // Required. Name of the stream to finalize, in the form of // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // contains filtered or unexported fields }
Request message for invoking `FinalizeWriteStream`.
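A minimal sketch of the tail of the PENDING-stream lifecycle: finalize the stream, then commit it under its parent table. It assumes the BatchCommitWriteStreams types defined earlier in this package expose Parent, WriteStreams, and GetStreamErrors as shown; adjust to the actual definitions.

func finalizeAndCommit(ctx context.Context, client storagepb.BigQueryWriteClient, table, streamName string) error {
    // Finalize: no further appends are accepted on the stream.
    if _, err := client.FinalizeWriteStream(ctx, &storagepb.FinalizeWriteStreamRequest{Name: streamName}); err != nil {
        return err
    }
    // Commit: data in the finalized stream becomes readable atomically.
    resp, err := client.BatchCommitWriteStreams(ctx, &storagepb.BatchCommitWriteStreamsRequest{
        Parent:       table,
        WriteStreams: []string{streamName},
    })
    if err != nil {
        return err
    }
    // Per-stream problems are reported in the response rather than as an
    // RPC error.
    for _, se := range resp.GetStreamErrors() {
        return fmt.Errorf("commit of %s failed: %s", se.GetEntity(), se.GetErrorMessage())
    }
    return nil
}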
func (*FinalizeWriteStreamRequest) Descriptor
deprecated
func (*FinalizeWriteStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use FinalizeWriteStreamRequest.ProtoReflect.Descriptor instead.
func (*FinalizeWriteStreamRequest) GetName ¶
func (x *FinalizeWriteStreamRequest) GetName() string
func (*FinalizeWriteStreamRequest) ProtoMessage ¶
func (*FinalizeWriteStreamRequest) ProtoMessage()
func (*FinalizeWriteStreamRequest) ProtoReflect ¶
func (x *FinalizeWriteStreamRequest) ProtoReflect() protoreflect.Message
func (*FinalizeWriteStreamRequest) Reset ¶
func (x *FinalizeWriteStreamRequest) Reset()
func (*FinalizeWriteStreamRequest) String ¶
func (x *FinalizeWriteStreamRequest) String() string
type FinalizeWriteStreamResponse ¶
type FinalizeWriteStreamResponse struct { // Number of rows in the finalized stream. RowCount int64 `protobuf:"varint,1,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` // contains filtered or unexported fields }
Response message for `FinalizeWriteStream`.
func (*FinalizeWriteStreamResponse) Descriptor
deprecated
func (*FinalizeWriteStreamResponse) Descriptor() ([]byte, []int)
Deprecated: Use FinalizeWriteStreamResponse.ProtoReflect.Descriptor instead.
func (*FinalizeWriteStreamResponse) GetRowCount ¶
func (x *FinalizeWriteStreamResponse) GetRowCount() int64
func (*FinalizeWriteStreamResponse) ProtoMessage ¶
func (*FinalizeWriteStreamResponse) ProtoMessage()
func (*FinalizeWriteStreamResponse) ProtoReflect ¶
func (x *FinalizeWriteStreamResponse) ProtoReflect() protoreflect.Message
func (*FinalizeWriteStreamResponse) Reset ¶
func (x *FinalizeWriteStreamResponse) Reset()
func (*FinalizeWriteStreamResponse) String ¶
func (x *FinalizeWriteStreamResponse) String() string
type FlushRowsRequest ¶
type FlushRowsRequest struct {
    // Required. The stream that is the target of the flush operation.
    WriteStream string `protobuf:"bytes,1,opt,name=write_stream,json=writeStream,proto3" json:"write_stream,omitempty"`
    // Ending offset of the flush operation. Rows before this offset (including
    // this offset) will be flushed.
    Offset *wrapperspb.Int64Value `protobuf:"bytes,2,opt,name=offset,proto3" json:"offset,omitempty"`
    // contains filtered or unexported fields
}
Request message for `FlushRows`.
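A minimal sketch of flushing a BUFFERED stream up to a given offset, using wrapperspb from google.golang.org/protobuf/types/known/wrapperspb in addition to the earlier assumptions.

func flushTo(ctx context.Context, client storagepb.BigQueryWriteClient, streamName string, offset int64) (int64, error) {
    resp, err := client.FlushRows(ctx, &storagepb.FlushRowsRequest{
        WriteStream: streamName,
        Offset:      wrapperspb.Int64(offset), // the flush includes this offset
    })
    if err != nil {
        return 0, err
    }
    return resp.GetOffset(), nil // highest offset now visible to readers
}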
func (*FlushRowsRequest) Descriptor
deprecated
func (*FlushRowsRequest) Descriptor() ([]byte, []int)
Deprecated: Use FlushRowsRequest.ProtoReflect.Descriptor instead.
func (*FlushRowsRequest) GetOffset ¶
func (x *FlushRowsRequest) GetOffset() *wrapperspb.Int64Value
func (*FlushRowsRequest) GetWriteStream ¶
func (x *FlushRowsRequest) GetWriteStream() string
func (*FlushRowsRequest) ProtoMessage ¶
func (*FlushRowsRequest) ProtoMessage()
func (*FlushRowsRequest) ProtoReflect ¶
func (x *FlushRowsRequest) ProtoReflect() protoreflect.Message
func (*FlushRowsRequest) Reset ¶
func (x *FlushRowsRequest) Reset()
func (*FlushRowsRequest) String ¶
func (x *FlushRowsRequest) String() string
type FlushRowsResponse ¶
type FlushRowsResponse struct { // The rows before this offset (including this offset) are flushed. Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` // contains filtered or unexported fields }
Response message for `FlushRows`.
func (*FlushRowsResponse) Descriptor
deprecated
func (*FlushRowsResponse) Descriptor() ([]byte, []int)
Deprecated: Use FlushRowsResponse.ProtoReflect.Descriptor instead.
func (*FlushRowsResponse) GetOffset ¶
func (x *FlushRowsResponse) GetOffset() int64
func (*FlushRowsResponse) ProtoMessage ¶
func (*FlushRowsResponse) ProtoMessage()
func (*FlushRowsResponse) ProtoReflect ¶
func (x *FlushRowsResponse) ProtoReflect() protoreflect.Message
func (*FlushRowsResponse) Reset ¶
func (x *FlushRowsResponse) Reset()
func (*FlushRowsResponse) String ¶
func (x *FlushRowsResponse) String() string
type GetWriteStreamRequest ¶
type GetWriteStreamRequest struct { // Required. Name of the stream to get, in the form of // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Indicates whether to get full or partial view of the WriteStream. If // not set, view returned will be basic. View WriteStreamView `protobuf:"varint,3,opt,name=view,proto3,enum=google.cloud.bigquery.storage.v1.WriteStreamView" json:"view,omitempty"` // contains filtered or unexported fields }
Request message for `GetWriteStream`.
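A minimal sketch of requesting the FULL view so that schema-related metadata is included (the default BASIC view omits it), under the same assumptions as the earlier sketches.

func fetchStreamWithSchema(ctx context.Context, client storagepb.BigQueryWriteClient, streamName string) (*storagepb.WriteStream, error) {
    return client.GetWriteStream(ctx, &storagepb.GetWriteStreamRequest{
        Name: streamName,
        // BASIC (the default) omits schema information; ask for FULL when the
        // table schema is needed.
        View: storagepb.WriteStreamView_FULL,
    })
}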
func (*GetWriteStreamRequest) Descriptor
deprecated
func (*GetWriteStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetWriteStreamRequest.ProtoReflect.Descriptor instead.
func (*GetWriteStreamRequest) GetName ¶
func (x *GetWriteStreamRequest) GetName() string
func (*GetWriteStreamRequest) GetView ¶
func (x *GetWriteStreamRequest) GetView() WriteStreamView
func (*GetWriteStreamRequest) ProtoMessage ¶
func (*GetWriteStreamRequest) ProtoMessage()
func (*GetWriteStreamRequest) ProtoReflect ¶
func (x *GetWriteStreamRequest) ProtoReflect() protoreflect.Message
func (*GetWriteStreamRequest) Reset ¶
func (x *GetWriteStreamRequest) Reset()
func (*GetWriteStreamRequest) String ¶
func (x *GetWriteStreamRequest) String() string
type ProtoRows ¶
type ProtoRows struct { // A sequence of rows serialized as a Protocol Buffer. // // See https://developers.google.com/protocol-buffers/docs/overview for more // information on deserializing this field. SerializedRows [][]byte `protobuf:"bytes,1,rep,name=serialized_rows,json=serializedRows,proto3" json:"serialized_rows,omitempty"` // contains filtered or unexported fields }
func (*ProtoRows) Descriptor
deprecated
func (*ProtoRows) GetSerializedRows ¶
func (*ProtoRows) ProtoMessage ¶
func (*ProtoRows) ProtoMessage()
func (*ProtoRows) ProtoReflect ¶
func (x *ProtoRows) ProtoReflect() protoreflect.Message
type ProtoSchema ¶
type ProtoSchema struct { // Descriptor for input message. The provided descriptor must be self // contained, such that data rows sent can be fully decoded using only the // single descriptor. For data rows that are compositions of multiple // independent messages, this means the descriptor may need to be transformed // to only use nested types: // https://developers.google.com/protocol-buffers/docs/proto#nested // // For additional information for how proto types and values map onto BigQuery // see: https://cloud.google.com/bigquery/docs/write-api#data_type_conversions ProtoDescriptor *descriptorpb.DescriptorProto `protobuf:"bytes,1,opt,name=proto_descriptor,json=protoDescriptor,proto3" json:"proto_descriptor,omitempty"` // contains filtered or unexported fields }
ProtoSchema describes the schema of the serialized protocol buffer data rows.
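A minimal sketch of deriving a ProtoSchema from a generated message type, using proto and protodesc from google.golang.org/protobuf. It does not handle messages whose descriptors reference other message types; those must first be rewritten into a self-contained form with nested types, as described above.

func schemaFor(msg proto.Message) *storagepb.ProtoSchema {
    // Convert the message's descriptor into a DescriptorProto. This is only
    // sufficient when the descriptor is already self-contained.
    dp := protodesc.ToDescriptorProto(msg.ProtoReflect().Descriptor())
    return &storagepb.ProtoSchema{ProtoDescriptor: dp}
}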
func (*ProtoSchema) Descriptor
deprecated
func (*ProtoSchema) Descriptor() ([]byte, []int)
Deprecated: Use ProtoSchema.ProtoReflect.Descriptor instead.
func (*ProtoSchema) GetProtoDescriptor ¶
func (x *ProtoSchema) GetProtoDescriptor() *descriptorpb.DescriptorProto
func (*ProtoSchema) ProtoMessage ¶
func (*ProtoSchema) ProtoMessage()
func (*ProtoSchema) ProtoReflect ¶
func (x *ProtoSchema) ProtoReflect() protoreflect.Message
func (*ProtoSchema) Reset ¶
func (x *ProtoSchema) Reset()
func (*ProtoSchema) String ¶
func (x *ProtoSchema) String() string
type ReadRowsRequest ¶
type ReadRowsRequest struct { // Required. Stream to read rows from. ReadStream string `protobuf:"bytes,1,opt,name=read_stream,json=readStream,proto3" json:"read_stream,omitempty"` // The offset requested must be less than the last row read from Read. // Requesting a larger offset is undefined. If not specified, start reading // from offset zero. Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` // contains filtered or unexported fields }
Request message for `ReadRows`.
func (*ReadRowsRequest) Descriptor
deprecated
func (*ReadRowsRequest) Descriptor() ([]byte, []int)
Deprecated: Use ReadRowsRequest.ProtoReflect.Descriptor instead.
func (*ReadRowsRequest) GetOffset ¶
func (x *ReadRowsRequest) GetOffset() int64
func (*ReadRowsRequest) GetReadStream ¶
func (x *ReadRowsRequest) GetReadStream() string
func (*ReadRowsRequest) ProtoMessage ¶
func (*ReadRowsRequest) ProtoMessage()
func (*ReadRowsRequest) ProtoReflect ¶
func (x *ReadRowsRequest) ProtoReflect() protoreflect.Message
func (*ReadRowsRequest) Reset ¶
func (x *ReadRowsRequest) Reset()
func (*ReadRowsRequest) String ¶
func (x *ReadRowsRequest) String() string
type ReadRowsResponse ¶
type ReadRowsResponse struct { // Row data is returned in format specified during session creation. // // Types that are assignable to Rows: // *ReadRowsResponse_AvroRows // *ReadRowsResponse_ArrowRecordBatch Rows isReadRowsResponse_Rows `protobuf_oneof:"rows"` // Number of serialized rows in the rows block. RowCount int64 `protobuf:"varint,6,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"` // Statistics for the stream. Stats *StreamStats `protobuf:"bytes,2,opt,name=stats,proto3" json:"stats,omitempty"` // Throttling state. If unset, the latest response still describes // the current throttling status. ThrottleState *ThrottleState `protobuf:"bytes,5,opt,name=throttle_state,json=throttleState,proto3" json:"throttle_state,omitempty"` // The schema for the read. If read_options.selected_fields is set, the // schema may be different from the table schema as it will only contain // the selected fields. This schema is equivalent to the one returned by // CreateSession. This field is only populated in the first ReadRowsResponse // RPC. // // Types that are assignable to Schema: // *ReadRowsResponse_AvroSchema // *ReadRowsResponse_ArrowSchema Schema isReadRowsResponse_Schema `protobuf_oneof:"schema"` // contains filtered or unexported fields }
Response from calling `ReadRows` may include row data, progress and throttling information.
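A minimal sketch of draining a single read stream, surfacing throttling and handling Arrow record batches, under the earlier assumptions plus io and log.

func drainStream(ctx context.Context, client storagepb.BigQueryReadClient, streamName string) error {
    rows, err := client.ReadRows(ctx, &storagepb.ReadRowsRequest{ReadStream: streamName})
    if err != nil {
        return err
    }
    for {
        resp, err := rows.Recv()
        if err == io.EOF {
            return nil // stream fully consumed
        }
        if err != nil {
            return err
        }
        if ts := resp.GetThrottleState(); ts != nil && ts.GetThrottlePercent() > 0 {
            log.Printf("connection throttled at %d%%", ts.GetThrottlePercent())
        }
        if rb := resp.GetArrowRecordBatch(); rb != nil {
            // Decode rb.GetSerializedRecordBatch() with an Arrow IPC reader,
            // using the ArrowSchema carried in the first response.
            _ = rb
        }
    }
}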
func (*ReadRowsResponse) Descriptor
deprecated
func (*ReadRowsResponse) Descriptor() ([]byte, []int)
Deprecated: Use ReadRowsResponse.ProtoReflect.Descriptor instead.
func (*ReadRowsResponse) GetArrowRecordBatch ¶
func (x *ReadRowsResponse) GetArrowRecordBatch() *ArrowRecordBatch
func (*ReadRowsResponse) GetArrowSchema ¶
func (x *ReadRowsResponse) GetArrowSchema() *ArrowSchema
func (*ReadRowsResponse) GetAvroRows ¶
func (x *ReadRowsResponse) GetAvroRows() *AvroRows
func (*ReadRowsResponse) GetAvroSchema ¶
func (x *ReadRowsResponse) GetAvroSchema() *AvroSchema
func (*ReadRowsResponse) GetRowCount ¶
func (x *ReadRowsResponse) GetRowCount() int64
func (*ReadRowsResponse) GetRows ¶
func (m *ReadRowsResponse) GetRows() isReadRowsResponse_Rows
func (*ReadRowsResponse) GetSchema ¶
func (m *ReadRowsResponse) GetSchema() isReadRowsResponse_Schema
func (*ReadRowsResponse) GetStats ¶
func (x *ReadRowsResponse) GetStats() *StreamStats
func (*ReadRowsResponse) GetThrottleState ¶
func (x *ReadRowsResponse) GetThrottleState() *ThrottleState
func (*ReadRowsResponse) ProtoMessage ¶
func (*ReadRowsResponse) ProtoMessage()
func (*ReadRowsResponse) ProtoReflect ¶
func (x *ReadRowsResponse) ProtoReflect() protoreflect.Message
func (*ReadRowsResponse) Reset ¶
func (x *ReadRowsResponse) Reset()
func (*ReadRowsResponse) String ¶
func (x *ReadRowsResponse) String() string
type ReadRowsResponse_ArrowRecordBatch ¶
type ReadRowsResponse_ArrowRecordBatch struct { // Serialized row data in Arrow RecordBatch format. ArrowRecordBatch *ArrowRecordBatch `protobuf:"bytes,4,opt,name=arrow_record_batch,json=arrowRecordBatch,proto3,oneof"` }
type ReadRowsResponse_ArrowSchema ¶
type ReadRowsResponse_ArrowSchema struct { // Output only. Arrow schema. ArrowSchema *ArrowSchema `protobuf:"bytes,8,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"` }
type ReadRowsResponse_AvroRows ¶
type ReadRowsResponse_AvroRows struct { // Serialized row data in AVRO format. AvroRows *AvroRows `protobuf:"bytes,3,opt,name=avro_rows,json=avroRows,proto3,oneof"` }
type ReadRowsResponse_AvroSchema ¶
type ReadRowsResponse_AvroSchema struct { // Output only. Avro schema. AvroSchema *AvroSchema `protobuf:"bytes,7,opt,name=avro_schema,json=avroSchema,proto3,oneof"` }
type ReadSession ¶
type ReadSession struct { // Output only. Unique identifier for the session, in the form // `projects/{project_id}/locations/{location}/sessions/{session_id}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Output only. Time at which the session becomes invalid. After this time, // subsequent requests to read this Session will return errors. The // expire_time is automatically assigned and currently cannot be specified or // updated. ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` // Immutable. Data format of the output data. DATA_FORMAT_UNSPECIFIED not // supported. DataFormat DataFormat `` /* 141-byte string literal not displayed */ // The schema for the read. If read_options.selected_fields is set, the // schema may be different from the table schema as it will only contain // the selected fields. // // Types that are assignable to Schema: // *ReadSession_AvroSchema // *ReadSession_ArrowSchema Schema isReadSession_Schema `protobuf_oneof:"schema"` // Immutable. Table that this ReadSession is reading from, in the form // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}` Table string `protobuf:"bytes,6,opt,name=table,proto3" json:"table,omitempty"` // Optional. Any modifiers which are applied when reading from the specified // table. TableModifiers *ReadSession_TableModifiers `protobuf:"bytes,7,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"` // Optional. Read options for this session (e.g. column selection, filters). ReadOptions *ReadSession_TableReadOptions `protobuf:"bytes,8,opt,name=read_options,json=readOptions,proto3" json:"read_options,omitempty"` // Output only. A list of streams created with the session. // // At least one stream is created with the session. In the future, larger // request_stream_count values *may* result in this list being unpopulated, // in that case, the user will need to use a List method to get the streams // instead, which is not yet available. Streams []*ReadStream `protobuf:"bytes,10,rep,name=streams,proto3" json:"streams,omitempty"` // Output only. An estimate on the number of bytes this session will scan when // all streams are completely consumed. This estimate is based on // metadata from the table which might be incomplete or stale. EstimatedTotalBytesScanned int64 `` /* 145-byte string literal not displayed */ // Output only. An estimate on the number of rows present in this session's // streams. This estimate is based on metadata from the table which might be // incomplete or stale. EstimatedRowCount int64 `protobuf:"varint,14,opt,name=estimated_row_count,json=estimatedRowCount,proto3" json:"estimated_row_count,omitempty"` // Optional. ID set by client to annotate a session identity. This does not // need to be strictly unique, but instead the same ID should be used to group // logically connected sessions (e.g. All using the same ID for all sessions // needed to complete a Spark SQL query is reasonable). // // Maximum length is 256 bytes. TraceId string `protobuf:"bytes,13,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` // contains filtered or unexported fields }
Information about the ReadSession.
func (*ReadSession) Descriptor
deprecated
func (*ReadSession) Descriptor() ([]byte, []int)
Deprecated: Use ReadSession.ProtoReflect.Descriptor instead.
func (*ReadSession) GetArrowSchema ¶
func (x *ReadSession) GetArrowSchema() *ArrowSchema
func (*ReadSession) GetAvroSchema ¶
func (x *ReadSession) GetAvroSchema() *AvroSchema
func (*ReadSession) GetDataFormat ¶
func (x *ReadSession) GetDataFormat() DataFormat
func (*ReadSession) GetEstimatedRowCount ¶ added in v1.45.0
func (x *ReadSession) GetEstimatedRowCount() int64
func (*ReadSession) GetEstimatedTotalBytesScanned ¶
func (x *ReadSession) GetEstimatedTotalBytesScanned() int64
func (*ReadSession) GetExpireTime ¶
func (x *ReadSession) GetExpireTime() *timestamppb.Timestamp
func (*ReadSession) GetName ¶
func (x *ReadSession) GetName() string
func (*ReadSession) GetReadOptions ¶
func (x *ReadSession) GetReadOptions() *ReadSession_TableReadOptions
func (*ReadSession) GetSchema ¶
func (m *ReadSession) GetSchema() isReadSession_Schema
func (*ReadSession) GetStreams ¶
func (x *ReadSession) GetStreams() []*ReadStream
func (*ReadSession) GetTable ¶
func (x *ReadSession) GetTable() string
func (*ReadSession) GetTableModifiers ¶
func (x *ReadSession) GetTableModifiers() *ReadSession_TableModifiers
func (*ReadSession) GetTraceId ¶
func (x *ReadSession) GetTraceId() string
func (*ReadSession) ProtoMessage ¶
func (*ReadSession) ProtoMessage()
func (*ReadSession) ProtoReflect ¶
func (x *ReadSession) ProtoReflect() protoreflect.Message
func (*ReadSession) Reset ¶
func (x *ReadSession) Reset()
func (*ReadSession) String ¶
func (x *ReadSession) String() string
type ReadSession_ArrowSchema ¶
type ReadSession_ArrowSchema struct { // Output only. Arrow schema. ArrowSchema *ArrowSchema `protobuf:"bytes,5,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"` }
type ReadSession_AvroSchema ¶
type ReadSession_AvroSchema struct { // Output only. Avro schema. AvroSchema *AvroSchema `protobuf:"bytes,4,opt,name=avro_schema,json=avroSchema,proto3,oneof"` }
type ReadSession_TableModifiers ¶
type ReadSession_TableModifiers struct { // The snapshot time of the table. If not set, interpreted as now. SnapshotTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"` // contains filtered or unexported fields }
Additional attributes when reading a table.
func (*ReadSession_TableModifiers) Descriptor
deprecated
func (*ReadSession_TableModifiers) Descriptor() ([]byte, []int)
Deprecated: Use ReadSession_TableModifiers.ProtoReflect.Descriptor instead.
func (*ReadSession_TableModifiers) GetSnapshotTime ¶
func (x *ReadSession_TableModifiers) GetSnapshotTime() *timestamppb.Timestamp
func (*ReadSession_TableModifiers) ProtoMessage ¶
func (*ReadSession_TableModifiers) ProtoMessage()
func (*ReadSession_TableModifiers) ProtoReflect ¶
func (x *ReadSession_TableModifiers) ProtoReflect() protoreflect.Message
func (*ReadSession_TableModifiers) Reset ¶
func (x *ReadSession_TableModifiers) Reset()
func (*ReadSession_TableModifiers) String ¶
func (x *ReadSession_TableModifiers) String() string
type ReadSession_TableReadOptions ¶
type ReadSession_TableReadOptions struct { // Optional. The names of the fields in the table to be returned. If no // field names are specified, then all fields in the table are returned. // // Nested fields -- the child elements of a STRUCT field -- can be selected // individually using their fully-qualified names, and will be returned as // record fields containing only the selected nested fields. If a STRUCT // field is specified in the selected fields list, all of the child elements // will be returned. // // As an example, consider a table with the following schema: // // { // "name": "struct_field", // "type": "RECORD", // "mode": "NULLABLE", // "fields": [ // { // "name": "string_field1", // "type": "STRING", // . "mode": "NULLABLE" // }, // { // "name": "string_field2", // "type": "STRING", // "mode": "NULLABLE" // } // ] // } // // Specifying "struct_field" in the selected fields list will result in a // read session schema with the following logical structure: // // struct_field { // string_field1 // string_field2 // } // // Specifying "struct_field.string_field1" in the selected fields list will // result in a read session schema with the following logical structure: // // struct_field { // string_field1 // } // // The order of the fields in the read session schema is derived from the // table schema and does not correspond to the order in which the fields are // specified in this list. SelectedFields []string `protobuf:"bytes,1,rep,name=selected_fields,json=selectedFields,proto3" json:"selected_fields,omitempty"` // SQL text filtering statement, similar to a WHERE clause in a query. // Aggregates are not supported. // // Examples: "int_field > 5" // "date_field = CAST('2014-9-27' as DATE)" // "nullable_field is not NULL" // "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" // "numeric_field BETWEEN 1.0 AND 5.0" // // Restricted to a maximum length for 1 MB. RowRestriction string `protobuf:"bytes,2,opt,name=row_restriction,json=rowRestriction,proto3" json:"row_restriction,omitempty"` // Types that are assignable to OutputFormatSerializationOptions: // *ReadSession_TableReadOptions_ArrowSerializationOptions // *ReadSession_TableReadOptions_AvroSerializationOptions OutputFormatSerializationOptions isReadSession_TableReadOptions_OutputFormatSerializationOptions `protobuf_oneof:"output_format_serialization_options"` // contains filtered or unexported fields }
Options dictating how we read a table.
func (*ReadSession_TableReadOptions) Descriptor
deprecated
func (*ReadSession_TableReadOptions) Descriptor() ([]byte, []int)
Deprecated: Use ReadSession_TableReadOptions.ProtoReflect.Descriptor instead.
func (*ReadSession_TableReadOptions) GetArrowSerializationOptions ¶
func (x *ReadSession_TableReadOptions) GetArrowSerializationOptions() *ArrowSerializationOptions
func (*ReadSession_TableReadOptions) GetAvroSerializationOptions ¶
func (x *ReadSession_TableReadOptions) GetAvroSerializationOptions() *AvroSerializationOptions
func (*ReadSession_TableReadOptions) GetOutputFormatSerializationOptions ¶
func (m *ReadSession_TableReadOptions) GetOutputFormatSerializationOptions() isReadSession_TableReadOptions_OutputFormatSerializationOptions
func (*ReadSession_TableReadOptions) GetRowRestriction ¶
func (x *ReadSession_TableReadOptions) GetRowRestriction() string
func (*ReadSession_TableReadOptions) GetSelectedFields ¶
func (x *ReadSession_TableReadOptions) GetSelectedFields() []string
func (*ReadSession_TableReadOptions) ProtoMessage ¶
func (*ReadSession_TableReadOptions) ProtoMessage()
func (*ReadSession_TableReadOptions) ProtoReflect ¶
func (x *ReadSession_TableReadOptions) ProtoReflect() protoreflect.Message
func (*ReadSession_TableReadOptions) Reset ¶
func (x *ReadSession_TableReadOptions) Reset()
func (*ReadSession_TableReadOptions) String ¶
func (x *ReadSession_TableReadOptions) String() string
type ReadSession_TableReadOptions_ArrowSerializationOptions ¶
type ReadSession_TableReadOptions_ArrowSerializationOptions struct { // Optional. Options specific to the Apache Arrow output format. ArrowSerializationOptions *ArrowSerializationOptions `protobuf:"bytes,3,opt,name=arrow_serialization_options,json=arrowSerializationOptions,proto3,oneof"` }
type ReadSession_TableReadOptions_AvroSerializationOptions ¶
type ReadSession_TableReadOptions_AvroSerializationOptions struct { // Optional. Options specific to the Apache Avro output format AvroSerializationOptions *AvroSerializationOptions `protobuf:"bytes,4,opt,name=avro_serialization_options,json=avroSerializationOptions,proto3,oneof"` }
type ReadStream ¶
type ReadStream struct { // Output only. Name of the stream, in the form // `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // contains filtered or unexported fields }
Information about a single stream that gets data out of the storage system. Most of the information about `ReadStream` instances is aggregated, making `ReadStream` lightweight.
func (*ReadStream) Descriptor
deprecated
func (*ReadStream) Descriptor() ([]byte, []int)
Deprecated: Use ReadStream.ProtoReflect.Descriptor instead.
func (*ReadStream) GetName ¶
func (x *ReadStream) GetName() string
func (*ReadStream) ProtoMessage ¶
func (*ReadStream) ProtoMessage()
func (*ReadStream) ProtoReflect ¶
func (x *ReadStream) ProtoReflect() protoreflect.Message
func (*ReadStream) Reset ¶
func (x *ReadStream) Reset()
func (*ReadStream) String ¶
func (x *ReadStream) String() string
type RowError ¶
type RowError struct { // Index of the malformed row in the request. Index int64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` // Structured error reason for a row error. Code RowError_RowErrorCode `protobuf:"varint,2,opt,name=code,proto3,enum=google.cloud.bigquery.storage.v1.RowError_RowErrorCode" json:"code,omitempty"` // Description of the issue encountered when processing the row. Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` // contains filtered or unexported fields }
The message that carries row-level error information for a request.
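A minimal sketch of reporting row-level failures from an AppendRows response (log is the standard library logger; the retry strategy is up to the caller).

func logRowErrors(resp *storagepb.AppendRowsResponse) {
    for _, re := range resp.GetRowErrors() {
        // Only the listed rows failed, so retry selectively rather than
        // resending the whole batch.
        log.Printf("row %d rejected (%s): %s", re.GetIndex(), re.GetCode(), re.GetMessage())
    }
}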
func (*RowError) Descriptor
deprecated
func (*RowError) GetCode ¶
func (x *RowError) GetCode() RowError_RowErrorCode
func (*RowError) GetMessage ¶
func (*RowError) ProtoMessage ¶
func (*RowError) ProtoMessage()
func (*RowError) ProtoReflect ¶
func (x *RowError) ProtoReflect() protoreflect.Message
type RowError_RowErrorCode ¶
type RowError_RowErrorCode int32
Error code for `RowError`.
const ( // Default error. RowError_ROW_ERROR_CODE_UNSPECIFIED RowError_RowErrorCode = 0 // One or more fields in the row has errors. RowError_FIELDS_ERROR RowError_RowErrorCode = 1 )
func (RowError_RowErrorCode) Descriptor ¶
func (RowError_RowErrorCode) Descriptor() protoreflect.EnumDescriptor
func (RowError_RowErrorCode) Enum ¶
func (x RowError_RowErrorCode) Enum() *RowError_RowErrorCode
func (RowError_RowErrorCode) EnumDescriptor
deprecated
func (RowError_RowErrorCode) EnumDescriptor() ([]byte, []int)
Deprecated: Use RowError_RowErrorCode.Descriptor instead.
func (RowError_RowErrorCode) Number ¶
func (x RowError_RowErrorCode) Number() protoreflect.EnumNumber
func (RowError_RowErrorCode) String ¶
func (x RowError_RowErrorCode) String() string
func (RowError_RowErrorCode) Type ¶
func (RowError_RowErrorCode) Type() protoreflect.EnumType
type SplitReadStreamRequest ¶
type SplitReadStreamRequest struct { // Required. Name of the stream to split. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // A value in the range (0.0, 1.0) that specifies the fractional point at // which the original stream should be split. The actual split point is // evaluated on pre-filtered rows, so if a filter is provided, then there is // no guarantee that the division of the rows between the new child streams // will be proportional to this fractional value. Additionally, because the // server-side unit for assigning data is collections of rows, this fraction // will always map to a data storage boundary on the server side. Fraction float64 `protobuf:"fixed64,2,opt,name=fraction,proto3" json:"fraction,omitempty"` // contains filtered or unexported fields }
Request message for `SplitReadStream`.
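A minimal sketch of splitting a read stream roughly in half, under the same assumptions as the earlier read sketches.

func splitInHalf(ctx context.Context, client storagepb.BigQueryReadClient, streamName string) (*storagepb.ReadStream, *storagepb.ReadStream, error) {
    resp, err := client.SplitReadStream(ctx, &storagepb.SplitReadStreamRequest{
        Name:     streamName,
        Fraction: 0.5, // approximate split point over pre-filtered rows
    })
    if err != nil {
        return nil, nil, err
    }
    // An empty remainder stream means the original stream could not be split
    // any further.
    return resp.GetPrimaryStream(), resp.GetRemainderStream(), nil
}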
func (*SplitReadStreamRequest) Descriptor
deprecated
func (*SplitReadStreamRequest) Descriptor() ([]byte, []int)
Deprecated: Use SplitReadStreamRequest.ProtoReflect.Descriptor instead.
func (*SplitReadStreamRequest) GetFraction ¶
func (x *SplitReadStreamRequest) GetFraction() float64
func (*SplitReadStreamRequest) GetName ¶
func (x *SplitReadStreamRequest) GetName() string
func (*SplitReadStreamRequest) ProtoMessage ¶
func (*SplitReadStreamRequest) ProtoMessage()
func (*SplitReadStreamRequest) ProtoReflect ¶
func (x *SplitReadStreamRequest) ProtoReflect() protoreflect.Message
func (*SplitReadStreamRequest) Reset ¶
func (x *SplitReadStreamRequest) Reset()
func (*SplitReadStreamRequest) String ¶
func (x *SplitReadStreamRequest) String() string
type SplitReadStreamResponse ¶
type SplitReadStreamResponse struct { // Primary stream, which contains the beginning portion of // |original_stream|. An empty value indicates that the original stream can no // longer be split. PrimaryStream *ReadStream `protobuf:"bytes,1,opt,name=primary_stream,json=primaryStream,proto3" json:"primary_stream,omitempty"` // Remainder stream, which contains the tail of |original_stream|. An empty // value indicates that the original stream can no longer be split. RemainderStream *ReadStream `protobuf:"bytes,2,opt,name=remainder_stream,json=remainderStream,proto3" json:"remainder_stream,omitempty"` // contains filtered or unexported fields }
Response message for `SplitReadStream`.
func (*SplitReadStreamResponse) Descriptor
deprecated
func (*SplitReadStreamResponse) Descriptor() ([]byte, []int)
Deprecated: Use SplitReadStreamResponse.ProtoReflect.Descriptor instead.
func (*SplitReadStreamResponse) GetPrimaryStream ¶
func (x *SplitReadStreamResponse) GetPrimaryStream() *ReadStream
func (*SplitReadStreamResponse) GetRemainderStream ¶
func (x *SplitReadStreamResponse) GetRemainderStream() *ReadStream
func (*SplitReadStreamResponse) ProtoMessage ¶
func (*SplitReadStreamResponse) ProtoMessage()
func (*SplitReadStreamResponse) ProtoReflect ¶
func (x *SplitReadStreamResponse) ProtoReflect() protoreflect.Message
func (*SplitReadStreamResponse) Reset ¶
func (x *SplitReadStreamResponse) Reset()
func (*SplitReadStreamResponse) String ¶
func (x *SplitReadStreamResponse) String() string
type StorageError ¶
type StorageError struct { // BigQuery Storage specific error code. Code StorageError_StorageErrorCode `` /* 130-byte string literal not displayed */ // Name of the failed entity. Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` // Message that describes the error. ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` // contains filtered or unexported fields }
Structured custom BigQuery Storage error message. The error can be attached as error details in the returned RPC Status. In particular, the use of error codes allows for more structured error handling and reduces the need to parse unstructured error text strings.
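A minimal sketch of pulling a structured StorageError out of a failed RPC's status details, using status from google.golang.org/grpc/status. Whether a detail is attached depends on the server; this only recovers one when present.

func storageErrorOf(err error) *storagepb.StorageError {
    st, ok := status.FromError(err)
    if !ok {
        return nil // not a gRPC status error
    }
    for _, d := range st.Details() {
        if se, ok := d.(*storagepb.StorageError); ok {
            return se
        }
    }
    return nil
}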
func (*StorageError) Descriptor
deprecated
func (*StorageError) Descriptor() ([]byte, []int)
Deprecated: Use StorageError.ProtoReflect.Descriptor instead.
func (*StorageError) GetCode ¶
func (x *StorageError) GetCode() StorageError_StorageErrorCode
func (*StorageError) GetEntity ¶
func (x *StorageError) GetEntity() string
func (*StorageError) GetErrorMessage ¶
func (x *StorageError) GetErrorMessage() string
func (*StorageError) ProtoMessage ¶
func (*StorageError) ProtoMessage()
func (*StorageError) ProtoReflect ¶
func (x *StorageError) ProtoReflect() protoreflect.Message
func (*StorageError) Reset ¶
func (x *StorageError) Reset()
func (*StorageError) String ¶
func (x *StorageError) String() string
type StorageError_StorageErrorCode ¶
type StorageError_StorageErrorCode int32
Error code for `StorageError`.
const (
    // Default error.
    StorageError_STORAGE_ERROR_CODE_UNSPECIFIED StorageError_StorageErrorCode = 0
    // Table is not found in the system.
    StorageError_TABLE_NOT_FOUND StorageError_StorageErrorCode = 1
    // Stream is already committed.
    StorageError_STREAM_ALREADY_COMMITTED StorageError_StorageErrorCode = 2
    // Stream is not found.
    StorageError_STREAM_NOT_FOUND StorageError_StorageErrorCode = 3
    // Invalid Stream type.
    // For example, you try to commit a stream that is not pending.
    StorageError_INVALID_STREAM_TYPE StorageError_StorageErrorCode = 4
    // Invalid Stream state.
    // For example, you try to commit a stream that is not finalized or has
    // been garbage collected.
    StorageError_INVALID_STREAM_STATE StorageError_StorageErrorCode = 5
    // Stream is finalized.
    StorageError_STREAM_FINALIZED StorageError_StorageErrorCode = 6
    // There is a schema mismatch: the user schema has an extra field that is
    // not present in the BigQuery table schema.
    StorageError_SCHEMA_MISMATCH_EXTRA_FIELDS StorageError_StorageErrorCode = 7
    // Offset already exists.
    StorageError_OFFSET_ALREADY_EXISTS StorageError_StorageErrorCode = 8
    // Offset out of range.
    StorageError_OFFSET_OUT_OF_RANGE StorageError_StorageErrorCode = 9
)
func (StorageError_StorageErrorCode) Descriptor ¶
func (StorageError_StorageErrorCode) Descriptor() protoreflect.EnumDescriptor
func (StorageError_StorageErrorCode) Enum ¶
func (x StorageError_StorageErrorCode) Enum() *StorageError_StorageErrorCode
func (StorageError_StorageErrorCode) EnumDescriptor
deprecated
func (StorageError_StorageErrorCode) EnumDescriptor() ([]byte, []int)
Deprecated: Use StorageError_StorageErrorCode.Descriptor instead.
func (StorageError_StorageErrorCode) Number ¶
func (x StorageError_StorageErrorCode) Number() protoreflect.EnumNumber
func (StorageError_StorageErrorCode) String ¶
func (x StorageError_StorageErrorCode) String() string
func (StorageError_StorageErrorCode) Type ¶
func (StorageError_StorageErrorCode) Type() protoreflect.EnumType
type StreamStats ¶
type StreamStats struct { // Represents the progress of the current stream. Progress *StreamStats_Progress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"` // contains filtered or unexported fields }
Estimated stream statistics for a given read stream.
func (*StreamStats) Descriptor
deprecated
func (*StreamStats) Descriptor() ([]byte, []int)
Deprecated: Use StreamStats.ProtoReflect.Descriptor instead.
func (*StreamStats) GetProgress ¶
func (x *StreamStats) GetProgress() *StreamStats_Progress
func (*StreamStats) ProtoMessage ¶
func (*StreamStats) ProtoMessage()
func (*StreamStats) ProtoReflect ¶
func (x *StreamStats) ProtoReflect() protoreflect.Message
func (*StreamStats) Reset ¶
func (x *StreamStats) Reset()
func (*StreamStats) String ¶
func (x *StreamStats) String() string
type StreamStats_Progress ¶
type StreamStats_Progress struct { // The fraction of rows assigned to the stream that have been processed by // the server so far, not including the rows in the current response // message. // // This value, along with `at_response_end`, can be used to interpolate // the progress made as the rows in the message are being processed using // the following formula: `at_response_start + (at_response_end - // at_response_start) * rows_processed_from_response / rows_in_response`. // // Note that if a filter is provided, the `at_response_end` value of the // previous response may not necessarily be equal to the // `at_response_start` value of the current response. AtResponseStart float64 `protobuf:"fixed64,1,opt,name=at_response_start,json=atResponseStart,proto3" json:"at_response_start,omitempty"` // Similar to `at_response_start`, except that this value includes the // rows in the current response. AtResponseEnd float64 `protobuf:"fixed64,2,opt,name=at_response_end,json=atResponseEnd,proto3" json:"at_response_end,omitempty"` // contains filtered or unexported fields }
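A minimal sketch of the interpolation formula described in the comments above: estimate overall progress while rows from the current response are being processed. The row counts are supplied by the caller.

func progressEstimate(p *storagepb.StreamStats_Progress, rowsProcessedFromResponse, rowsInResponse int64) float64 {
    start, end := p.GetAtResponseStart(), p.GetAtResponseEnd()
    if rowsInResponse == 0 {
        return start
    }
    frac := float64(rowsProcessedFromResponse) / float64(rowsInResponse)
    return start + (end-start)*frac
}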
func (*StreamStats_Progress) Descriptor
deprecated
func (*StreamStats_Progress) Descriptor() ([]byte, []int)
Deprecated: Use StreamStats_Progress.ProtoReflect.Descriptor instead.
func (*StreamStats_Progress) GetAtResponseEnd ¶
func (x *StreamStats_Progress) GetAtResponseEnd() float64
func (*StreamStats_Progress) GetAtResponseStart ¶
func (x *StreamStats_Progress) GetAtResponseStart() float64
func (*StreamStats_Progress) ProtoMessage ¶
func (*StreamStats_Progress) ProtoMessage()
func (*StreamStats_Progress) ProtoReflect ¶
func (x *StreamStats_Progress) ProtoReflect() protoreflect.Message
func (*StreamStats_Progress) Reset ¶
func (x *StreamStats_Progress) Reset()
func (*StreamStats_Progress) String ¶
func (x *StreamStats_Progress) String() string
type TableFieldSchema ¶
type TableFieldSchema struct { // Required. The field name. The name must contain only letters (a-z, A-Z), // numbers (0-9), or underscores (_), and must start with a letter or // underscore. The maximum length is 128 characters. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. The field data type. Type TableFieldSchema_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.cloud.bigquery.storage.v1.TableFieldSchema_Type" json:"type,omitempty"` // Optional. The field mode. The default value is NULLABLE. Mode TableFieldSchema_Mode `protobuf:"varint,3,opt,name=mode,proto3,enum=google.cloud.bigquery.storage.v1.TableFieldSchema_Mode" json:"mode,omitempty"` // Optional. Describes the nested schema fields if the type property is set to // STRUCT. Fields []*TableFieldSchema `protobuf:"bytes,4,rep,name=fields,proto3" json:"fields,omitempty"` // Optional. The field description. The maximum length is 1,024 characters. Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` // Optional. Maximum length of values of this field for STRINGS or BYTES. // // If max_length is not specified, no maximum length constraint is imposed // on this field. // // If type = "STRING", then max_length represents the maximum UTF-8 // length of strings in this field. // // If type = "BYTES", then max_length represents the maximum number of // bytes in this field. // // It is invalid to set this field if type is not "STRING" or "BYTES". MaxLength int64 `protobuf:"varint,7,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` // Optional. Precision (maximum number of total digits in base 10) and scale // (maximum number of digits in the fractional part in base 10) constraints // for values of this field for NUMERIC or BIGNUMERIC. // // It is invalid to set precision or scale if type is not "NUMERIC" or // "BIGNUMERIC". // // If precision and scale are not specified, no value range constraint is // imposed on this field insofar as values are permitted by the type. // // Values of this NUMERIC or BIGNUMERIC field must be in this range when: // // * Precision (P) and scale (S) are specified: // [-10^(P-S) + 10^(-S), 10^(P-S) - 10^(-S)] // * Precision (P) is specified but not scale (and thus scale is // interpreted to be equal to zero): // [-10^P + 1, 10^P - 1]. // // Acceptable values for precision and scale if both are specified: // // * If type = "NUMERIC": // 1 <= precision - scale <= 29 and 0 <= scale <= 9. // * If type = "BIGNUMERIC": // 1 <= precision - scale <= 38 and 0 <= scale <= 38. // // Acceptable values for precision if only precision is specified but not // scale (and thus scale is interpreted to be equal to zero): // // * If type = "NUMERIC": 1 <= precision <= 29. // * If type = "BIGNUMERIC": 1 <= precision <= 38. // // If scale is specified but not precision, then it is invalid. Precision int64 `protobuf:"varint,8,opt,name=precision,proto3" json:"precision,omitempty"` // Optional. See documentation for precision. Scale int64 `protobuf:"varint,9,opt,name=scale,proto3" json:"scale,omitempty"` // Optional. A SQL expression to specify the [default value] // (https://cloud.google.com/bigquery/docs/default-values) for this field. DefaultValueExpression string `` /* 130-byte string literal not displayed */ // contains filtered or unexported fields }
TableFieldSchema defines a single field/column within a table schema.
func (*TableFieldSchema) Descriptor
deprecated
func (*TableFieldSchema) Descriptor() ([]byte, []int)
Deprecated: Use TableFieldSchema.ProtoReflect.Descriptor instead.
func (*TableFieldSchema) GetDefaultValueExpression ¶ added in v1.47.0
func (x *TableFieldSchema) GetDefaultValueExpression() string
func (*TableFieldSchema) GetDescription ¶
func (x *TableFieldSchema) GetDescription() string
func (*TableFieldSchema) GetFields ¶
func (x *TableFieldSchema) GetFields() []*TableFieldSchema
func (*TableFieldSchema) GetMaxLength ¶
func (x *TableFieldSchema) GetMaxLength() int64
func (*TableFieldSchema) GetMode ¶
func (x *TableFieldSchema) GetMode() TableFieldSchema_Mode
func (*TableFieldSchema) GetName ¶
func (x *TableFieldSchema) GetName() string
func (*TableFieldSchema) GetPrecision ¶
func (x *TableFieldSchema) GetPrecision() int64
func (*TableFieldSchema) GetScale ¶
func (x *TableFieldSchema) GetScale() int64
func (*TableFieldSchema) GetType ¶
func (x *TableFieldSchema) GetType() TableFieldSchema_Type
func (*TableFieldSchema) ProtoMessage ¶
func (*TableFieldSchema) ProtoMessage()
func (*TableFieldSchema) ProtoReflect ¶
func (x *TableFieldSchema) ProtoReflect() protoreflect.Message
func (*TableFieldSchema) Reset ¶
func (x *TableFieldSchema) Reset()
func (*TableFieldSchema) String ¶
func (x *TableFieldSchema) String() string
type TableFieldSchema_Mode ¶
type TableFieldSchema_Mode int32
const ( // Illegal value TableFieldSchema_MODE_UNSPECIFIED TableFieldSchema_Mode = 0 TableFieldSchema_NULLABLE TableFieldSchema_Mode = 1 TableFieldSchema_REQUIRED TableFieldSchema_Mode = 2 TableFieldSchema_REPEATED TableFieldSchema_Mode = 3 )
func (TableFieldSchema_Mode) Descriptor ¶
func (TableFieldSchema_Mode) Descriptor() protoreflect.EnumDescriptor
func (TableFieldSchema_Mode) Enum ¶
func (x TableFieldSchema_Mode) Enum() *TableFieldSchema_Mode
func (TableFieldSchema_Mode) EnumDescriptor
deprecated
func (TableFieldSchema_Mode) EnumDescriptor() ([]byte, []int)
Deprecated: Use TableFieldSchema_Mode.Descriptor instead.
func (TableFieldSchema_Mode) Number ¶
func (x TableFieldSchema_Mode) Number() protoreflect.EnumNumber
func (TableFieldSchema_Mode) String ¶
func (x TableFieldSchema_Mode) String() string
func (TableFieldSchema_Mode) Type ¶
func (TableFieldSchema_Mode) Type() protoreflect.EnumType
type TableFieldSchema_Type ¶
type TableFieldSchema_Type int32
const ( // Illegal value TableFieldSchema_TYPE_UNSPECIFIED TableFieldSchema_Type = 0 // 64K, UTF8 TableFieldSchema_STRING TableFieldSchema_Type = 1 // 64-bit signed TableFieldSchema_INT64 TableFieldSchema_Type = 2 // 64-bit IEEE floating point TableFieldSchema_DOUBLE TableFieldSchema_Type = 3 // Aggregate type TableFieldSchema_STRUCT TableFieldSchema_Type = 4 // 64K, Binary TableFieldSchema_BYTES TableFieldSchema_Type = 5 // 2-valued TableFieldSchema_BOOL TableFieldSchema_Type = 6 // 64-bit signed usec since UTC epoch TableFieldSchema_TIMESTAMP TableFieldSchema_Type = 7 // Civil date - Year, Month, Day TableFieldSchema_DATE TableFieldSchema_Type = 8 // Civil time - Hour, Minute, Second, Microseconds TableFieldSchema_TIME TableFieldSchema_Type = 9 // Combination of civil date and civil time TableFieldSchema_DATETIME TableFieldSchema_Type = 10 // Geography object TableFieldSchema_GEOGRAPHY TableFieldSchema_Type = 11 // Numeric value TableFieldSchema_NUMERIC TableFieldSchema_Type = 12 // BigNumeric value TableFieldSchema_BIGNUMERIC TableFieldSchema_Type = 13 // Interval TableFieldSchema_INTERVAL TableFieldSchema_Type = 14 // JSON, String TableFieldSchema_JSON TableFieldSchema_Type = 15 )
func (TableFieldSchema_Type) Descriptor ¶
func (TableFieldSchema_Type) Descriptor() protoreflect.EnumDescriptor
func (TableFieldSchema_Type) Enum ¶
func (x TableFieldSchema_Type) Enum() *TableFieldSchema_Type
func (TableFieldSchema_Type) EnumDescriptor
deprecated
func (TableFieldSchema_Type) EnumDescriptor() ([]byte, []int)
Deprecated: Use TableFieldSchema_Type.Descriptor instead.
func (TableFieldSchema_Type) Number ¶
func (x TableFieldSchema_Type) Number() protoreflect.EnumNumber
func (TableFieldSchema_Type) String ¶
func (x TableFieldSchema_Type) String() string
func (TableFieldSchema_Type) Type ¶
func (TableFieldSchema_Type) Type() protoreflect.EnumType
type TableSchema ¶
type TableSchema struct { // Describes the fields in a table. Fields []*TableFieldSchema `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty"` // contains filtered or unexported fields }
Schema of a table. This schema is a subset of google.cloud.bigquery.v2.TableSchema containing information necessary to generate valid messages to write to BigQuery.
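A minimal sketch of a hand-built schema with a required string column and a nullable numeric column, respecting the MaxLength and Precision/Scale constraints documented on TableFieldSchema. The column names are illustrative.

var exampleSchema = &storagepb.TableSchema{
    Fields: []*storagepb.TableFieldSchema{
        {
            Name:      "name",
            Type:      storagepb.TableFieldSchema_STRING,
            Mode:      storagepb.TableFieldSchema_REQUIRED,
            MaxLength: 128, // maximum UTF-8 length for STRING fields
        },
        {
            Name:      "amount",
            Type:      storagepb.TableFieldSchema_NUMERIC,
            Mode:      storagepb.TableFieldSchema_NULLABLE,
            Precision: 10, // with Scale, must satisfy 1 <= precision - scale <= 29
            Scale:     2,
        },
    },
}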
func (*TableSchema) Descriptor
deprecated
func (*TableSchema) Descriptor() ([]byte, []int)
Deprecated: Use TableSchema.ProtoReflect.Descriptor instead.
func (*TableSchema) GetFields ¶
func (x *TableSchema) GetFields() []*TableFieldSchema
func (*TableSchema) ProtoMessage ¶
func (*TableSchema) ProtoMessage()
func (*TableSchema) ProtoReflect ¶
func (x *TableSchema) ProtoReflect() protoreflect.Message
func (*TableSchema) Reset ¶
func (x *TableSchema) Reset()
func (*TableSchema) String ¶
func (x *TableSchema) String() string
type ThrottleState ¶
type ThrottleState struct { // How much this connection is being throttled. Zero means no throttling, // 100 means fully throttled. ThrottlePercent int32 `protobuf:"varint,1,opt,name=throttle_percent,json=throttlePercent,proto3" json:"throttle_percent,omitempty"` // contains filtered or unexported fields }
Information on whether the current connection is being throttled.
func (*ThrottleState) Descriptor
deprecated
func (*ThrottleState) Descriptor() ([]byte, []int)
Deprecated: Use ThrottleState.ProtoReflect.Descriptor instead.
func (*ThrottleState) GetThrottlePercent ¶
func (x *ThrottleState) GetThrottlePercent() int32
func (*ThrottleState) ProtoMessage ¶
func (*ThrottleState) ProtoMessage()
func (*ThrottleState) ProtoReflect ¶
func (x *ThrottleState) ProtoReflect() protoreflect.Message
func (*ThrottleState) Reset ¶
func (x *ThrottleState) Reset()
func (*ThrottleState) String ¶
func (x *ThrottleState) String() string
type UnimplementedBigQueryReadServer ¶
type UnimplementedBigQueryReadServer struct { }
UnimplementedBigQueryReadServer can be embedded to have forward compatible implementations.
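A minimal sketch of the embedding pattern: the custom server stays compatible as new RPCs are added, and only the overridden method does real work. The readServer type, its stub body, and the register helper are hypothetical.

type readServer struct {
    // Embedded so readServer keeps satisfying BigQueryReadServer; RPCs that
    // are not overridden return an Unimplemented error.
    storagepb.UnimplementedBigQueryReadServer
}

func (s *readServer) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest) (*storagepb.ReadSession, error) {
    // Hypothetical stub: echo back a session for the requested table.
    return &storagepb.ReadSession{Table: req.GetReadSession().GetTable()}, nil
}

func register(gs *grpc.Server) {
    storagepb.RegisterBigQueryReadServer(gs, &readServer{})
}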
func (*UnimplementedBigQueryReadServer) CreateReadSession ¶
func (*UnimplementedBigQueryReadServer) CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
func (*UnimplementedBigQueryReadServer) ReadRows ¶
func (*UnimplementedBigQueryReadServer) ReadRows(*ReadRowsRequest, BigQueryRead_ReadRowsServer) error
func (*UnimplementedBigQueryReadServer) SplitReadStream ¶
func (*UnimplementedBigQueryReadServer) SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
type UnimplementedBigQueryWriteServer ¶
type UnimplementedBigQueryWriteServer struct { }
UnimplementedBigQueryWriteServer can be embedded to have forward compatible implementations.
func (*UnimplementedBigQueryWriteServer) AppendRows ¶
func (*UnimplementedBigQueryWriteServer) AppendRows(BigQueryWrite_AppendRowsServer) error
func (*UnimplementedBigQueryWriteServer) BatchCommitWriteStreams ¶
func (*UnimplementedBigQueryWriteServer) BatchCommitWriteStreams(context.Context, *BatchCommitWriteStreamsRequest) (*BatchCommitWriteStreamsResponse, error)
func (*UnimplementedBigQueryWriteServer) CreateWriteStream ¶
func (*UnimplementedBigQueryWriteServer) CreateWriteStream(context.Context, *CreateWriteStreamRequest) (*WriteStream, error)
func (*UnimplementedBigQueryWriteServer) FinalizeWriteStream ¶
func (*UnimplementedBigQueryWriteServer) FinalizeWriteStream(context.Context, *FinalizeWriteStreamRequest) (*FinalizeWriteStreamResponse, error)
func (*UnimplementedBigQueryWriteServer) FlushRows ¶
func (*UnimplementedBigQueryWriteServer) FlushRows(context.Context, *FlushRowsRequest) (*FlushRowsResponse, error)
func (*UnimplementedBigQueryWriteServer) GetWriteStream ¶
func (*UnimplementedBigQueryWriteServer) GetWriteStream(context.Context, *GetWriteStreamRequest) (*WriteStream, error)
type WriteStream ¶
type WriteStream struct { // Output only. Name of the stream, in the form // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Immutable. Type of the stream. Type WriteStream_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.cloud.bigquery.storage.v1.WriteStream_Type" json:"type,omitempty"` // Output only. Create time of the stream. For the _default stream, this is // the creation_time of the table. CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // Output only. Commit time of the stream. // If a stream is of `COMMITTED` type, then it will have a commit_time same as // `create_time`. If the stream is of `PENDING` type, empty commit_time // means it is not committed. CommitTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=commit_time,json=commitTime,proto3" json:"commit_time,omitempty"` // Output only. The schema of the destination table. It is only returned in // `CreateWriteStream` response. Caller should generate data that's // compatible with this schema to send in initial `AppendRowsRequest`. // The table schema could go out of date during the life time of the stream. TableSchema *TableSchema `protobuf:"bytes,5,opt,name=table_schema,json=tableSchema,proto3" json:"table_schema,omitempty"` // Immutable. Mode of the stream. WriteMode WriteStream_WriteMode `` /* 149-byte string literal not displayed */ // Immutable. The geographic location where the stream's dataset resides. See // https://cloud.google.com/bigquery/docs/locations for supported // locations. Location string `protobuf:"bytes,8,opt,name=location,proto3" json:"location,omitempty"` // contains filtered or unexported fields }
Information about a single stream that gets data inside the storage system.
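A brief client-side sketch (assumptions: the generated BigQueryWriteClient in this package, a pre-dialed, authenticated *grpc.ClientConn, and imports of context, log, grpc, and this package as storagepb). It creates a stream and reads back the WriteStream fields most callers care about.

// describeNewStream is a hypothetical helper. table has the form
// "projects/{project}/datasets/{dataset}/tables/{table}".
func describeNewStream(ctx context.Context, conn *grpc.ClientConn, table string) error {
	client := storagepb.NewBigQueryWriteClient(conn)
	ws, err := client.CreateWriteStream(ctx, &storagepb.CreateWriteStreamRequest{
		Parent:      table,
		WriteStream: &storagepb.WriteStream{Type: storagepb.WriteStream_COMMITTED},
	})
	if err != nil {
		return err
	}
	log.Printf("stream %s created %s in %s", ws.GetName(), ws.GetCreateTime().AsTime(), ws.GetLocation())
	// CreateWriteStream returns the destination table schema; data sent in the
	// initial AppendRowsRequest should be compatible with it.
	log.Printf("table schema has %d top-level fields", len(ws.GetTableSchema().GetFields()))
	return nil
}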
func (*WriteStream) Descriptor
deprecated
func (*WriteStream) Descriptor() ([]byte, []int)
Deprecated: Use WriteStream.ProtoReflect.Descriptor instead.
func (*WriteStream) GetCommitTime ¶
func (x *WriteStream) GetCommitTime() *timestamppb.Timestamp
func (*WriteStream) GetCreateTime ¶
func (x *WriteStream) GetCreateTime() *timestamppb.Timestamp
func (*WriteStream) GetLocation ¶
func (x *WriteStream) GetLocation() string
func (*WriteStream) GetName ¶
func (x *WriteStream) GetName() string
func (*WriteStream) GetTableSchema ¶
func (x *WriteStream) GetTableSchema() *TableSchema
func (*WriteStream) GetType ¶
func (x *WriteStream) GetType() WriteStream_Type
func (*WriteStream) GetWriteMode ¶
func (x *WriteStream) GetWriteMode() WriteStream_WriteMode
func (*WriteStream) ProtoMessage ¶
func (*WriteStream) ProtoMessage()
func (*WriteStream) ProtoReflect ¶
func (x *WriteStream) ProtoReflect() protoreflect.Message
func (*WriteStream) Reset ¶
func (x *WriteStream) Reset()
func (*WriteStream) String ¶
func (x *WriteStream) String() string
type WriteStreamView ¶
type WriteStreamView int32
WriteStreamView is a view enum that controls what details about a write stream should be returned.
const (
	// The default / unset value.
	WriteStreamView_WRITE_STREAM_VIEW_UNSPECIFIED WriteStreamView = 0
	// The BASIC projection returns basic metadata about a write stream. The
	// basic view does not include schema information. This is the default view
	// returned by GetWriteStream.
	WriteStreamView_BASIC WriteStreamView = 1
	// The FULL projection returns all available write stream metadata, including
	// the schema. CreateWriteStream returns the full projection of write stream
	// metadata.
	WriteStreamView_FULL WriteStreamView = 2
)
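A hedged fragment showing how the view is selected (assuming the View field on GetWriteStreamRequest and a client as in the earlier sketches). GetWriteStream defaults to the BASIC view, so FULL must be requested explicitly to get the schema back.

// getFullStream is a hypothetical helper; streamName is a fully qualified
// stream resource name.
func getFullStream(ctx context.Context, client storagepb.BigQueryWriteClient, streamName string) (*storagepb.WriteStream, error) {
	// Ask for the FULL projection so the response includes TableSchema; the
	// default BASIC view returned by GetWriteStream omits schema information.
	return client.GetWriteStream(ctx, &storagepb.GetWriteStreamRequest{
		Name: streamName,
		View: storagepb.WriteStreamView_FULL,
	})
}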
func (WriteStreamView) Descriptor ¶
func (WriteStreamView) Descriptor() protoreflect.EnumDescriptor
func (WriteStreamView) Enum ¶
func (x WriteStreamView) Enum() *WriteStreamView
func (WriteStreamView) EnumDescriptor
deprecated
func (WriteStreamView) EnumDescriptor() ([]byte, []int)
Deprecated: Use WriteStreamView.Descriptor instead.
func (WriteStreamView) Number ¶
func (x WriteStreamView) Number() protoreflect.EnumNumber
func (WriteStreamView) String ¶
func (x WriteStreamView) String() string
func (WriteStreamView) Type ¶
func (WriteStreamView) Type() protoreflect.EnumType
type WriteStream_Type ¶
type WriteStream_Type int32
Type enum of the stream.
const (
	// Unknown type.
	WriteStream_TYPE_UNSPECIFIED WriteStream_Type = 0
	// Data will commit automatically and appear as soon as the write is
	// acknowledged.
	WriteStream_COMMITTED WriteStream_Type = 1
	// Data is invisible until the stream is committed.
	WriteStream_PENDING WriteStream_Type = 2
	// Data is only visible up to the offset to which it was flushed.
	WriteStream_BUFFERED WriteStream_Type = 3
)
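A sketch of where the stream types fit in the batch flow (assumptions: the generated BigQueryWriteClient and imports of context, fmt, and this package as storagepb; the AppendRows traffic itself is elided). Rows written to a PENDING stream stay invisible until the stream is finalized and then committed.

// commitPendingStream is a hypothetical outline of the PENDING stream
// lifecycle; the AppendRows calls are omitted.
func commitPendingStream(ctx context.Context, client storagepb.BigQueryWriteClient, table string) error {
	// PENDING: appended rows are buffered server-side and invisible until commit.
	ws, err := client.CreateWriteStream(ctx, &storagepb.CreateWriteStreamRequest{
		Parent:      table,
		WriteStream: &storagepb.WriteStream{Type: storagepb.WriteStream_PENDING},
	})
	if err != nil {
		return err
	}

	// ... AppendRows calls against ws.GetName() would go here ...

	// Finalizing the stream rejects any further appends.
	if _, err := client.FinalizeWriteStream(ctx, &storagepb.FinalizeWriteStreamRequest{Name: ws.GetName()}); err != nil {
		return err
	}
	// Committing makes the buffered rows visible atomically.
	resp, err := client.BatchCommitWriteStreams(ctx, &storagepb.BatchCommitWriteStreamsRequest{
		Parent:       table,
		WriteStreams: []string{ws.GetName()},
	})
	if err != nil {
		return err
	}
	if len(resp.GetStreamErrors()) > 0 {
		return fmt.Errorf("commit failed: %v", resp.GetStreamErrors())
	}
	return nil
}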
func (WriteStream_Type) Descriptor ¶
func (WriteStream_Type) Descriptor() protoreflect.EnumDescriptor
func (WriteStream_Type) Enum ¶
func (x WriteStream_Type) Enum() *WriteStream_Type
func (WriteStream_Type) EnumDescriptor
deprecated
func (WriteStream_Type) EnumDescriptor() ([]byte, []int)
Deprecated: Use WriteStream_Type.Descriptor instead.
func (WriteStream_Type) Number ¶
func (x WriteStream_Type) Number() protoreflect.EnumNumber
func (WriteStream_Type) String ¶
func (x WriteStream_Type) String() string
func (WriteStream_Type) Type ¶
func (WriteStream_Type) Type() protoreflect.EnumType
type WriteStream_WriteMode ¶
type WriteStream_WriteMode int32
Mode enum of the stream.
const (
	// Unknown type.
	WriteStream_WRITE_MODE_UNSPECIFIED WriteStream_WriteMode = 0
	// Insert new records into the table.
	// It is the default value if customers do not specify it.
	WriteStream_INSERT WriteStream_WriteMode = 1
)
func (WriteStream_WriteMode) Descriptor ¶
func (WriteStream_WriteMode) Descriptor() protoreflect.EnumDescriptor
func (WriteStream_WriteMode) Enum ¶
func (x WriteStream_WriteMode) Enum() *WriteStream_WriteMode
func (WriteStream_WriteMode) EnumDescriptor
deprecated
func (WriteStream_WriteMode) EnumDescriptor() ([]byte, []int)
Deprecated: Use WriteStream_WriteMode.Descriptor instead.
func (WriteStream_WriteMode) Number ¶
func (x WriteStream_WriteMode) Number() protoreflect.EnumNumber
func (WriteStream_WriteMode) String ¶
func (x WriteStream_WriteMode) String() string
func (WriteStream_WriteMode) Type ¶
func (WriteStream_WriteMode) Type() protoreflect.EnumType