Documentation ¶
Index ¶
- Variables
- func RegisterBigQueryStorageServer(s *grpc.Server, srv BigQueryStorageServer)
- type ArrowRecordBatch
- func (*ArrowRecordBatch) Descriptor() ([]byte, []int)
- func (m *ArrowRecordBatch) GetRowCount() int64
- func (m *ArrowRecordBatch) GetSerializedRecordBatch() []byte
- func (*ArrowRecordBatch) ProtoMessage()
- func (m *ArrowRecordBatch) Reset()
- func (m *ArrowRecordBatch) String() string
- func (m *ArrowRecordBatch) XXX_DiscardUnknown()
- func (m *ArrowRecordBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *ArrowRecordBatch) XXX_Merge(src proto.Message)
- func (m *ArrowRecordBatch) XXX_Size() int
- func (m *ArrowRecordBatch) XXX_Unmarshal(b []byte) error
- type ArrowSchema
- func (*ArrowSchema) Descriptor() ([]byte, []int)
- func (m *ArrowSchema) GetSerializedSchema() []byte
- func (*ArrowSchema) ProtoMessage()
- func (m *ArrowSchema) Reset()
- func (m *ArrowSchema) String() string
- func (m *ArrowSchema) XXX_DiscardUnknown()
- func (m *ArrowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *ArrowSchema) XXX_Merge(src proto.Message)
- func (m *ArrowSchema) XXX_Size() int
- func (m *ArrowSchema) XXX_Unmarshal(b []byte) error
- type AvroRows
- func (*AvroRows) Descriptor() ([]byte, []int)
- func (m *AvroRows) GetRowCount() int64
- func (m *AvroRows) GetSerializedBinaryRows() []byte
- func (*AvroRows) ProtoMessage()
- func (m *AvroRows) Reset()
- func (m *AvroRows) String() string
- func (m *AvroRows) XXX_DiscardUnknown()
- func (m *AvroRows) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *AvroRows) XXX_Merge(src proto.Message)
- func (m *AvroRows) XXX_Size() int
- func (m *AvroRows) XXX_Unmarshal(b []byte) error
- type AvroSchema
- func (*AvroSchema) Descriptor() ([]byte, []int)
- func (m *AvroSchema) GetSchema() string
- func (*AvroSchema) ProtoMessage()
- func (m *AvroSchema) Reset()
- func (m *AvroSchema) String() string
- func (m *AvroSchema) XXX_DiscardUnknown()
- func (m *AvroSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *AvroSchema) XXX_Merge(src proto.Message)
- func (m *AvroSchema) XXX_Size() int
- func (m *AvroSchema) XXX_Unmarshal(b []byte) error
- type BatchCreateReadSessionStreamsRequest
- func (*BatchCreateReadSessionStreamsRequest) Descriptor() ([]byte, []int)
- func (m *BatchCreateReadSessionStreamsRequest) GetRequestedStreams() int32
- func (m *BatchCreateReadSessionStreamsRequest) GetSession() *ReadSession
- func (*BatchCreateReadSessionStreamsRequest) ProtoMessage()
- func (m *BatchCreateReadSessionStreamsRequest) Reset()
- func (m *BatchCreateReadSessionStreamsRequest) String() string
- func (m *BatchCreateReadSessionStreamsRequest) XXX_DiscardUnknown()
- func (m *BatchCreateReadSessionStreamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *BatchCreateReadSessionStreamsRequest) XXX_Merge(src proto.Message)
- func (m *BatchCreateReadSessionStreamsRequest) XXX_Size() int
- func (m *BatchCreateReadSessionStreamsRequest) XXX_Unmarshal(b []byte) error
- type BatchCreateReadSessionStreamsResponse
- func (*BatchCreateReadSessionStreamsResponse) Descriptor() ([]byte, []int)
- func (m *BatchCreateReadSessionStreamsResponse) GetStreams() []*Stream
- func (*BatchCreateReadSessionStreamsResponse) ProtoMessage()
- func (m *BatchCreateReadSessionStreamsResponse) Reset()
- func (m *BatchCreateReadSessionStreamsResponse) String() string
- func (m *BatchCreateReadSessionStreamsResponse) XXX_DiscardUnknown()
- func (m *BatchCreateReadSessionStreamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *BatchCreateReadSessionStreamsResponse) XXX_Merge(src proto.Message)
- func (m *BatchCreateReadSessionStreamsResponse) XXX_Size() int
- func (m *BatchCreateReadSessionStreamsResponse) XXX_Unmarshal(b []byte) error
- type BigQueryStorageClient
- func NewBigQueryStorageClient(cc grpc.ClientConnInterface) BigQueryStorageClient
- type BigQueryStorageServer
- type BigQueryStorage_ReadRowsClient
- type BigQueryStorage_ReadRowsServer
- type CreateReadSessionRequest
- func (*CreateReadSessionRequest) Descriptor() ([]byte, []int)
- func (m *CreateReadSessionRequest) GetFormat() DataFormat
- func (m *CreateReadSessionRequest) GetParent() string
- func (m *CreateReadSessionRequest) GetReadOptions() *TableReadOptions
- func (m *CreateReadSessionRequest) GetRequestedStreams() int32
- func (m *CreateReadSessionRequest) GetShardingStrategy() ShardingStrategy
- func (m *CreateReadSessionRequest) GetTableModifiers() *TableModifiers
- func (m *CreateReadSessionRequest) GetTableReference() *TableReference
- func (*CreateReadSessionRequest) ProtoMessage()
- func (m *CreateReadSessionRequest) Reset()
- func (m *CreateReadSessionRequest) String() string
- func (m *CreateReadSessionRequest) XXX_DiscardUnknown()
- func (m *CreateReadSessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *CreateReadSessionRequest) XXX_Merge(src proto.Message)
- func (m *CreateReadSessionRequest) XXX_Size() int
- func (m *CreateReadSessionRequest) XXX_Unmarshal(b []byte) error
- type DataFormat
- func (DataFormat) EnumDescriptor() ([]byte, []int)
- func (x DataFormat) String() string
- type FinalizeStreamRequest
- func (*FinalizeStreamRequest) Descriptor() ([]byte, []int)
- func (m *FinalizeStreamRequest) GetStream() *Stream
- func (*FinalizeStreamRequest) ProtoMessage()
- func (m *FinalizeStreamRequest) Reset()
- func (m *FinalizeStreamRequest) String() string
- func (m *FinalizeStreamRequest) XXX_DiscardUnknown()
- func (m *FinalizeStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *FinalizeStreamRequest) XXX_Merge(src proto.Message)
- func (m *FinalizeStreamRequest) XXX_Size() int
- func (m *FinalizeStreamRequest) XXX_Unmarshal(b []byte) error
- type Progress
- func (*Progress) Descriptor() ([]byte, []int)
- func (m *Progress) GetAtResponseEnd() float32
- func (m *Progress) GetAtResponseStart() float32
- func (*Progress) ProtoMessage()
- func (m *Progress) Reset()
- func (m *Progress) String() string
- func (m *Progress) XXX_DiscardUnknown()
- func (m *Progress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *Progress) XXX_Merge(src proto.Message)
- func (m *Progress) XXX_Size() int
- func (m *Progress) XXX_Unmarshal(b []byte) error
- type ReadRowsRequest
- func (*ReadRowsRequest) Descriptor() ([]byte, []int)
- func (m *ReadRowsRequest) GetReadPosition() *StreamPosition
- func (*ReadRowsRequest) ProtoMessage()
- func (m *ReadRowsRequest) Reset()
- func (m *ReadRowsRequest) String() string
- func (m *ReadRowsRequest) XXX_DiscardUnknown()
- func (m *ReadRowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *ReadRowsRequest) XXX_Merge(src proto.Message)
- func (m *ReadRowsRequest) XXX_Size() int
- func (m *ReadRowsRequest) XXX_Unmarshal(b []byte) error
- type ReadRowsResponse
- func (*ReadRowsResponse) Descriptor() ([]byte, []int)
- func (m *ReadRowsResponse) GetArrowRecordBatch() *ArrowRecordBatch
- func (m *ReadRowsResponse) GetAvroRows() *AvroRows
- func (m *ReadRowsResponse) GetRowCount() int64
- func (m *ReadRowsResponse) GetRows() isReadRowsResponse_Rows
- func (m *ReadRowsResponse) GetStatus() *StreamStatus
- func (m *ReadRowsResponse) GetThrottleStatus() *ThrottleStatus
- func (*ReadRowsResponse) ProtoMessage()
- func (m *ReadRowsResponse) Reset()
- func (m *ReadRowsResponse) String() string
- func (m *ReadRowsResponse) XXX_DiscardUnknown()
- func (m *ReadRowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *ReadRowsResponse) XXX_Merge(src proto.Message)
- func (*ReadRowsResponse) XXX_OneofWrappers() []interface{}
- func (m *ReadRowsResponse) XXX_Size() int
- func (m *ReadRowsResponse) XXX_Unmarshal(b []byte) error
- type ReadRowsResponse_ArrowRecordBatch
- type ReadRowsResponse_AvroRows
- type ReadSession
- func (*ReadSession) Descriptor() ([]byte, []int)
- func (m *ReadSession) GetArrowSchema() *ArrowSchema
- func (m *ReadSession) GetAvroSchema() *AvroSchema
- func (m *ReadSession) GetExpireTime() *timestamp.Timestamp
- func (m *ReadSession) GetName() string
- func (m *ReadSession) GetSchema() isReadSession_Schema
- func (m *ReadSession) GetShardingStrategy() ShardingStrategy
- func (m *ReadSession) GetStreams() []*Stream
- func (m *ReadSession) GetTableModifiers() *TableModifiers
- func (m *ReadSession) GetTableReference() *TableReference
- func (*ReadSession) ProtoMessage()
- func (m *ReadSession) Reset()
- func (m *ReadSession) String() string
- func (m *ReadSession) XXX_DiscardUnknown()
- func (m *ReadSession) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *ReadSession) XXX_Merge(src proto.Message)
- func (*ReadSession) XXX_OneofWrappers() []interface{}
- func (m *ReadSession) XXX_Size() int
- func (m *ReadSession) XXX_Unmarshal(b []byte) error
- type ReadSession_ArrowSchema
- type ReadSession_AvroSchema
- type ShardingStrategy
- func (ShardingStrategy) EnumDescriptor() ([]byte, []int)
- func (x ShardingStrategy) String() string
- type SplitReadStreamRequest
- func (*SplitReadStreamRequest) Descriptor() ([]byte, []int)
- func (m *SplitReadStreamRequest) GetFraction() float32
- func (m *SplitReadStreamRequest) GetOriginalStream() *Stream
- func (*SplitReadStreamRequest) ProtoMessage()
- func (m *SplitReadStreamRequest) Reset()
- func (m *SplitReadStreamRequest) String() string
- func (m *SplitReadStreamRequest) XXX_DiscardUnknown()
- func (m *SplitReadStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *SplitReadStreamRequest) XXX_Merge(src proto.Message)
- func (m *SplitReadStreamRequest) XXX_Size() int
- func (m *SplitReadStreamRequest) XXX_Unmarshal(b []byte) error
- type SplitReadStreamResponse
- func (*SplitReadStreamResponse) Descriptor() ([]byte, []int)
- func (m *SplitReadStreamResponse) GetPrimaryStream() *Stream
- func (m *SplitReadStreamResponse) GetRemainderStream() *Stream
- func (*SplitReadStreamResponse) ProtoMessage()
- func (m *SplitReadStreamResponse) Reset()
- func (m *SplitReadStreamResponse) String() string
- func (m *SplitReadStreamResponse) XXX_DiscardUnknown()
- func (m *SplitReadStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *SplitReadStreamResponse) XXX_Merge(src proto.Message)
- func (m *SplitReadStreamResponse) XXX_Size() int
- func (m *SplitReadStreamResponse) XXX_Unmarshal(b []byte) error
- type Stream
- func (*Stream) Descriptor() ([]byte, []int)
- func (m *Stream) GetName() string
- func (*Stream) ProtoMessage()
- func (m *Stream) Reset()
- func (m *Stream) String() string
- func (m *Stream) XXX_DiscardUnknown()
- func (m *Stream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *Stream) XXX_Merge(src proto.Message)
- func (m *Stream) XXX_Size() int
- func (m *Stream) XXX_Unmarshal(b []byte) error
- type StreamPosition
- func (*StreamPosition) Descriptor() ([]byte, []int)
- func (m *StreamPosition) GetOffset() int64
- func (m *StreamPosition) GetStream() *Stream
- func (*StreamPosition) ProtoMessage()
- func (m *StreamPosition) Reset()
- func (m *StreamPosition) String() string
- func (m *StreamPosition) XXX_DiscardUnknown()
- func (m *StreamPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *StreamPosition) XXX_Merge(src proto.Message)
- func (m *StreamPosition) XXX_Size() int
- func (m *StreamPosition) XXX_Unmarshal(b []byte) error
- type StreamStatus
- func (*StreamStatus) Descriptor() ([]byte, []int)
- func (m *StreamStatus) GetEstimatedRowCount() int64
- func (m *StreamStatus) GetFractionConsumed() float32
- func (m *StreamStatus) GetIsSplittable() bool
- func (m *StreamStatus) GetProgress() *Progress
- func (*StreamStatus) ProtoMessage()
- func (m *StreamStatus) Reset()
- func (m *StreamStatus) String() string
- func (m *StreamStatus) XXX_DiscardUnknown()
- func (m *StreamStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *StreamStatus) XXX_Merge(src proto.Message)
- func (m *StreamStatus) XXX_Size() int
- func (m *StreamStatus) XXX_Unmarshal(b []byte) error
- type TableModifiers
- func (*TableModifiers) Descriptor() ([]byte, []int)
- func (m *TableModifiers) GetSnapshotTime() *timestamp.Timestamp
- func (*TableModifiers) ProtoMessage()
- func (m *TableModifiers) Reset()
- func (m *TableModifiers) String() string
- func (m *TableModifiers) XXX_DiscardUnknown()
- func (m *TableModifiers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *TableModifiers) XXX_Merge(src proto.Message)
- func (m *TableModifiers) XXX_Size() int
- func (m *TableModifiers) XXX_Unmarshal(b []byte) error
- type TableReadOptions
- func (*TableReadOptions) Descriptor() ([]byte, []int)
- func (m *TableReadOptions) GetRowRestriction() string
- func (m *TableReadOptions) GetSelectedFields() []string
- func (*TableReadOptions) ProtoMessage()
- func (m *TableReadOptions) Reset()
- func (m *TableReadOptions) String() string
- func (m *TableReadOptions) XXX_DiscardUnknown()
- func (m *TableReadOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *TableReadOptions) XXX_Merge(src proto.Message)
- func (m *TableReadOptions) XXX_Size() int
- func (m *TableReadOptions) XXX_Unmarshal(b []byte) error
- type TableReference
- func (*TableReference) Descriptor() ([]byte, []int)
- func (m *TableReference) GetDatasetId() string
- func (m *TableReference) GetProjectId() string
- func (m *TableReference) GetTableId() string
- func (*TableReference) ProtoMessage()
- func (m *TableReference) Reset()
- func (m *TableReference) String() string
- func (m *TableReference) XXX_DiscardUnknown()
- func (m *TableReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *TableReference) XXX_Merge(src proto.Message)
- func (m *TableReference) XXX_Size() int
- func (m *TableReference) XXX_Unmarshal(b []byte) error
- type ThrottleStatus
- func (*ThrottleStatus) Descriptor() ([]byte, []int)
- func (m *ThrottleStatus) GetThrottlePercent() int32
- func (*ThrottleStatus) ProtoMessage()
- func (m *ThrottleStatus) Reset()
- func (m *ThrottleStatus) String() string
- func (m *ThrottleStatus) XXX_DiscardUnknown()
- func (m *ThrottleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
- func (m *ThrottleStatus) XXX_Merge(src proto.Message)
- func (m *ThrottleStatus) XXX_Size() int
- func (m *ThrottleStatus) XXX_Unmarshal(b []byte) error
- type UnimplementedBigQueryStorageServer
- func (*UnimplementedBigQueryStorageServer) BatchCreateReadSessionStreams(ctx context.Context, req *BatchCreateReadSessionStreamsRequest) (*BatchCreateReadSessionStreamsResponse, error)
- func (*UnimplementedBigQueryStorageServer) CreateReadSession(ctx context.Context, req *CreateReadSessionRequest) (*ReadSession, error)
- func (*UnimplementedBigQueryStorageServer) FinalizeStream(ctx context.Context, req *FinalizeStreamRequest) (*empty.Empty, error)
- func (*UnimplementedBigQueryStorageServer) ReadRows(req *ReadRowsRequest, srv BigQueryStorage_ReadRowsServer) error
- func (*UnimplementedBigQueryStorageServer) SplitReadStream(ctx context.Context, req *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
Constants ¶
This section is empty.
Variables ¶
var DataFormat_name = map[int32]string{
0: "DATA_FORMAT_UNSPECIFIED",
1: "AVRO",
3: "ARROW",
}
var DataFormat_value = map[string]int32{
"DATA_FORMAT_UNSPECIFIED": 0,
"AVRO": 1,
"ARROW": 3,
}
var ShardingStrategy_name = map[int32]string{
0: "SHARDING_STRATEGY_UNSPECIFIED",
1: "LIQUID",
2: "BALANCED",
}
var ShardingStrategy_value = map[string]int32{
"SHARDING_STRATEGY_UNSPECIFIED": 0,
"LIQUID": 1,
"BALANCED": 2,
}
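These are the usual protoc-generated lookup tables between enum numbers and their proto names. A minimal sketch of using them; the storagepb alias and genproto import path are assumptions, so adjust them to wherever this generated package lives in your build.

package main

import (
    "fmt"

    storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

func main() {
    // Map an enum value to its proto name, and a name back to its value.
    fmt.Println(storagepb.DataFormat_name[int32(storagepb.DataFormat_ARROW)]) // ARROW
    fmt.Println(storagepb.ShardingStrategy_value["BALANCED"])                 // 2
}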
Functions ¶
func RegisterBigQueryStorageServer ¶
func RegisterBigQueryStorageServer(s *grpc.Server, srv BigQueryStorageServer)
Types ¶
type ArrowRecordBatch ¶
type ArrowRecordBatch struct {
    // IPC serialized Arrow RecordBatch.
    SerializedRecordBatch []byte `` /* 126-byte string literal not displayed */
    // The count of rows in the returning block.
    RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Arrow RecordBatch.
func (*ArrowRecordBatch) Descriptor ¶
func (*ArrowRecordBatch) Descriptor() ([]byte, []int)
func (*ArrowRecordBatch) GetRowCount ¶
func (m *ArrowRecordBatch) GetRowCount() int64
func (*ArrowRecordBatch) GetSerializedRecordBatch ¶
func (m *ArrowRecordBatch) GetSerializedRecordBatch() []byte
func (*ArrowRecordBatch) ProtoMessage ¶
func (*ArrowRecordBatch) ProtoMessage()
func (*ArrowRecordBatch) Reset ¶
func (m *ArrowRecordBatch) Reset()
func (*ArrowRecordBatch) String ¶
func (m *ArrowRecordBatch) String() string
func (*ArrowRecordBatch) XXX_DiscardUnknown ¶
func (m *ArrowRecordBatch) XXX_DiscardUnknown()
func (*ArrowRecordBatch) XXX_Marshal ¶
func (m *ArrowRecordBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*ArrowRecordBatch) XXX_Merge ¶
func (m *ArrowRecordBatch) XXX_Merge(src proto.Message)
func (*ArrowRecordBatch) XXX_Size ¶
func (m *ArrowRecordBatch) XXX_Size() int
func (*ArrowRecordBatch) XXX_Unmarshal ¶
func (m *ArrowRecordBatch) XXX_Unmarshal(b []byte) error
type ArrowSchema ¶
type ArrowSchema struct {
    // IPC serialized Arrow schema.
    SerializedSchema []byte `protobuf:"bytes,1,opt,name=serialized_schema,json=serializedSchema,proto3" json:"serialized_schema,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Arrow schema.
func (*ArrowSchema) Descriptor ¶
func (*ArrowSchema) Descriptor() ([]byte, []int)
func (*ArrowSchema) GetSerializedSchema ¶
func (m *ArrowSchema) GetSerializedSchema() []byte
func (*ArrowSchema) ProtoMessage ¶
func (*ArrowSchema) ProtoMessage()
func (*ArrowSchema) Reset ¶
func (m *ArrowSchema) Reset()
func (*ArrowSchema) String ¶
func (m *ArrowSchema) String() string
func (*ArrowSchema) XXX_DiscardUnknown ¶
func (m *ArrowSchema) XXX_DiscardUnknown()
func (*ArrowSchema) XXX_Marshal ¶
func (m *ArrowSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*ArrowSchema) XXX_Merge ¶
func (m *ArrowSchema) XXX_Merge(src proto.Message)
func (*ArrowSchema) XXX_Size ¶
func (m *ArrowSchema) XXX_Size() int
func (*ArrowSchema) XXX_Unmarshal ¶
func (m *ArrowSchema) XXX_Unmarshal(b []byte) error
type AvroRows ¶
type AvroRows struct {
    // Binary serialized rows in a block.
    SerializedBinaryRows []byte `protobuf:"bytes,1,opt,name=serialized_binary_rows,json=serializedBinaryRows,proto3" json:"serialized_binary_rows,omitempty"`
    // The count of rows in the returning block.
    RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Avro rows.
func (*AvroRows) Descriptor ¶
func (*AvroRows) Descriptor() ([]byte, []int)
func (*AvroRows) GetRowCount ¶
func (m *AvroRows) GetRowCount() int64
func (*AvroRows) GetSerializedBinaryRows ¶
func (m *AvroRows) GetSerializedBinaryRows() []byte
func (*AvroRows) ProtoMessage ¶
func (*AvroRows) ProtoMessage()
func (*AvroRows) Reset ¶
func (m *AvroRows) Reset()
func (*AvroRows) String ¶
func (m *AvroRows) String() string
func (*AvroRows) XXX_DiscardUnknown ¶
func (m *AvroRows) XXX_DiscardUnknown()
func (*AvroRows) XXX_Marshal ¶
func (m *AvroRows) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*AvroRows) XXX_Merge ¶
func (m *AvroRows) XXX_Merge(src proto.Message)
func (*AvroRows) XXX_Size ¶
func (m *AvroRows) XXX_Size() int
func (*AvroRows) XXX_Unmarshal ¶
func (m *AvroRows) XXX_Unmarshal(b []byte) error
type AvroSchema ¶
type AvroSchema struct {
    // Json serialized schema, as described at
    // https://avro.apache.org/docs/1.8.1/spec.html
    Schema string `protobuf:"bytes,1,opt,name=schema,proto3" json:"schema,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Avro schema.
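The serialized_binary_rows payload is a run of binary-encoded Avro records rather than a self-contained object container file, so decoding it requires the session's JSON schema. A hedged sketch using the third-party github.com/linkedin/goavro/v2 codec; the library choice, the helper name, and the storagepb alias are assumptions.

package example

import (
    "github.com/linkedin/goavro/v2"
    storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

// decodeAvroRows decodes one AvroRows block into generic Avro values using the
// JSON schema from the read session's AvroSchema.
func decodeAvroRows(schema *storagepb.AvroSchema, rows *storagepb.AvroRows) ([]interface{}, error) {
    codec, err := goavro.NewCodec(schema.GetSchema())
    if err != nil {
        return nil, err
    }
    // The block holds GetRowCount() records concatenated back to back.
    var out []interface{}
    buf := rows.GetSerializedBinaryRows()
    for len(buf) > 0 {
        record, rest, err := codec.NativeFromBinary(buf)
        if err != nil {
            return nil, err
        }
        out = append(out, record)
        buf = rest
    }
    return out, nil
}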
func (*AvroSchema) Descriptor ¶
func (*AvroSchema) Descriptor() ([]byte, []int)
func (*AvroSchema) GetSchema ¶
func (m *AvroSchema) GetSchema() string
func (*AvroSchema) ProtoMessage ¶
func (*AvroSchema) ProtoMessage()
func (*AvroSchema) Reset ¶
func (m *AvroSchema) Reset()
func (*AvroSchema) String ¶
func (m *AvroSchema) String() string
func (*AvroSchema) XXX_DiscardUnknown ¶
func (m *AvroSchema) XXX_DiscardUnknown()
func (*AvroSchema) XXX_Marshal ¶
func (m *AvroSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*AvroSchema) XXX_Merge ¶
func (m *AvroSchema) XXX_Merge(src proto.Message)
func (*AvroSchema) XXX_Size ¶
func (m *AvroSchema) XXX_Size() int
func (*AvroSchema) XXX_Unmarshal ¶
func (m *AvroSchema) XXX_Unmarshal(b []byte) error
type BatchCreateReadSessionStreamsRequest ¶
type BatchCreateReadSessionStreamsRequest struct {
    // Required. Must be a non-expired session obtained from a call to
    // CreateReadSession. Only the name field needs to be set.
    Session *ReadSession `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
    // Required. Number of new streams requested. Must be positive.
    // Number of added streams may be less than this, see CreateReadSessionRequest
    // for more information.
    RequestedStreams int32 `protobuf:"varint,2,opt,name=requested_streams,json=requestedStreams,proto3" json:"requested_streams,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Information needed to request additional streams for an established read session.
func (*BatchCreateReadSessionStreamsRequest) Descriptor ¶
func (*BatchCreateReadSessionStreamsRequest) Descriptor() ([]byte, []int)
func (*BatchCreateReadSessionStreamsRequest) GetRequestedStreams ¶
func (m *BatchCreateReadSessionStreamsRequest) GetRequestedStreams() int32
func (*BatchCreateReadSessionStreamsRequest) GetSession ¶
func (m *BatchCreateReadSessionStreamsRequest) GetSession() *ReadSession
func (*BatchCreateReadSessionStreamsRequest) ProtoMessage ¶
func (*BatchCreateReadSessionStreamsRequest) ProtoMessage()
func (*BatchCreateReadSessionStreamsRequest) Reset ¶
func (m *BatchCreateReadSessionStreamsRequest) Reset()
func (*BatchCreateReadSessionStreamsRequest) String ¶
func (m *BatchCreateReadSessionStreamsRequest) String() string
func (*BatchCreateReadSessionStreamsRequest) XXX_DiscardUnknown ¶
func (m *BatchCreateReadSessionStreamsRequest) XXX_DiscardUnknown()
func (*BatchCreateReadSessionStreamsRequest) XXX_Marshal ¶
func (m *BatchCreateReadSessionStreamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*BatchCreateReadSessionStreamsRequest) XXX_Merge ¶
func (m *BatchCreateReadSessionStreamsRequest) XXX_Merge(src proto.Message)
func (*BatchCreateReadSessionStreamsRequest) XXX_Size ¶
func (m *BatchCreateReadSessionStreamsRequest) XXX_Size() int
func (*BatchCreateReadSessionStreamsRequest) XXX_Unmarshal ¶
func (m *BatchCreateReadSessionStreamsRequest) XXX_Unmarshal(b []byte) error
type BatchCreateReadSessionStreamsResponse ¶
type BatchCreateReadSessionStreamsResponse struct {
    // Newly added streams.
    Streams []*Stream `protobuf:"bytes,1,rep,name=streams,proto3" json:"streams,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
The response from `BatchCreateReadSessionStreams` returns the stream identifiers for the newly created streams.
func (*BatchCreateReadSessionStreamsResponse) Descriptor ¶
func (*BatchCreateReadSessionStreamsResponse) Descriptor() ([]byte, []int)
func (*BatchCreateReadSessionStreamsResponse) GetStreams ¶
func (m *BatchCreateReadSessionStreamsResponse) GetStreams() []*Stream
func (*BatchCreateReadSessionStreamsResponse) ProtoMessage ¶
func (*BatchCreateReadSessionStreamsResponse) ProtoMessage()
func (*BatchCreateReadSessionStreamsResponse) Reset ¶
func (m *BatchCreateReadSessionStreamsResponse) Reset()
func (*BatchCreateReadSessionStreamsResponse) String ¶
func (m *BatchCreateReadSessionStreamsResponse) String() string
func (*BatchCreateReadSessionStreamsResponse) XXX_DiscardUnknown ¶
func (m *BatchCreateReadSessionStreamsResponse) XXX_DiscardUnknown()
func (*BatchCreateReadSessionStreamsResponse) XXX_Marshal ¶
func (m *BatchCreateReadSessionStreamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*BatchCreateReadSessionStreamsResponse) XXX_Merge ¶
func (m *BatchCreateReadSessionStreamsResponse) XXX_Merge(src proto.Message)
func (*BatchCreateReadSessionStreamsResponse) XXX_Size ¶
func (m *BatchCreateReadSessionStreamsResponse) XXX_Size() int
func (*BatchCreateReadSessionStreamsResponse) XXX_Unmarshal ¶
func (m *BatchCreateReadSessionStreamsResponse) XXX_Unmarshal(b []byte) error
type BigQueryStorageClient ¶
type BigQueryStorageClient interface {
    // Creates a new read session. A read session divides the contents of a
    // BigQuery table into one or more streams, which can then be used to read
    // data from the table. The read session also specifies properties of the
    // data to be read, such as a list of columns or a push-down filter describing
    // the rows to be returned.
    //
    // A particular row can be read by at most one stream. When the caller has
    // reached the end of each stream in the session, then all the data in the
    // table has been read.
    //
    // Read sessions automatically expire 24 hours after they are created and do
    // not require manual clean-up by the caller.
    CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error)
    // Reads rows from the table in the format prescribed by the read session.
    // Each response contains one or more table rows, up to a maximum of 10 MiB
    // per response; read requests which attempt to read individual rows larger
    // than this will fail.
    //
    // Each request also returns a set of stream statistics reflecting the
    // estimated total number of rows in the read stream. This number is computed
    // based on the total table size and the number of active streams in the read
    // session, and may change as other streams continue to read data.
    ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryStorage_ReadRowsClient, error)
    // Creates additional streams for a ReadSession. This API can be used to
    // dynamically adjust the parallelism of a batch processing task upwards by
    // adding additional workers.
    BatchCreateReadSessionStreams(ctx context.Context, in *BatchCreateReadSessionStreamsRequest, opts ...grpc.CallOption) (*BatchCreateReadSessionStreamsResponse, error)
    // Triggers the graceful termination of a single stream in a ReadSession. This
    // API can be used to dynamically adjust the parallelism of a batch processing
    // task downwards without losing data.
    //
    // This API does not delete the stream -- it remains visible in the
    // ReadSession, and any data processed by the stream is not released to other
    // streams. However, no additional data will be assigned to the stream once
    // this call completes. Callers must continue reading data on the stream until
    // the end of the stream is reached so that data which has already been
    // assigned to the stream will be processed.
    //
    // This method will return an error if there are no other live streams
    // in the Session, or if SplitReadStream() has been called on the given
    // Stream.
    FinalizeStream(ctx context.Context, in *FinalizeStreamRequest, opts ...grpc.CallOption) (*empty.Empty, error)
    // Splits a given read stream into two Streams. These streams are referred to
    // as the primary and the residual of the split. The original stream can still
    // be read from in the same manner as before. Both of the returned streams can
    // also be read from, and the total rows returned by both child streams will be
    // the same as the rows read from the original stream.
    //
    // Moreover, the two child streams will be allocated back to back in the
    // original Stream. Concretely, it is guaranteed that for streams Original,
    // Primary, and Residual, that Original[0-j] = Primary[0-j] and
    // Original[j-n] = Residual[0-m] once the streams have been read to
    // completion.
    //
    // This method is guaranteed to be idempotent.
    SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error)
}
BigQueryStorageClient is the client API for BigQueryStorage service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewBigQueryStorageClient ¶
func NewBigQueryStorageClient(cc grpc.ClientConnInterface) BigQueryStorageClient
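A hedged end-to-end sketch of using the client: dial the API, create a single-stream read session, and drain the stream. The storagepb alias and genproto import path, the bigquerystorage.googleapis.com:443 endpoint, the OAuth scope, and the public table used here are assumptions for illustration; most applications would use the higher-level cloud.google.com/go wrapper rather than dialing the raw gRPC surface directly.

package main

import (
    "context"
    "io"
    "log"

    storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials"
    "google.golang.org/grpc/credentials/oauth"
)

func main() {
    ctx := context.Background()

    // Application Default Credentials for per-RPC auth; TLS for the transport.
    rpcCreds, err := oauth.NewApplicationDefault(ctx, "https://www.googleapis.com/auth/bigquery.readonly")
    if err != nil {
        log.Fatal(err)
    }
    conn, err := grpc.Dial("bigquerystorage.googleapis.com:443",
        grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
        grpc.WithPerRPCCredentials(rpcCreds),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := storagepb.NewBigQueryStorageClient(conn)

    // One stream is enough for a simple sequential read.
    session, err := client.CreateReadSession(ctx, &storagepb.CreateReadSessionRequest{
        Parent: "projects/my-project",
        TableReference: &storagepb.TableReference{
            ProjectId: "bigquery-public-data",
            DatasetId: "usa_names",
            TableId:   "usa_1910_current",
        },
        RequestedStreams: 1,
        Format:           storagepb.DataFormat_AVRO,
    })
    if err != nil || len(session.GetStreams()) == 0 {
        log.Fatalf("CreateReadSession: %v (streams: %d)", err, len(session.GetStreams()))
    }

    // Read the single stream from offset 0 until the server closes it.
    rows, err := client.ReadRows(ctx, &storagepb.ReadRowsRequest{
        ReadPosition: &storagepb.StreamPosition{Stream: session.GetStreams()[0]},
    })
    if err != nil {
        log.Fatal(err)
    }
    for {
        resp, err := rows.Recv()
        if err == io.EOF {
            break
        }
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("received block with %d rows", resp.GetRowCount())
    }
}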
type BigQueryStorageServer ¶
type BigQueryStorageServer interface {
    // Creates a new read session. A read session divides the contents of a
    // BigQuery table into one or more streams, which can then be used to read
    // data from the table. The read session also specifies properties of the
    // data to be read, such as a list of columns or a push-down filter describing
    // the rows to be returned.
    //
    // A particular row can be read by at most one stream. When the caller has
    // reached the end of each stream in the session, then all the data in the
    // table has been read.
    //
    // Read sessions automatically expire 24 hours after they are created and do
    // not require manual clean-up by the caller.
    CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
    // Reads rows from the table in the format prescribed by the read session.
    // Each response contains one or more table rows, up to a maximum of 10 MiB
    // per response; read requests which attempt to read individual rows larger
    // than this will fail.
    //
    // Each request also returns a set of stream statistics reflecting the
    // estimated total number of rows in the read stream. This number is computed
    // based on the total table size and the number of active streams in the read
    // session, and may change as other streams continue to read data.
    ReadRows(*ReadRowsRequest, BigQueryStorage_ReadRowsServer) error
    // Creates additional streams for a ReadSession. This API can be used to
    // dynamically adjust the parallelism of a batch processing task upwards by
    // adding additional workers.
    BatchCreateReadSessionStreams(context.Context, *BatchCreateReadSessionStreamsRequest) (*BatchCreateReadSessionStreamsResponse, error)
    // Triggers the graceful termination of a single stream in a ReadSession. This
    // API can be used to dynamically adjust the parallelism of a batch processing
    // task downwards without losing data.
    //
    // This API does not delete the stream -- it remains visible in the
    // ReadSession, and any data processed by the stream is not released to other
    // streams. However, no additional data will be assigned to the stream once
    // this call completes. Callers must continue reading data on the stream until
    // the end of the stream is reached so that data which has already been
    // assigned to the stream will be processed.
    //
    // This method will return an error if there are no other live streams
    // in the Session, or if SplitReadStream() has been called on the given
    // Stream.
    FinalizeStream(context.Context, *FinalizeStreamRequest) (*empty.Empty, error)
    // Splits a given read stream into two Streams. These streams are referred to
    // as the primary and the residual of the split. The original stream can still
    // be read from in the same manner as before. Both of the returned streams can
    // also be read from, and the total rows returned by both child streams will be
    // the same as the rows read from the original stream.
    //
    // Moreover, the two child streams will be allocated back to back in the
    // original Stream. Concretely, it is guaranteed that for streams Original,
    // Primary, and Residual, that Original[0-j] = Primary[0-j] and
    // Original[j-n] = Residual[0-m] once the streams have been read to
    // completion.
    //
    // This method is guaranteed to be idempotent.
    SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
}
BigQueryStorageServer is the server API for BigQueryStorage service.
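A minimal server-side sketch, useful mainly for tests: embed UnimplementedBigQueryStorageServer so the RPCs you do not override return Unimplemented, implement the ones you need, and wire everything up with RegisterBigQueryStorageServer. The fakeServer type, the canned resource names, and the storagepb alias are illustrative assumptions.

package fakestorage

import (
    "context"
    "net"

    storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
    "google.golang.org/grpc"
)

// fakeServer overrides CreateReadSession and inherits every other RPC from
// UnimplementedBigQueryStorageServer.
type fakeServer struct {
    storagepb.UnimplementedBigQueryStorageServer
}

func (s *fakeServer) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest) (*storagepb.ReadSession, error) {
    // Hand back a canned session with a single stream.
    return &storagepb.ReadSession{
        Name:    "projects/p/locations/us/sessions/fake",
        Streams: []*storagepb.Stream{{Name: "projects/p/locations/us/streams/fake-0"}},
    }, nil
}

// Serve registers the fake on a listener; callers dial lis.Addr().
func Serve(lis net.Listener) *grpc.Server {
    srv := grpc.NewServer()
    storagepb.RegisterBigQueryStorageServer(srv, &fakeServer{})
    go srv.Serve(lis) // error ignored for brevity in this sketch
    return srv
}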
type BigQueryStorage_ReadRowsClient ¶
type BigQueryStorage_ReadRowsClient interface {
    Recv() (*ReadRowsResponse, error)
    grpc.ClientStream
}
type BigQueryStorage_ReadRowsServer ¶
type BigQueryStorage_ReadRowsServer interface {
    Send(*ReadRowsResponse) error
    grpc.ServerStream
}
type CreateReadSessionRequest ¶
type CreateReadSessionRequest struct {
    // Required. Reference to the table to read.
    TableReference *TableReference `protobuf:"bytes,1,opt,name=table_reference,json=tableReference,proto3" json:"table_reference,omitempty"`
    // Required. String of the form `projects/{project_id}` indicating the
    // project this ReadSession is associated with. This is the project that will
    // be billed for usage.
    Parent string `protobuf:"bytes,6,opt,name=parent,proto3" json:"parent,omitempty"`
    // Any modifiers to the Table (e.g. snapshot timestamp).
    TableModifiers *TableModifiers `protobuf:"bytes,2,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"`
    // Initial number of streams. If unset or 0, we will
    // provide a value of streams so as to produce reasonable throughput. Must be
    // non-negative. The number of streams may be lower than the requested number,
    // depending on the amount of parallelism that is reasonable for the table and
    // the maximum amount of parallelism allowed by the system.
    //
    // Streams must be read starting from offset 0.
    RequestedStreams int32 `protobuf:"varint,3,opt,name=requested_streams,json=requestedStreams,proto3" json:"requested_streams,omitempty"`
    // Read options for this session (e.g. column selection, filters).
    ReadOptions *TableReadOptions `protobuf:"bytes,4,opt,name=read_options,json=readOptions,proto3" json:"read_options,omitempty"`
    // Data output format. Currently defaults to Avro.
    Format DataFormat `protobuf:"varint,5,opt,name=format,proto3,enum=google.cloud.bigquery.storage.v1beta1.DataFormat" json:"format,omitempty"`
    // The strategy to use for distributing data among multiple streams. Currently
    // defaults to liquid sharding.
    ShardingStrategy ShardingStrategy `` /* 170-byte string literal not displayed */
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Creates a new read session, which may include additional options such as requested parallelism, projection filters and constraints.
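A hedged sketch of populating the request: a snapshot read in Arrow format with balanced sharding. The project, dataset, and table identifiers are placeholders, and storagepb / ptypes are assumed import aliases; ReadOptions can be attached the same way (see TableReadOptions below).

package example

import (
    "time"

    "github.com/golang/protobuf/ptypes"
    storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

// newSnapshotRequest builds a request that reads a table as of one hour ago,
// in Arrow format, split across up to four balanced streams.
func newSnapshotRequest() (*storagepb.CreateReadSessionRequest, error) {
    snapshot, err := ptypes.TimestampProto(time.Now().Add(-time.Hour))
    if err != nil {
        return nil, err
    }
    return &storagepb.CreateReadSessionRequest{
        Parent: "projects/my-billing-project",
        TableReference: &storagepb.TableReference{
            ProjectId: "my-data-project",
            DatasetId: "my_dataset",
            TableId:   "my_table",
        },
        TableModifiers:   &storagepb.TableModifiers{SnapshotTime: snapshot},
        Format:           storagepb.DataFormat_ARROW,
        ShardingStrategy: storagepb.ShardingStrategy_BALANCED,
        RequestedStreams: 4,
    }, nil
}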
func (*CreateReadSessionRequest) Descriptor ¶
func (*CreateReadSessionRequest) Descriptor() ([]byte, []int)
func (*CreateReadSessionRequest) GetFormat ¶
func (m *CreateReadSessionRequest) GetFormat() DataFormat
func (*CreateReadSessionRequest) GetParent ¶
func (m *CreateReadSessionRequest) GetParent() string
func (*CreateReadSessionRequest) GetReadOptions ¶
func (m *CreateReadSessionRequest) GetReadOptions() *TableReadOptions
func (*CreateReadSessionRequest) GetRequestedStreams ¶
func (m *CreateReadSessionRequest) GetRequestedStreams() int32
func (*CreateReadSessionRequest) GetShardingStrategy ¶
func (m *CreateReadSessionRequest) GetShardingStrategy() ShardingStrategy
func (*CreateReadSessionRequest) GetTableModifiers ¶
func (m *CreateReadSessionRequest) GetTableModifiers() *TableModifiers
func (*CreateReadSessionRequest) GetTableReference ¶
func (m *CreateReadSessionRequest) GetTableReference() *TableReference
func (*CreateReadSessionRequest) ProtoMessage ¶
func (*CreateReadSessionRequest) ProtoMessage()
func (*CreateReadSessionRequest) Reset ¶
func (m *CreateReadSessionRequest) Reset()
func (*CreateReadSessionRequest) String ¶
func (m *CreateReadSessionRequest) String() string
func (*CreateReadSessionRequest) XXX_DiscardUnknown ¶
func (m *CreateReadSessionRequest) XXX_DiscardUnknown()
func (*CreateReadSessionRequest) XXX_Marshal ¶
func (m *CreateReadSessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*CreateReadSessionRequest) XXX_Merge ¶
func (m *CreateReadSessionRequest) XXX_Merge(src proto.Message)
func (*CreateReadSessionRequest) XXX_Size ¶
func (m *CreateReadSessionRequest) XXX_Size() int
func (*CreateReadSessionRequest) XXX_Unmarshal ¶
func (m *CreateReadSessionRequest) XXX_Unmarshal(b []byte) error
type DataFormat ¶
type DataFormat int32
Data format for input or output data.
const (
    // Data format is unspecified.
    DataFormat_DATA_FORMAT_UNSPECIFIED DataFormat = 0
    // Avro is a standard open source row based file format.
    // See https://avro.apache.org/ for more details.
    DataFormat_AVRO  DataFormat = 1
    DataFormat_ARROW DataFormat = 3
)
func (DataFormat) EnumDescriptor ¶
func (DataFormat) EnumDescriptor() ([]byte, []int)
func (DataFormat) String ¶
func (x DataFormat) String() string
type FinalizeStreamRequest ¶
type FinalizeStreamRequest struct {
    // Required. Stream to finalize.
    Stream *Stream `protobuf:"bytes,2,opt,name=stream,proto3" json:"stream,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Request information for invoking `FinalizeStream`.
func (*FinalizeStreamRequest) Descriptor ¶
func (*FinalizeStreamRequest) Descriptor() ([]byte, []int)
func (*FinalizeStreamRequest) GetStream ¶
func (m *FinalizeStreamRequest) GetStream() *Stream
func (*FinalizeStreamRequest) ProtoMessage ¶
func (*FinalizeStreamRequest) ProtoMessage()
func (*FinalizeStreamRequest) Reset ¶
func (m *FinalizeStreamRequest) Reset()
func (*FinalizeStreamRequest) String ¶
func (m *FinalizeStreamRequest) String() string
func (*FinalizeStreamRequest) XXX_DiscardUnknown ¶
func (m *FinalizeStreamRequest) XXX_DiscardUnknown()
func (*FinalizeStreamRequest) XXX_Marshal ¶
func (m *FinalizeStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*FinalizeStreamRequest) XXX_Merge ¶
func (m *FinalizeStreamRequest) XXX_Merge(src proto.Message)
func (*FinalizeStreamRequest) XXX_Size ¶
func (m *FinalizeStreamRequest) XXX_Size() int
func (*FinalizeStreamRequest) XXX_Unmarshal ¶
func (m *FinalizeStreamRequest) XXX_Unmarshal(b []byte) error
type Progress ¶
type Progress struct {
    // The fraction of rows assigned to the stream that have been processed by the
    // server so far, not including the rows in the current response message.
    //
    // This value, along with `at_response_end`, can be used to interpolate the
    // progress made as the rows in the message are being processed using the
    // following formula: `at_response_start + (at_response_end -
    // at_response_start) * rows_processed_from_response / rows_in_response`.
    //
    // Note that if a filter is provided, the `at_response_end` value of the
    // previous response may not necessarily be equal to the `at_response_start`
    // value of the current response.
    AtResponseStart float32 `protobuf:"fixed32,1,opt,name=at_response_start,json=atResponseStart,proto3" json:"at_response_start,omitempty"`
    // Similar to `at_response_start`, except that this value includes the rows in
    // the current response.
    AtResponseEnd float32 `protobuf:"fixed32,2,opt,name=at_response_end,json=atResponseEnd,proto3" json:"at_response_end,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
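A small helper implementing the interpolation formula quoted in the field comment above; the function name and the storagepb alias are illustrative, and rowsProcessed/rowsInResponse refer to rows of the current response message.

package example

import storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"

// interpolateProgress estimates overall stream progress after rowsProcessed of
// the rowsInResponse rows in the current response have been handled:
// at_response_start + (at_response_end - at_response_start) * processed/total.
func interpolateProgress(p *storagepb.Progress, rowsProcessed, rowsInResponse int64) float32 {
    if p == nil || rowsInResponse <= 0 {
        return 0
    }
    frac := float32(rowsProcessed) / float32(rowsInResponse)
    return p.GetAtResponseStart() + (p.GetAtResponseEnd()-p.GetAtResponseStart())*frac
}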
func (*Progress) Descriptor ¶
func (*Progress) Descriptor() ([]byte, []int)
func (*Progress) GetAtResponseEnd ¶
func (m *Progress) GetAtResponseEnd() float32
func (*Progress) GetAtResponseStart ¶
func (m *Progress) GetAtResponseStart() float32
func (*Progress) ProtoMessage ¶
func (*Progress) ProtoMessage()
func (*Progress) Reset ¶
func (m *Progress) Reset()
func (*Progress) String ¶
func (m *Progress) String() string
func (*Progress) XXX_DiscardUnknown ¶
func (m *Progress) XXX_DiscardUnknown()
func (*Progress) XXX_Marshal ¶
func (m *Progress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*Progress) XXX_Merge ¶
func (m *Progress) XXX_Merge(src proto.Message)
func (*Progress) XXX_Size ¶
func (m *Progress) XXX_Size() int
func (*Progress) XXX_Unmarshal ¶
func (m *Progress) XXX_Unmarshal(b []byte) error
type ReadRowsRequest ¶
type ReadRowsRequest struct {
    // Required. Identifier of the position in the stream to start reading from.
    // The offset requested must be less than the last row read from ReadRows.
    // Requesting a larger offset is undefined.
    ReadPosition *StreamPosition `protobuf:"bytes,1,opt,name=read_position,json=readPosition,proto3" json:"read_position,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Requesting row data via `ReadRows` must provide Stream position information.
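Because the request carries an explicit StreamPosition, a reader that fails mid-stream can reopen the stream at a caller-tracked offset. A hedged sketch; the function name and storagepb alias are illustrative.

package example

import (
    "context"

    storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

// resumeReadRows reopens a stream at the first unread offset. The caller is
// responsible for tracking how many rows it has already consumed.
func resumeReadRows(ctx context.Context, client storagepb.BigQueryStorageClient, stream *storagepb.Stream, rowsConsumed int64) (storagepb.BigQueryStorage_ReadRowsClient, error) {
    return client.ReadRows(ctx, &storagepb.ReadRowsRequest{
        ReadPosition: &storagepb.StreamPosition{
            Stream: stream,
            Offset: rowsConsumed,
        },
    })
}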
func (*ReadRowsRequest) Descriptor ¶
func (*ReadRowsRequest) Descriptor() ([]byte, []int)
func (*ReadRowsRequest) GetReadPosition ¶
func (m *ReadRowsRequest) GetReadPosition() *StreamPosition
func (*ReadRowsRequest) ProtoMessage ¶
func (*ReadRowsRequest) ProtoMessage()
func (*ReadRowsRequest) Reset ¶
func (m *ReadRowsRequest) Reset()
func (*ReadRowsRequest) String ¶
func (m *ReadRowsRequest) String() string
func (*ReadRowsRequest) XXX_DiscardUnknown ¶
func (m *ReadRowsRequest) XXX_DiscardUnknown()
func (*ReadRowsRequest) XXX_Marshal ¶
func (m *ReadRowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*ReadRowsRequest) XXX_Merge ¶
func (m *ReadRowsRequest) XXX_Merge(src proto.Message)
func (*ReadRowsRequest) XXX_Size ¶
func (m *ReadRowsRequest) XXX_Size() int
func (*ReadRowsRequest) XXX_Unmarshal ¶
func (m *ReadRowsRequest) XXX_Unmarshal(b []byte) error
type ReadRowsResponse ¶
type ReadRowsResponse struct {
    // Row data is returned in format specified during session creation.
    //
    // Types that are valid to be assigned to Rows:
    //	*ReadRowsResponse_AvroRows
    //	*ReadRowsResponse_ArrowRecordBatch
    Rows isReadRowsResponse_Rows `protobuf_oneof:"rows"`
    // Number of serialized rows in the rows block. This value is recorded here,
    // in addition to the row_count values in the output-specific messages in
    // `rows`, so that code which needs to record progress through the stream can
    // do so in an output format-independent way.
    RowCount int64 `protobuf:"varint,6,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
    // Estimated stream statistics.
    Status *StreamStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
    // Throttling status. If unset, the latest response still describes
    // the current throttling status.
    ThrottleStatus *ThrottleStatus `protobuf:"bytes,5,opt,name=throttle_status,json=throttleStatus,proto3" json:"throttle_status,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Response from calling `ReadRows` may include row data, progress and throttling information.
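Exactly one rows variant is populated per response, matching the session's DataFormat. A hedged sketch of dispatching on the oneof (names and the storagepb alias are illustrative; real code would hand the payload to an Avro or Arrow decoder here):

package example

import (
    "fmt"

    storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

// describeResponse reports which oneof variant a response carries.
func describeResponse(resp *storagepb.ReadRowsResponse) string {
    switch rows := resp.GetRows().(type) {
    case *storagepb.ReadRowsResponse_AvroRows:
        return fmt.Sprintf("avro block, %d rows, %d bytes",
            rows.AvroRows.GetRowCount(), len(rows.AvroRows.GetSerializedBinaryRows()))
    case *storagepb.ReadRowsResponse_ArrowRecordBatch:
        return fmt.Sprintf("arrow batch, %d rows, %d bytes",
            rows.ArrowRecordBatch.GetRowCount(), len(rows.ArrowRecordBatch.GetSerializedRecordBatch()))
    default:
        return "no row data"
    }
}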
func (*ReadRowsResponse) Descriptor ¶
func (*ReadRowsResponse) Descriptor() ([]byte, []int)
func (*ReadRowsResponse) GetArrowRecordBatch ¶
func (m *ReadRowsResponse) GetArrowRecordBatch() *ArrowRecordBatch
func (*ReadRowsResponse) GetAvroRows ¶
func (m *ReadRowsResponse) GetAvroRows() *AvroRows
func (*ReadRowsResponse) GetRowCount ¶
func (m *ReadRowsResponse) GetRowCount() int64
func (*ReadRowsResponse) GetRows ¶
func (m *ReadRowsResponse) GetRows() isReadRowsResponse_Rows
func (*ReadRowsResponse) GetStatus ¶
func (m *ReadRowsResponse) GetStatus() *StreamStatus
func (*ReadRowsResponse) GetThrottleStatus ¶
func (m *ReadRowsResponse) GetThrottleStatus() *ThrottleStatus
func (*ReadRowsResponse) ProtoMessage ¶
func (*ReadRowsResponse) ProtoMessage()
func (*ReadRowsResponse) Reset ¶
func (m *ReadRowsResponse) Reset()
func (*ReadRowsResponse) String ¶
func (m *ReadRowsResponse) String() string
func (*ReadRowsResponse) XXX_DiscardUnknown ¶
func (m *ReadRowsResponse) XXX_DiscardUnknown()
func (*ReadRowsResponse) XXX_Marshal ¶
func (m *ReadRowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*ReadRowsResponse) XXX_Merge ¶
func (m *ReadRowsResponse) XXX_Merge(src proto.Message)
func (*ReadRowsResponse) XXX_OneofWrappers ¶
func (*ReadRowsResponse) XXX_OneofWrappers() []interface{}
XXX_OneofWrappers is for the internal use of the proto package.
func (*ReadRowsResponse) XXX_Size ¶
func (m *ReadRowsResponse) XXX_Size() int
func (*ReadRowsResponse) XXX_Unmarshal ¶
func (m *ReadRowsResponse) XXX_Unmarshal(b []byte) error
type ReadRowsResponse_ArrowRecordBatch ¶
type ReadRowsResponse_ArrowRecordBatch struct {
ArrowRecordBatch *ArrowRecordBatch `protobuf:"bytes,4,opt,name=arrow_record_batch,json=arrowRecordBatch,proto3,oneof"`
}
type ReadRowsResponse_AvroRows ¶
type ReadRowsResponse_AvroRows struct {
AvroRows *AvroRows `protobuf:"bytes,3,opt,name=avro_rows,json=avroRows,proto3,oneof"`
}
type ReadSession ¶
type ReadSession struct {
    // Unique identifier for the session, in the form
    // `projects/{project_id}/locations/{location}/sessions/{session_id}`.
    Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
    // Time at which the session becomes invalid. After this time, subsequent
    // requests to read this Session will return errors.
    ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
    // The schema for the read. If read_options.selected_fields is set, the
    // schema may be different from the table schema as it will only contain
    // the selected fields.
    //
    // Types that are valid to be assigned to Schema:
    //	*ReadSession_AvroSchema
    //	*ReadSession_ArrowSchema
    Schema isReadSession_Schema `protobuf_oneof:"schema"`
    // Streams associated with this session.
    Streams []*Stream `protobuf:"bytes,4,rep,name=streams,proto3" json:"streams,omitempty"`
    // Table that this ReadSession is reading from.
    TableReference *TableReference `protobuf:"bytes,7,opt,name=table_reference,json=tableReference,proto3" json:"table_reference,omitempty"`
    // Any modifiers which are applied when reading from the specified table.
    TableModifiers *TableModifiers `protobuf:"bytes,8,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"`
    // The strategy to use for distributing data among the streams.
    ShardingStrategy ShardingStrategy `` /* 170-byte string literal not displayed */
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Information returned from a `CreateReadSession` request.
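A hedged sketch of extracting whichever schema variant the session carries; the helper name and storagepb alias are illustrative.

package example

import (
    "errors"

    storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

// sessionSchema returns the serialized schema for the session, whichever
// format was negotiated at CreateReadSession time.
func sessionSchema(s *storagepb.ReadSession) ([]byte, error) {
    if avro := s.GetAvroSchema(); avro != nil {
        return []byte(avro.GetSchema()), nil // JSON Avro schema
    }
    if arrow := s.GetArrowSchema(); arrow != nil {
        return arrow.GetSerializedSchema(), nil // IPC-serialized Arrow schema
    }
    return nil, errors.New("read session carried no schema")
}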
func (*ReadSession) Descriptor ¶
func (*ReadSession) Descriptor() ([]byte, []int)
func (*ReadSession) GetArrowSchema ¶
func (m *ReadSession) GetArrowSchema() *ArrowSchema
func (*ReadSession) GetAvroSchema ¶
func (m *ReadSession) GetAvroSchema() *AvroSchema
func (*ReadSession) GetExpireTime ¶
func (m *ReadSession) GetExpireTime() *timestamp.Timestamp
func (*ReadSession) GetName ¶
func (m *ReadSession) GetName() string
func (*ReadSession) GetSchema ¶
func (m *ReadSession) GetSchema() isReadSession_Schema
func (*ReadSession) GetShardingStrategy ¶
func (m *ReadSession) GetShardingStrategy() ShardingStrategy
func (*ReadSession) GetStreams ¶
func (m *ReadSession) GetStreams() []*Stream
func (*ReadSession) GetTableModifiers ¶
func (m *ReadSession) GetTableModifiers() *TableModifiers
func (*ReadSession) GetTableReference ¶
func (m *ReadSession) GetTableReference() *TableReference
func (*ReadSession) ProtoMessage ¶
func (*ReadSession) ProtoMessage()
func (*ReadSession) Reset ¶
func (m *ReadSession) Reset()
func (*ReadSession) String ¶
func (m *ReadSession) String() string
func (*ReadSession) XXX_DiscardUnknown ¶
func (m *ReadSession) XXX_DiscardUnknown()
func (*ReadSession) XXX_Marshal ¶
func (m *ReadSession) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*ReadSession) XXX_Merge ¶
func (m *ReadSession) XXX_Merge(src proto.Message)
func (*ReadSession) XXX_OneofWrappers ¶
func (*ReadSession) XXX_OneofWrappers() []interface{}
XXX_OneofWrappers is for the internal use of the proto package.
func (*ReadSession) XXX_Size ¶
func (m *ReadSession) XXX_Size() int
func (*ReadSession) XXX_Unmarshal ¶
func (m *ReadSession) XXX_Unmarshal(b []byte) error
type ReadSession_ArrowSchema ¶
type ReadSession_ArrowSchema struct {
ArrowSchema *ArrowSchema `protobuf:"bytes,6,opt,name=arrow_schema,json=arrowSchema,proto3,oneof"`
}
type ReadSession_AvroSchema ¶
type ReadSession_AvroSchema struct {
AvroSchema *AvroSchema `protobuf:"bytes,5,opt,name=avro_schema,json=avroSchema,proto3,oneof"`
}
type ShardingStrategy ¶
type ShardingStrategy int32
Strategy for distributing data among multiple streams in a read session.
const (
    // Same as LIQUID.
    ShardingStrategy_SHARDING_STRATEGY_UNSPECIFIED ShardingStrategy = 0
    // Assigns data to each stream based on the client's read rate. The faster the
    // client reads from a stream, the more data is assigned to the stream. In
    // this strategy, it's possible to read all data from a single stream even if
    // there are other streams present.
    ShardingStrategy_LIQUID ShardingStrategy = 1
    // Assigns data to each stream such that roughly the same number of rows can
    // be read from each stream. Because the server-side unit for assigning data
    // is collections of rows, the API does not guarantee that each stream will
    // return the same number of rows. Additionally, the limits are enforced based
    // on the number of pre-filtering rows, so some filters can lead to lopsided
    // assignments.
    ShardingStrategy_BALANCED ShardingStrategy = 2
)
func (ShardingStrategy) EnumDescriptor ¶
func (ShardingStrategy) EnumDescriptor() ([]byte, []int)
func (ShardingStrategy) String ¶
func (x ShardingStrategy) String() string
type SplitReadStreamRequest ¶
type SplitReadStreamRequest struct {
    // Required. Stream to split.
    OriginalStream *Stream `protobuf:"bytes,1,opt,name=original_stream,json=originalStream,proto3" json:"original_stream,omitempty"`
    // A value in the range (0.0, 1.0) that specifies the fractional point at
    // which the original stream should be split. The actual split point is
    // evaluated on pre-filtered rows, so if a filter is provided, then there is
    // no guarantee that the division of the rows between the new child streams
    // will be proportional to this fractional value. Additionally, because the
    // server-side unit for assigning data is collections of rows, this fraction
    // will always map to a data storage boundary on the server side.
    Fraction float32 `protobuf:"fixed32,2,opt,name=fraction,proto3" json:"fraction,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Request information for `SplitReadStream`.
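A hedged sketch of splitting a stream at the midpoint so another worker can take the remainder; an empty primary or remainder stream in the response means no further split is possible. Names and the storagepb alias are illustrative.

package example

import (
    "context"

    storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

// splitInHalf asks the service to split stream at the 50% point and returns
// the primary and remainder streams, or (nil, nil, nil) if no split is possible.
func splitInHalf(ctx context.Context, client storagepb.BigQueryStorageClient, stream *storagepb.Stream) (*storagepb.Stream, *storagepb.Stream, error) {
    resp, err := client.SplitReadStream(ctx, &storagepb.SplitReadStreamRequest{
        OriginalStream: stream,
        Fraction:       0.5,
    })
    if err != nil {
        return nil, nil, err
    }
    if resp.GetPrimaryStream() == nil || resp.GetRemainderStream() == nil {
        return nil, nil, nil // stream can no longer be split
    }
    return resp.GetPrimaryStream(), resp.GetRemainderStream(), nil
}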
func (*SplitReadStreamRequest) Descriptor ¶
func (*SplitReadStreamRequest) Descriptor() ([]byte, []int)
func (*SplitReadStreamRequest) GetFraction ¶
func (m *SplitReadStreamRequest) GetFraction() float32
func (*SplitReadStreamRequest) GetOriginalStream ¶
func (m *SplitReadStreamRequest) GetOriginalStream() *Stream
func (*SplitReadStreamRequest) ProtoMessage ¶
func (*SplitReadStreamRequest) ProtoMessage()
func (*SplitReadStreamRequest) Reset ¶
func (m *SplitReadStreamRequest) Reset()
func (*SplitReadStreamRequest) String ¶
func (m *SplitReadStreamRequest) String() string
func (*SplitReadStreamRequest) XXX_DiscardUnknown ¶
func (m *SplitReadStreamRequest) XXX_DiscardUnknown()
func (*SplitReadStreamRequest) XXX_Marshal ¶
func (m *SplitReadStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*SplitReadStreamRequest) XXX_Merge ¶
func (m *SplitReadStreamRequest) XXX_Merge(src proto.Message)
func (*SplitReadStreamRequest) XXX_Size ¶
func (m *SplitReadStreamRequest) XXX_Size() int
func (*SplitReadStreamRequest) XXX_Unmarshal ¶
func (m *SplitReadStreamRequest) XXX_Unmarshal(b []byte) error
type SplitReadStreamResponse ¶
type SplitReadStreamResponse struct {
    // Primary stream, which contains the beginning portion of
    // |original_stream|. An empty value indicates that the original stream can no
    // longer be split.
    PrimaryStream *Stream `protobuf:"bytes,1,opt,name=primary_stream,json=primaryStream,proto3" json:"primary_stream,omitempty"`
    // Remainder stream, which contains the tail of |original_stream|. An empty
    // value indicates that the original stream can no longer be split.
    RemainderStream *Stream `protobuf:"bytes,2,opt,name=remainder_stream,json=remainderStream,proto3" json:"remainder_stream,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Response from `SplitReadStream`.
func (*SplitReadStreamResponse) Descriptor ¶
func (*SplitReadStreamResponse) Descriptor() ([]byte, []int)
func (*SplitReadStreamResponse) GetPrimaryStream ¶
func (m *SplitReadStreamResponse) GetPrimaryStream() *Stream
func (*SplitReadStreamResponse) GetRemainderStream ¶
func (m *SplitReadStreamResponse) GetRemainderStream() *Stream
func (*SplitReadStreamResponse) ProtoMessage ¶
func (*SplitReadStreamResponse) ProtoMessage()
func (*SplitReadStreamResponse) Reset ¶
func (m *SplitReadStreamResponse) Reset()
func (*SplitReadStreamResponse) String ¶
func (m *SplitReadStreamResponse) String() string
func (*SplitReadStreamResponse) XXX_DiscardUnknown ¶
func (m *SplitReadStreamResponse) XXX_DiscardUnknown()
func (*SplitReadStreamResponse) XXX_Marshal ¶
func (m *SplitReadStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*SplitReadStreamResponse) XXX_Merge ¶
func (m *SplitReadStreamResponse) XXX_Merge(src proto.Message)
func (*SplitReadStreamResponse) XXX_Size ¶
func (m *SplitReadStreamResponse) XXX_Size() int
func (*SplitReadStreamResponse) XXX_Unmarshal ¶
func (m *SplitReadStreamResponse) XXX_Unmarshal(b []byte) error
type Stream ¶
type Stream struct {
    // Name of the stream, in the form
    // `projects/{project_id}/locations/{location}/streams/{stream_id}`.
    Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Information about a single data stream within a read session.
func (*Stream) Descriptor ¶
func (*Stream) Descriptor() ([]byte, []int)
func (*Stream) GetName ¶
func (m *Stream) GetName() string
func (*Stream) ProtoMessage ¶
func (*Stream) ProtoMessage()
func (*Stream) Reset ¶
func (m *Stream) Reset()
func (*Stream) String ¶
func (m *Stream) String() string
func (*Stream) XXX_DiscardUnknown ¶
func (m *Stream) XXX_DiscardUnknown()
func (*Stream) XXX_Marshal ¶
func (m *Stream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*Stream) XXX_Merge ¶
func (m *Stream) XXX_Merge(src proto.Message)
func (*Stream) XXX_Size ¶
func (m *Stream) XXX_Size() int
func (*Stream) XXX_Unmarshal ¶
func (m *Stream) XXX_Unmarshal(b []byte) error
type StreamPosition ¶
type StreamPosition struct {
    // Identifier for a given Stream.
    Stream *Stream `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"`
    // Position in the stream.
    Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Expresses a point within a given stream using an offset position.
func (*StreamPosition) Descriptor ¶
func (*StreamPosition) Descriptor() ([]byte, []int)
func (*StreamPosition) GetOffset ¶
func (m *StreamPosition) GetOffset() int64
func (*StreamPosition) GetStream ¶
func (m *StreamPosition) GetStream() *Stream
func (*StreamPosition) ProtoMessage ¶
func (*StreamPosition) ProtoMessage()
func (*StreamPosition) Reset ¶
func (m *StreamPosition) Reset()
func (*StreamPosition) String ¶
func (m *StreamPosition) String() string
func (*StreamPosition) XXX_DiscardUnknown ¶
func (m *StreamPosition) XXX_DiscardUnknown()
func (*StreamPosition) XXX_Marshal ¶
func (m *StreamPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*StreamPosition) XXX_Merge ¶
func (m *StreamPosition) XXX_Merge(src proto.Message)
func (*StreamPosition) XXX_Size ¶
func (m *StreamPosition) XXX_Size() int
func (*StreamPosition) XXX_Unmarshal ¶
func (m *StreamPosition) XXX_Unmarshal(b []byte) error
type StreamStatus ¶
type StreamStatus struct {
    // Number of estimated rows in the current stream. May change over time as
    // different readers in the stream progress at rates which are relatively fast
    // or slow.
    EstimatedRowCount int64 `protobuf:"varint,1,opt,name=estimated_row_count,json=estimatedRowCount,proto3" json:"estimated_row_count,omitempty"`
    // A value in the range [0.0, 1.0] that represents the fraction of rows
    // assigned to this stream that have been processed by the server. In the
    // presence of read filters, the server may process more rows than it returns,
    // so this value reflects progress through the pre-filtering rows.
    //
    // This value is only populated for sessions created through the BALANCED
    // sharding strategy.
    FractionConsumed float32 `protobuf:"fixed32,2,opt,name=fraction_consumed,json=fractionConsumed,proto3" json:"fraction_consumed,omitempty"`
    // Represents the progress of the current stream.
    Progress *Progress `protobuf:"bytes,4,opt,name=progress,proto3" json:"progress,omitempty"`
    // Whether this stream can be split. For sessions that use the LIQUID sharding
    // strategy, this value is always false. For BALANCED sessions, this value is
    // false when enough data have been read such that no more splits are possible
    // at that point or beyond. For small tables or streams that are the result of
    // a chain of splits, this value may never be true.
    IsSplittable bool `protobuf:"varint,3,opt,name=is_splittable,json=isSplittable,proto3" json:"is_splittable,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}
Progress information for a given Stream.
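Two small illustrative helpers built on this status message: a rough estimate of rows left in the stream, and a simple heuristic (the 0.5 threshold is arbitrary) for deciding whether a split is still worthwhile. The names and the storagepb alias are assumptions.

package example

import storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"

// remainingRows roughly estimates how many rows are left in the stream.
func remainingRows(s *storagepb.StreamStatus) int64 {
    return int64(float64(s.GetEstimatedRowCount()) * (1 - float64(s.GetFractionConsumed())))
}

// shouldSplit reports whether a SplitReadStream call is still worthwhile:
// the server permits a split and less than half the assigned rows are consumed.
func shouldSplit(s *storagepb.StreamStatus) bool {
    return s.GetIsSplittable() && s.GetFractionConsumed() < 0.5
}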
func (*StreamStatus) Descriptor ¶
func (*StreamStatus) Descriptor() ([]byte, []int)
func (*StreamStatus) GetEstimatedRowCount ¶
func (m *StreamStatus) GetEstimatedRowCount() int64
func (*StreamStatus) GetFractionConsumed ¶
func (m *StreamStatus) GetFractionConsumed() float32
func (*StreamStatus) GetIsSplittable ¶
func (m *StreamStatus) GetIsSplittable() bool
func (*StreamStatus) GetProgress ¶
func (m *StreamStatus) GetProgress() *Progress
func (*StreamStatus) ProtoMessage ¶
func (*StreamStatus) ProtoMessage()
func (*StreamStatus) Reset ¶
func (m *StreamStatus) Reset()
func (*StreamStatus) String ¶
func (m *StreamStatus) String() string
func (*StreamStatus) XXX_DiscardUnknown ¶
func (m *StreamStatus) XXX_DiscardUnknown()
func (*StreamStatus) XXX_Marshal ¶
func (m *StreamStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*StreamStatus) XXX_Merge ¶
func (m *StreamStatus) XXX_Merge(src proto.Message)
func (*StreamStatus) XXX_Size ¶
func (m *StreamStatus) XXX_Size() int
func (*StreamStatus) XXX_Unmarshal ¶
func (m *StreamStatus) XXX_Unmarshal(b []byte) error
type TableModifiers ¶
type TableModifiers struct {
	// The snapshot time of the table. If not set, interpreted as now.
	SnapshotTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
All fields in this message are optional.
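As a sketch, a snapshot read at a point in the past might be requested by populating SnapshotTime; the one-hour offset and variable names are arbitrary, and the ptypes conversion matches the github.com/golang/protobuf timestamp type used by this generated code.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
	storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

func main() {
	// Read the table as it existed one hour ago. Leaving SnapshotTime nil
	// means the snapshot time is interpreted as "now".
	ts, err := ptypes.TimestampProto(time.Now().Add(-1 * time.Hour))
	if err != nil {
		log.Fatal(err)
	}
	mods := &storagepb.TableModifiers{SnapshotTime: ts}
	fmt.Println(mods)
}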
func (*TableModifiers) Descriptor ¶
func (*TableModifiers) Descriptor() ([]byte, []int)
func (*TableModifiers) GetSnapshotTime ¶
func (m *TableModifiers) GetSnapshotTime() *timestamp.Timestamp
func (*TableModifiers) ProtoMessage ¶
func (*TableModifiers) ProtoMessage()
func (*TableModifiers) Reset ¶
func (m *TableModifiers) Reset()
func (*TableModifiers) String ¶
func (m *TableModifiers) String() string
func (*TableModifiers) XXX_DiscardUnknown ¶
func (m *TableModifiers) XXX_DiscardUnknown()
func (*TableModifiers) XXX_Marshal ¶
func (m *TableModifiers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*TableModifiers) XXX_Merge ¶
func (m *TableModifiers) XXX_Merge(src proto.Message)
func (*TableModifiers) XXX_Size ¶
func (m *TableModifiers) XXX_Size() int
func (*TableModifiers) XXX_Unmarshal ¶
func (m *TableModifiers) XXX_Unmarshal(b []byte) error
type TableReadOptions ¶
type TableReadOptions struct {
	// Optional. Names of the fields in the table that should be read. If empty,
	// all fields will be read. If the specified field is a nested field, all the
	// sub-fields in the field will be selected. The output field order is
	// unrelated to the order of fields in selected_fields.
	SelectedFields []string `protobuf:"bytes,1,rep,name=selected_fields,json=selectedFields,proto3" json:"selected_fields,omitempty"`
	// Optional. SQL text filtering statement, similar to a WHERE clause in
	// a query. Aggregates are not supported.
	//
	// Examples: "int_field > 5"
	//           "date_field = CAST('2014-9-27' as DATE)"
	//           "nullable_field is not NULL"
	//           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
	//           "numeric_field BETWEEN 1.0 AND 5.0"
	RowRestriction string `protobuf:"bytes,2,opt,name=row_restriction,json=rowRestriction,proto3" json:"row_restriction,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
Options dictating how we read a table.
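A brief sketch of restricting a read to two columns with a server-side row filter, assuming the storagepb import alias; the column names and filter text are placeholders for a real table's schema. These options are normally attached to the request that creates a read session rather than used on their own.

package main

import (
	"fmt"

	storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

func main() {
	// Project only two columns and push a simple row filter to the service.
	opts := &storagepb.TableReadOptions{
		SelectedFields: []string{"name", "number"},
		RowRestriction: "number > 5",
	}
	fmt.Println(opts)
}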
func (*TableReadOptions) Descriptor ¶
func (*TableReadOptions) Descriptor() ([]byte, []int)
func (*TableReadOptions) GetRowRestriction ¶
func (m *TableReadOptions) GetRowRestriction() string
func (*TableReadOptions) GetSelectedFields ¶
func (m *TableReadOptions) GetSelectedFields() []string
func (*TableReadOptions) ProtoMessage ¶
func (*TableReadOptions) ProtoMessage()
func (*TableReadOptions) Reset ¶
func (m *TableReadOptions) Reset()
func (*TableReadOptions) String ¶
func (m *TableReadOptions) String() string
func (*TableReadOptions) XXX_DiscardUnknown ¶
func (m *TableReadOptions) XXX_DiscardUnknown()
func (*TableReadOptions) XXX_Marshal ¶
func (m *TableReadOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*TableReadOptions) XXX_Merge ¶
func (m *TableReadOptions) XXX_Merge(src proto.Message)
func (*TableReadOptions) XXX_Size ¶
func (m *TableReadOptions) XXX_Size() int
func (*TableReadOptions) XXX_Unmarshal ¶
func (m *TableReadOptions) XXX_Unmarshal(b []byte) error
type TableReference ¶
type TableReference struct {
	// The assigned project ID of the project.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// The ID of the dataset in the above project.
	DatasetId string `protobuf:"bytes,2,opt,name=dataset_id,json=datasetId,proto3" json:"dataset_id,omitempty"`
	// The ID of the table in the above dataset.
	TableId string `protobuf:"bytes,3,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
Table reference that includes just the 3 strings needed to identify a table.
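As an illustration, a reference to a table in the BigQuery public datasets could be built like this, again assuming the storagepb import alias; the IDs are just example values for the three strings the message carries.

package main

import (
	"fmt"

	storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

func main() {
	// The three IDs together identify one table:
	// bigquery-public-data.usa_names.usa_1910_current.
	ref := &storagepb.TableReference{
		ProjectId: "bigquery-public-data",
		DatasetId: "usa_names",
		TableId:   "usa_1910_current",
	}
	fmt.Println(ref)
}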
func (*TableReference) Descriptor ¶
func (*TableReference) Descriptor() ([]byte, []int)
func (*TableReference) GetDatasetId ¶
func (m *TableReference) GetDatasetId() string
func (*TableReference) GetProjectId ¶
func (m *TableReference) GetProjectId() string
func (*TableReference) GetTableId ¶
func (m *TableReference) GetTableId() string
func (*TableReference) ProtoMessage ¶
func (*TableReference) ProtoMessage()
func (*TableReference) Reset ¶
func (m *TableReference) Reset()
func (*TableReference) String ¶
func (m *TableReference) String() string
func (*TableReference) XXX_DiscardUnknown ¶
func (m *TableReference) XXX_DiscardUnknown()
func (*TableReference) XXX_Marshal ¶
func (m *TableReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*TableReference) XXX_Merge ¶
func (m *TableReference) XXX_Merge(src proto.Message)
func (*TableReference) XXX_Size ¶
func (m *TableReference) XXX_Size() int
func (*TableReference) XXX_Unmarshal ¶
func (m *TableReference) XXX_Unmarshal(b []byte) error
type ThrottleStatus ¶
type ThrottleStatus struct {
	// How much this connection is being throttled.
	// 0 is no throttling, 100 is completely throttled.
	ThrottlePercent int32 `protobuf:"varint,1,opt,name=throttle_percent,json=throttlePercent,proto3" json:"throttle_percent,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}
Information on whether the current connection is being throttled.
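A small sketch of how a client might react to this signal; the 50 percent threshold and helper name are arbitrary choices made for illustration.

package main

import (
	"fmt"

	storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
)

// isHeavilyThrottled is a hypothetical helper: ThrottlePercent ranges from 0
// (no throttling) to 100 (completely throttled), and the cutoff of 50 is
// arbitrary.
func isHeavilyThrottled(ts *storagepb.ThrottleStatus) bool {
	return ts.GetThrottlePercent() >= 50
}

func main() {
	fmt.Println(isHeavilyThrottled(&storagepb.ThrottleStatus{ThrottlePercent: 75}))
}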
func (*ThrottleStatus) Descriptor ¶
func (*ThrottleStatus) Descriptor() ([]byte, []int)
func (*ThrottleStatus) GetThrottlePercent ¶
func (m *ThrottleStatus) GetThrottlePercent() int32
func (*ThrottleStatus) ProtoMessage ¶
func (*ThrottleStatus) ProtoMessage()
func (*ThrottleStatus) Reset ¶
func (m *ThrottleStatus) Reset()
func (*ThrottleStatus) String ¶
func (m *ThrottleStatus) String() string
func (*ThrottleStatus) XXX_DiscardUnknown ¶
func (m *ThrottleStatus) XXX_DiscardUnknown()
func (*ThrottleStatus) XXX_Marshal ¶
func (m *ThrottleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
func (*ThrottleStatus) XXX_Merge ¶
func (m *ThrottleStatus) XXX_Merge(src proto.Message)
func (*ThrottleStatus) XXX_Size ¶
func (m *ThrottleStatus) XXX_Size() int
func (*ThrottleStatus) XXX_Unmarshal ¶
func (m *ThrottleStatus) XXX_Unmarshal(b []byte) error
type UnimplementedBigQueryStorageServer ¶
type UnimplementedBigQueryStorageServer struct {
}
UnimplementedBigQueryStorageServer can be embedded to have forward compatible implementations.
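A sketch of the embedding pattern under the assumption that only CreateReadSession needs a real implementation: the embedded type supplies stubs for every other RPC, so the server keeps compiling if methods are later added to the service. The partialServer name and the returned session value are purely illustrative.

package main

import (
	"context"

	storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta1"
	"google.golang.org/grpc"
)

// partialServer overrides a single RPC; the embedded
// UnimplementedBigQueryStorageServer answers the rest with an Unimplemented error.
type partialServer struct {
	storagepb.UnimplementedBigQueryStorageServer
}

func (s *partialServer) CreateReadSession(ctx context.Context, req *storagepb.CreateReadSessionRequest) (*storagepb.ReadSession, error) {
	// A real implementation would construct and return a populated session;
	// the name below is a placeholder.
	return &storagepb.ReadSession{Name: "projects/example-project/locations/us/sessions/example"}, nil
}

func main() {
	srv := grpc.NewServer()
	storagepb.RegisterBigQueryStorageServer(srv, &partialServer{})
	// srv.Serve(lis) would normally follow; setting up the listener is omitted here.
}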
func (*UnimplementedBigQueryStorageServer) BatchCreateReadSessionStreams ¶
func (*UnimplementedBigQueryStorageServer) BatchCreateReadSessionStreams(ctx context.Context, req *BatchCreateReadSessionStreamsRequest) (*BatchCreateReadSessionStreamsResponse, error)
func (*UnimplementedBigQueryStorageServer) CreateReadSession ¶
func (*UnimplementedBigQueryStorageServer) CreateReadSession(ctx context.Context, req *CreateReadSessionRequest) (*ReadSession, error)
func (*UnimplementedBigQueryStorageServer) FinalizeStream ¶
func (*UnimplementedBigQueryStorageServer) FinalizeStream(ctx context.Context, req *FinalizeStreamRequest) (*empty.Empty, error)
func (*UnimplementedBigQueryStorageServer) ReadRows ¶
func (*UnimplementedBigQueryStorageServer) ReadRows(req *ReadRowsRequest, srv BigQueryStorage_ReadRowsServer) error
func (*UnimplementedBigQueryStorageServer) SplitReadStream ¶
func (*UnimplementedBigQueryStorageServer) SplitReadStream(ctx context.Context, req *SplitReadStreamRequest) (*SplitReadStreamResponse, error)