Documentation ¶
Overview ¶
Package distsqlrun is a generated protocol buffer package.
It is generated from these files:
    cockroach/pkg/sql/distsqlrun/api.proto
    cockroach/pkg/sql/distsqlrun/data.proto
    cockroach/pkg/sql/distsqlrun/processors.proto
It has these top-level messages:
    SetupFlowRequest
    SimpleResponse
    Expression
    Ordering
    StreamEndpointSpec
    InputSyncSpec
    OutputRouterSpec
    DatumInfo
    StreamHeader
    StreamData
    StreamTrailer
    StreamMessage
    NoopCoreSpec
    TableReaderSpan
    TableReaderSpec
    JoinReaderSpec
    SorterSpec
    EvaluatorSpec
    DistinctSpec
    MergeJoinerSpec
    HashJoinerSpec
    AggregatorSpec
    ProcessorCoreUnion
    ProcessorSpec
    FlowSpec
Index ¶
- Constants
- Variables
- func GeneratePlanDiagram(flows []FlowSpec, nodeNames []string, w io.Writer) error
- func GetAggregateInfo(fn AggregatorSpec_Func, inputType sqlbase.ColumnType) (aggregateConstructor func() parser.AggregateFunc, ...)
- func ProcessInboundStream(flowCtx *FlowCtx, stream DistSQL_FlowStreamServer, firstMsg *StreamMessage, ...) error
- func RegisterDistSQLServer(s *grpc.Server, srv DistSQLServer)
- func SetFlowRequestTrace(ctx context.Context, req *SetupFlowRequest) error
- type AggregatorSpec
- func (*AggregatorSpec) Descriptor() ([]byte, []int)
- func (m *AggregatorSpec) Marshal() (dAtA []byte, err error)
- func (m *AggregatorSpec) MarshalTo(dAtA []byte) (int, error)
- func (*AggregatorSpec) ProtoMessage()
- func (m *AggregatorSpec) Reset()
- func (m *AggregatorSpec) Size() (n int)
- func (m *AggregatorSpec) String() string
- func (m *AggregatorSpec) Unmarshal(dAtA []byte) error
- type AggregatorSpec_Aggregation
- func (*AggregatorSpec_Aggregation) Descriptor() ([]byte, []int)
- func (m *AggregatorSpec_Aggregation) Marshal() (dAtA []byte, err error)
- func (m *AggregatorSpec_Aggregation) MarshalTo(dAtA []byte) (int, error)
- func (*AggregatorSpec_Aggregation) ProtoMessage()
- func (m *AggregatorSpec_Aggregation) Reset()
- func (m *AggregatorSpec_Aggregation) Size() (n int)
- func (m *AggregatorSpec_Aggregation) String() string
- func (m *AggregatorSpec_Aggregation) Unmarshal(dAtA []byte) error
- type AggregatorSpec_Func
- type DatumInfo
- func (*DatumInfo) Descriptor() ([]byte, []int)
- func (m *DatumInfo) Marshal() (dAtA []byte, err error)
- func (m *DatumInfo) MarshalTo(dAtA []byte) (int, error)
- func (*DatumInfo) ProtoMessage()
- func (m *DatumInfo) Reset()
- func (m *DatumInfo) Size() (n int)
- func (m *DatumInfo) String() string
- func (m *DatumInfo) Unmarshal(dAtA []byte) error
- type DistSQLClient
- type DistSQLServer
- type DistSQL_FlowStreamClient
- type DistSQL_FlowStreamServer
- type DistSQL_RunSyncFlowClient
- type DistSQL_RunSyncFlowServer
- type DistinctSpec
- func (*DistinctSpec) Descriptor() ([]byte, []int)
- func (m *DistinctSpec) Marshal() (dAtA []byte, err error)
- func (m *DistinctSpec) MarshalTo(dAtA []byte) (int, error)
- func (*DistinctSpec) ProtoMessage()
- func (m *DistinctSpec) Reset()
- func (m *DistinctSpec) Size() (n int)
- func (m *DistinctSpec) String() string
- func (m *DistinctSpec) Unmarshal(dAtA []byte) error
- type EvaluatorSpec
- func (*EvaluatorSpec) Descriptor() ([]byte, []int)
- func (m *EvaluatorSpec) Marshal() (dAtA []byte, err error)
- func (m *EvaluatorSpec) MarshalTo(dAtA []byte) (int, error)
- func (*EvaluatorSpec) ProtoMessage()
- func (m *EvaluatorSpec) Reset()
- func (m *EvaluatorSpec) Size() (n int)
- func (m *EvaluatorSpec) String() string
- func (m *EvaluatorSpec) Unmarshal(dAtA []byte) error
- type Expression
- func (*Expression) Descriptor() ([]byte, []int)
- func (m *Expression) Marshal() (dAtA []byte, err error)
- func (m *Expression) MarshalTo(dAtA []byte) (int, error)
- func (*Expression) ProtoMessage()
- func (m *Expression) Reset()
- func (m *Expression) Size() (n int)
- func (m *Expression) String() string
- func (m *Expression) Unmarshal(dAtA []byte) error
- type Flow
- type FlowCtx
- type FlowID
- type FlowSpec
- func (*FlowSpec) Descriptor() ([]byte, []int)
- func (m *FlowSpec) Marshal() (dAtA []byte, err error)
- func (m *FlowSpec) MarshalTo(dAtA []byte) (int, error)
- func (*FlowSpec) ProtoMessage()
- func (m *FlowSpec) Reset()
- func (m *FlowSpec) Size() (n int)
- func (m *FlowSpec) String() string
- func (m *FlowSpec) Unmarshal(dAtA []byte) error
- type HashJoinerSpec
- func (*HashJoinerSpec) Descriptor() ([]byte, []int)
- func (m *HashJoinerSpec) Marshal() (dAtA []byte, err error)
- func (m *HashJoinerSpec) MarshalTo(dAtA []byte) (int, error)
- func (*HashJoinerSpec) ProtoMessage()
- func (m *HashJoinerSpec) Reset()
- func (m *HashJoinerSpec) Size() (n int)
- func (m *HashJoinerSpec) String() string
- func (m *HashJoinerSpec) Unmarshal(dAtA []byte) error
- type InputSyncSpec
- func (*InputSyncSpec) Descriptor() ([]byte, []int)
- func (m *InputSyncSpec) Marshal() (dAtA []byte, err error)
- func (m *InputSyncSpec) MarshalTo(dAtA []byte) (int, error)
- func (*InputSyncSpec) ProtoMessage()
- func (m *InputSyncSpec) Reset()
- func (m *InputSyncSpec) Size() (n int)
- func (m *InputSyncSpec) String() string
- func (m *InputSyncSpec) Unmarshal(dAtA []byte) error
- type InputSyncSpec_Type
- type JoinReaderSpec
- func (*JoinReaderSpec) Descriptor() ([]byte, []int)
- func (m *JoinReaderSpec) Marshal() (dAtA []byte, err error)
- func (m *JoinReaderSpec) MarshalTo(dAtA []byte) (int, error)
- func (*JoinReaderSpec) ProtoMessage()
- func (m *JoinReaderSpec) Reset()
- func (m *JoinReaderSpec) Size() (n int)
- func (m *JoinReaderSpec) String() string
- func (m *JoinReaderSpec) Unmarshal(dAtA []byte) error
- type JoinType
- type MergeJoinerSpec
- func (*MergeJoinerSpec) Descriptor() ([]byte, []int)
- func (m *MergeJoinerSpec) Marshal() (dAtA []byte, err error)
- func (m *MergeJoinerSpec) MarshalTo(dAtA []byte) (int, error)
- func (*MergeJoinerSpec) ProtoMessage()
- func (m *MergeJoinerSpec) Reset()
- func (m *MergeJoinerSpec) Size() (n int)
- func (m *MergeJoinerSpec) String() string
- func (m *MergeJoinerSpec) Unmarshal(dAtA []byte) error
- type MultiplexedRowChannel
- func (mrc *MultiplexedRowChannel) Close(err error)
- func (mrc *MultiplexedRowChannel) Init(numSenders int, types []sqlbase.ColumnType)
- func (mrc *MultiplexedRowChannel) NextRow() (sqlbase.EncDatumRow, error)
- func (mrc *MultiplexedRowChannel) PushRow(row sqlbase.EncDatumRow) bool
- func (mrc *MultiplexedRowChannel) Types() []sqlbase.ColumnType
- type NoopCoreSpec
- func (*NoopCoreSpec) Descriptor() ([]byte, []int)
- func (m *NoopCoreSpec) Marshal() (dAtA []byte, err error)
- func (m *NoopCoreSpec) MarshalTo(dAtA []byte) (int, error)
- func (*NoopCoreSpec) ProtoMessage()
- func (m *NoopCoreSpec) Reset()
- func (m *NoopCoreSpec) Size() (n int)
- func (m *NoopCoreSpec) String() string
- func (m *NoopCoreSpec) Unmarshal(dAtA []byte) error
- type Ordering
- func (*Ordering) Descriptor() ([]byte, []int)
- func (m *Ordering) Marshal() (dAtA []byte, err error)
- func (m *Ordering) MarshalTo(dAtA []byte) (int, error)
- func (*Ordering) ProtoMessage()
- func (m *Ordering) Reset()
- func (m *Ordering) Size() (n int)
- func (m *Ordering) String() string
- func (m *Ordering) Unmarshal(dAtA []byte) error
- type Ordering_Column
- func (*Ordering_Column) Descriptor() ([]byte, []int)
- func (m *Ordering_Column) Marshal() (dAtA []byte, err error)
- func (m *Ordering_Column) MarshalTo(dAtA []byte) (int, error)
- func (*Ordering_Column) ProtoMessage()
- func (m *Ordering_Column) Reset()
- func (m *Ordering_Column) Size() (n int)
- func (m *Ordering_Column) String() string
- func (m *Ordering_Column) Unmarshal(dAtA []byte) error
- type Ordering_Column_Direction
- type OutputRouterSpec
- func (*OutputRouterSpec) Descriptor() ([]byte, []int)
- func (m *OutputRouterSpec) Marshal() (dAtA []byte, err error)
- func (m *OutputRouterSpec) MarshalTo(dAtA []byte) (int, error)
- func (*OutputRouterSpec) ProtoMessage()
- func (m *OutputRouterSpec) Reset()
- func (m *OutputRouterSpec) Size() (n int)
- func (m *OutputRouterSpec) String() string
- func (m *OutputRouterSpec) Unmarshal(dAtA []byte) error
- type OutputRouterSpec_Type
- type ProcessorCoreUnion
- func (*ProcessorCoreUnion) Descriptor() ([]byte, []int)
- func (this *ProcessorCoreUnion) GetValue() interface{}
- func (m *ProcessorCoreUnion) Marshal() (dAtA []byte, err error)
- func (m *ProcessorCoreUnion) MarshalTo(dAtA []byte) (int, error)
- func (*ProcessorCoreUnion) ProtoMessage()
- func (m *ProcessorCoreUnion) Reset()
- func (this *ProcessorCoreUnion) SetValue(value interface{}) bool
- func (m *ProcessorCoreUnion) Size() (n int)
- func (m *ProcessorCoreUnion) String() string
- func (m *ProcessorCoreUnion) Unmarshal(dAtA []byte) error
- type ProcessorSpec
- func (*ProcessorSpec) Descriptor() ([]byte, []int)
- func (m *ProcessorSpec) Marshal() (dAtA []byte, err error)
- func (m *ProcessorSpec) MarshalTo(dAtA []byte) (int, error)
- func (*ProcessorSpec) ProtoMessage()
- func (m *ProcessorSpec) Reset()
- func (m *ProcessorSpec) Size() (n int)
- func (m *ProcessorSpec) String() string
- func (m *ProcessorSpec) Unmarshal(dAtA []byte) error
- type RowBuffer
- type RowChannel
- func (rc *RowChannel) Close(err error)
- func (rc *RowChannel) Init(types []sqlbase.ColumnType)
- func (rc *RowChannel) InitWithBufSize(types []sqlbase.ColumnType, chanBufSize int)
- func (rc *RowChannel) NextRow() (sqlbase.EncDatumRow, error)
- func (rc *RowChannel) NoMoreRows()
- func (rc *RowChannel) PushRow(row sqlbase.EncDatumRow) bool
- func (rc *RowChannel) Types() []sqlbase.ColumnType
- type RowReceiver
- type RowSource
- type ServerConfig
- type ServerImpl
- func (ds *ServerImpl) FlowStream(stream DistSQL_FlowStreamServer) error
- func (ds *ServerImpl) RunSyncFlow(req *SetupFlowRequest, stream DistSQL_RunSyncFlowServer) error
- func (ds *ServerImpl) SetupFlow(_ context.Context, req *SetupFlowRequest) (*SimpleResponse, error)
- func (ds *ServerImpl) SetupSyncFlow(ctx context.Context, req *SetupFlowRequest, output RowReceiver) (*Flow, error)
- func (ds *ServerImpl) Start()
- type SetupFlowRequest
- func (*SetupFlowRequest) Descriptor() ([]byte, []int)
- func (m *SetupFlowRequest) Marshal() (dAtA []byte, err error)
- func (m *SetupFlowRequest) MarshalTo(dAtA []byte) (int, error)
- func (*SetupFlowRequest) ProtoMessage()
- func (m *SetupFlowRequest) Reset()
- func (m *SetupFlowRequest) Size() (n int)
- func (m *SetupFlowRequest) String() string
- func (m *SetupFlowRequest) Unmarshal(dAtA []byte) error
- type SimpleResponse
- func (*SimpleResponse) Descriptor() ([]byte, []int)
- func (m *SimpleResponse) Marshal() (dAtA []byte, err error)
- func (m *SimpleResponse) MarshalTo(dAtA []byte) (int, error)
- func (*SimpleResponse) ProtoMessage()
- func (m *SimpleResponse) Reset()
- func (m *SimpleResponse) Size() (n int)
- func (m *SimpleResponse) String() string
- func (m *SimpleResponse) Unmarshal(dAtA []byte) error
- type SorterSpec
- func (*SorterSpec) Descriptor() ([]byte, []int)
- func (m *SorterSpec) Marshal() (dAtA []byte, err error)
- func (m *SorterSpec) MarshalTo(dAtA []byte) (int, error)
- func (*SorterSpec) ProtoMessage()
- func (m *SorterSpec) Reset()
- func (m *SorterSpec) Size() (n int)
- func (m *SorterSpec) String() string
- func (m *SorterSpec) Unmarshal(dAtA []byte) error
- type StreamData
- func (*StreamData) Descriptor() ([]byte, []int)
- func (m *StreamData) Marshal() (dAtA []byte, err error)
- func (m *StreamData) MarshalTo(dAtA []byte) (int, error)
- func (*StreamData) ProtoMessage()
- func (m *StreamData) Reset()
- func (m *StreamData) Size() (n int)
- func (m *StreamData) String() string
- func (m *StreamData) Unmarshal(dAtA []byte) error
- type StreamDecoder
- type StreamEncoder
- type StreamEndpointSpec
- func (*StreamEndpointSpec) Descriptor() ([]byte, []int)
- func (m *StreamEndpointSpec) Marshal() (dAtA []byte, err error)
- func (m *StreamEndpointSpec) MarshalTo(dAtA []byte) (int, error)
- func (*StreamEndpointSpec) ProtoMessage()
- func (m *StreamEndpointSpec) Reset()
- func (m *StreamEndpointSpec) Size() (n int)
- func (m *StreamEndpointSpec) String() string
- func (m *StreamEndpointSpec) Unmarshal(dAtA []byte) error
- type StreamEndpointSpec_Type
- type StreamHeader
- func (*StreamHeader) Descriptor() ([]byte, []int)
- func (m *StreamHeader) Marshal() (dAtA []byte, err error)
- func (m *StreamHeader) MarshalTo(dAtA []byte) (int, error)
- func (*StreamHeader) ProtoMessage()
- func (m *StreamHeader) Reset()
- func (m *StreamHeader) Size() (n int)
- func (m *StreamHeader) String() string
- func (m *StreamHeader) Unmarshal(dAtA []byte) error
- type StreamID
- type StreamMessage
- func (*StreamMessage) Descriptor() ([]byte, []int)
- func (m *StreamMessage) Marshal() (dAtA []byte, err error)
- func (m *StreamMessage) MarshalTo(dAtA []byte) (int, error)
- func (*StreamMessage) ProtoMessage()
- func (m *StreamMessage) Reset()
- func (m *StreamMessage) Size() (n int)
- func (m *StreamMessage) String() string
- func (m *StreamMessage) Unmarshal(dAtA []byte) error
- type StreamMsg
- type StreamTrailer
- func (*StreamTrailer) Descriptor() ([]byte, []int)
- func (m *StreamTrailer) Marshal() (dAtA []byte, err error)
- func (m *StreamTrailer) MarshalTo(dAtA []byte) (int, error)
- func (*StreamTrailer) ProtoMessage()
- func (m *StreamTrailer) Reset()
- func (m *StreamTrailer) Size() (n int)
- func (m *StreamTrailer) String() string
- func (m *StreamTrailer) Unmarshal(dAtA []byte) error
- type TableReaderSpan
- func (*TableReaderSpan) Descriptor() ([]byte, []int)
- func (m *TableReaderSpan) Marshal() (dAtA []byte, err error)
- func (m *TableReaderSpan) MarshalTo(dAtA []byte) (int, error)
- func (*TableReaderSpan) ProtoMessage()
- func (m *TableReaderSpan) Reset()
- func (m *TableReaderSpan) Size() (n int)
- func (m *TableReaderSpan) String() string
- func (m *TableReaderSpan) Unmarshal(dAtA []byte) error
- type TableReaderSpec
- func (*TableReaderSpec) Descriptor() ([]byte, []int)
- func (m *TableReaderSpec) Marshal() (dAtA []byte, err error)
- func (m *TableReaderSpec) MarshalTo(dAtA []byte) (int, error)
- func (*TableReaderSpec) ProtoMessage()
- func (m *TableReaderSpec) Reset()
- func (m *TableReaderSpec) Size() (n int)
- func (m *TableReaderSpec) String() string
- func (m *TableReaderSpec) Unmarshal(dAtA []byte) error
Constants ¶
const (
    FlowNotStarted flowStatus = iota
    FlowRunning
    FlowFinished
)
Flow status indicators.
Variables ¶
var (
    ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowApi   = fmt.Errorf("proto: integer overflow")
)
var (
    ErrInvalidLengthData = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowData   = fmt.Errorf("proto: integer overflow")
)
var (
    ErrInvalidLengthProcessors = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowProcessors   = fmt.Errorf("proto: integer overflow")
)
var AggregatorSpec_Func_name = map[int32]string{
0: "IDENT",
1: "AVG",
2: "BOOL_AND",
3: "BOOL_OR",
4: "CONCAT_AGG",
5: "COUNT",
7: "MAX",
8: "MIN",
9: "STDDEV",
10: "SUM",
11: "SUM_INT",
12: "VARIANCE",
}
var AggregatorSpec_Func_value = map[string]int32{
"IDENT": 0,
"AVG": 1,
"BOOL_AND": 2,
"BOOL_OR": 3,
"CONCAT_AGG": 4,
"COUNT": 5,
"MAX": 7,
"MIN": 8,
"STDDEV": 9,
"SUM": 10,
"SUM_INT": 11,
"VARIANCE": 12,
}
var InputSyncSpec_Type_name = map[int32]string{
0: "UNORDERED",
1: "ORDERED",
}
var InputSyncSpec_Type_value = map[string]int32{
"UNORDERED": 0,
"ORDERED": 1,
}
var JoinType_name = map[int32]string{
0: "INNER",
1: "LEFT_OUTER",
2: "RIGHT_OUTER",
3: "FULL_OUTER",
}
var JoinType_value = map[string]int32{
"INNER": 0,
"LEFT_OUTER": 1,
"RIGHT_OUTER": 2,
"FULL_OUTER": 3,
}
var Ordering_Column_Direction_name = map[int32]string{
0: "ASC",
1: "DESC",
}
var Ordering_Column_Direction_value = map[string]int32{
"ASC": 0,
"DESC": 1,
}
var OutputRouterSpec_Type_name = map[int32]string{
0: "PASS_THROUGH",
1: "MIRROR",
2: "BY_HASH",
3: "BY_RANGE",
}
var OutputRouterSpec_Type_value = map[string]int32{
"PASS_THROUGH": 0,
"MIRROR": 1,
"BY_HASH": 2,
"BY_RANGE": 3,
}
var StreamEndpointSpec_Type_name = map[int32]string{
0: "LOCAL",
1: "REMOTE",
2: "SYNC_RESPONSE",
}
var StreamEndpointSpec_Type_value = map[string]int32{
"LOCAL": 0,
"REMOTE": 1,
"SYNC_RESPONSE": 2,
}
Functions ¶
func GeneratePlanDiagram ¶
func GeneratePlanDiagram(flows []FlowSpec, nodeNames []string, w io.Writer) error
GeneratePlanDiagram generates the JSON data for a flow diagram. There should be one FlowSpec per node. The function assumes that StreamIDs are unique across all flows.
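A minimal usage sketch; flows and nodeNames are assumed to be already populated, one entry per node:

    var buf bytes.Buffer
    // flows[i] is the FlowSpec scheduled on the node named nodeNames[i].
    if err := GeneratePlanDiagram(flows, nodeNames, &buf); err != nil {
        // handle err
    }
    // buf now holds the JSON describing the flow diagram.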
func GetAggregateInfo ¶
func GetAggregateInfo(
    fn AggregatorSpec_Func, inputType sqlbase.ColumnType,
) (aggregateConstructor func() parser.AggregateFunc, returnType sqlbase.ColumnType, err error)
GetAggregateInfo returns the aggregate constructor and the return type for the given aggregate function when applied on the given type.
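For example, a sketch of looking up the SUM aggregate (inputType is an assumed sqlbase.ColumnType describing the argument column):

    ctor, retType, err := GetAggregateInfo(AggregatorSpec_SUM, inputType)
    if err != nil {
        // unsupported function/type combination
    }
    agg := ctor() // a fresh parser.AggregateFunc, typically one per group
    _ = retType   // the type of the aggregated result column
    _ = agg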
func ProcessInboundStream ¶
func ProcessInboundStream(
    flowCtx *FlowCtx, stream DistSQL_FlowStreamServer, firstMsg *StreamMessage, dst RowReceiver,
) error
ProcessInboundStream receives rows from a DistSQL_FlowStreamServer and sends them to a RowReceiver. Optionally processes an initial StreamMessage that was already received (because the first message contains the flow and stream IDs, it needs to be received before we can get here).
func RegisterDistSQLServer ¶
func RegisterDistSQLServer(s *grpc.Server, srv DistSQLServer)
func SetFlowRequestTrace ¶
func SetFlowRequestTrace(ctx context.Context, req *SetupFlowRequest) error
SetFlowRequestTrace populates req.TraceContext with the context of the current Span in ctx (if any).
Types ¶
type AggregatorSpec ¶
type AggregatorSpec struct {
    // The group key is a subset of the columns in the input stream schema on the
    // basis of which we define our groups.
    GroupCols    []uint32                     `protobuf:"varint,2,rep,name=group_cols,json=groupCols" json:"group_cols,omitempty"`
    Aggregations []AggregatorSpec_Aggregation `protobuf:"bytes,3,rep,name=aggregations" json:"aggregations"`
}
AggregatorSpec is the specification for an "aggregator" (processor core type, not the logical plan computation stage). An aggregator performs 'aggregation' in the SQL sense in that it groups rows and computes an aggregate for each group. The group is configured using the group key. The aggregator can be configured with one or more aggregation functions.
The aggregator's output schema consists of the group key, plus a configurable subset of the generated aggregated values.
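For illustration, a spec corresponding to a hypothetical query like SELECT region, SUM(amount) FROM sales GROUP BY region, assuming region is input column 0 and amount is input column 1:

    spec := AggregatorSpec{
        GroupCols: []uint32{0}, // group by the first input column
        Aggregations: []AggregatorSpec_Aggregation{
            {Func: AggregatorSpec_SUM, ColIdx: 1}, // SUM over the second column
        },
    }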
func (*AggregatorSpec) Descriptor ¶
func (*AggregatorSpec) Descriptor() ([]byte, []int)
func (*AggregatorSpec) Marshal ¶
func (m *AggregatorSpec) Marshal() (dAtA []byte, err error)
func (*AggregatorSpec) ProtoMessage ¶
func (*AggregatorSpec) ProtoMessage()
func (*AggregatorSpec) Reset ¶
func (m *AggregatorSpec) Reset()
func (*AggregatorSpec) Size ¶
func (m *AggregatorSpec) Size() (n int)
func (*AggregatorSpec) String ¶
func (m *AggregatorSpec) String() string
func (*AggregatorSpec) Unmarshal ¶
func (m *AggregatorSpec) Unmarshal(dAtA []byte) error
type AggregatorSpec_Aggregation ¶
type AggregatorSpec_Aggregation struct {
    Func AggregatorSpec_Func `protobuf:"varint,1,opt,name=func,enum=cockroach.sql.distsqlrun.AggregatorSpec_Func" json:"func"`
    // Aggregation functions with distinct = true operate as you would expect
    // '<FUNC> DISTINCT' to; the default behavior is the '<FUNC> ALL' operation.
    Distinct bool `protobuf:"varint,2,opt,name=distinct" json:"distinct"`
    // The column index specifies the argument to the aggregator function.
    ColIdx uint32 `protobuf:"varint,3,opt,name=col_idx,json=colIdx" json:"col_idx"`
}
func (*AggregatorSpec_Aggregation) Descriptor ¶
func (*AggregatorSpec_Aggregation) Descriptor() ([]byte, []int)
func (*AggregatorSpec_Aggregation) Marshal ¶
func (m *AggregatorSpec_Aggregation) Marshal() (dAtA []byte, err error)
func (*AggregatorSpec_Aggregation) MarshalTo ¶
func (m *AggregatorSpec_Aggregation) MarshalTo(dAtA []byte) (int, error)
func (*AggregatorSpec_Aggregation) ProtoMessage ¶
func (*AggregatorSpec_Aggregation) ProtoMessage()
func (*AggregatorSpec_Aggregation) Reset ¶
func (m *AggregatorSpec_Aggregation) Reset()
func (*AggregatorSpec_Aggregation) Size ¶
func (m *AggregatorSpec_Aggregation) Size() (n int)
func (*AggregatorSpec_Aggregation) String ¶
func (m *AggregatorSpec_Aggregation) String() string
func (*AggregatorSpec_Aggregation) Unmarshal ¶
func (m *AggregatorSpec_Aggregation) Unmarshal(dAtA []byte) error
type AggregatorSpec_Func ¶
type AggregatorSpec_Func int32
These mirror the aggregate functions supported by sql/parser. See sql/parser/aggregate_builtins.go.
const (
    // The identity function is set to be the default zero-value function,
    // returning the last value added.
    AggregatorSpec_IDENT      AggregatorSpec_Func = 0
    AggregatorSpec_AVG        AggregatorSpec_Func = 1
    AggregatorSpec_BOOL_AND   AggregatorSpec_Func = 2
    AggregatorSpec_BOOL_OR    AggregatorSpec_Func = 3
    AggregatorSpec_CONCAT_AGG AggregatorSpec_Func = 4
    AggregatorSpec_COUNT      AggregatorSpec_Func = 5
    AggregatorSpec_MAX        AggregatorSpec_Func = 7
    AggregatorSpec_MIN        AggregatorSpec_Func = 8
    AggregatorSpec_STDDEV     AggregatorSpec_Func = 9
    AggregatorSpec_SUM        AggregatorSpec_Func = 10
    AggregatorSpec_SUM_INT    AggregatorSpec_Func = 11
    AggregatorSpec_VARIANCE   AggregatorSpec_Func = 12
)
func (AggregatorSpec_Func) Enum ¶
func (x AggregatorSpec_Func) Enum() *AggregatorSpec_Func
func (AggregatorSpec_Func) EnumDescriptor ¶
func (AggregatorSpec_Func) EnumDescriptor() ([]byte, []int)
func (AggregatorSpec_Func) String ¶
func (x AggregatorSpec_Func) String() string
func (*AggregatorSpec_Func) UnmarshalJSON ¶
func (x *AggregatorSpec_Func) UnmarshalJSON(data []byte) error
type DatumInfo ¶
type DatumInfo struct {
    Encoding cockroach_sql_sqlbase2.DatumEncoding `protobuf:"varint,1,opt,name=encoding,enum=cockroach.sql.sqlbase.DatumEncoding" json:"encoding"`
    Type     cockroach_sql_sqlbase1.ColumnType    `protobuf:"bytes,2,opt,name=type" json:"type"`
}
func (*DatumInfo) Descriptor ¶
func (*DatumInfo) ProtoMessage ¶
func (*DatumInfo) ProtoMessage()
type DistSQLClient ¶
type DistSQLClient interface {
    // RunSyncFlow instantiates a flow and streams back results of that flow.
    // The request must contain one flow, and that flow must have a single mailbox
    // of the special sync response type.
    RunSyncFlow(ctx context.Context, in *SetupFlowRequest, opts ...grpc.CallOption) (DistSQL_RunSyncFlowClient, error)
    // SetupFlow instantiates a flow (a subgraph of a distributed SQL
    // computation) on the receiving node.
    SetupFlow(ctx context.Context, in *SetupFlowRequest, opts ...grpc.CallOption) (*SimpleResponse, error)
    // FlowStream is used to push a stream of messages that is part of a flow. The
    // first message will have a StreamHeader which identifies the flow and the
    // stream (mailbox).
    FlowStream(ctx context.Context, opts ...grpc.CallOption) (DistSQL_FlowStreamClient, error)
}
func NewDistSQLClient ¶
func NewDistSQLClient(cc *grpc.ClientConn) DistSQLClient
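A hedged sketch of setting up a flow on a remote node; conn is an assumed *grpc.ClientConn, and ctx and req are an assumed context and prepared *SetupFlowRequest:

    client := NewDistSQLClient(conn)
    resp, err := client.SetupFlow(ctx, req)
    if err != nil {
        // RPC-level failure
    }
    if resp.Error != nil {
        // the remote node failed to set up the flow
    }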
type DistSQLServer ¶
type DistSQLServer interface {
    // RunSyncFlow instantiates a flow and streams back results of that flow.
    // The request must contain one flow, and that flow must have a single mailbox
    // of the special sync response type.
    RunSyncFlow(*SetupFlowRequest, DistSQL_RunSyncFlowServer) error
    // SetupFlow instantiates a flow (a subgraph of a distributed SQL
    // computation) on the receiving node.
    SetupFlow(context.Context, *SetupFlowRequest) (*SimpleResponse, error)
    // FlowStream is used to push a stream of messages that is part of a flow. The
    // first message will have a StreamHeader which identifies the flow and the
    // stream (mailbox).
    FlowStream(DistSQL_FlowStreamServer) error
}
type DistSQL_FlowStreamClient ¶
type DistSQL_FlowStreamClient interface {
    Send(*StreamMessage) error
    CloseAndRecv() (*SimpleResponse, error)
    grpc.ClientStream
}
type DistSQL_FlowStreamServer ¶
type DistSQL_FlowStreamServer interface {
    SendAndClose(*SimpleResponse) error
    Recv() (*StreamMessage, error)
    grpc.ServerStream
}
type DistSQL_RunSyncFlowClient ¶
type DistSQL_RunSyncFlowClient interface {
    Recv() (*StreamMessage, error)
    grpc.ClientStream
}
type DistSQL_RunSyncFlowServer ¶
type DistSQL_RunSyncFlowServer interface {
    Send(*StreamMessage) error
    grpc.ServerStream
}
type DistinctSpec ¶
type DistinctSpec struct {
    // The ordered columns in the input stream can be optionally specified for
    // possible optimizations. The specific ordering (ascending/descending) of
    // the column itself is not important nor is the order in which the columns
    // are specified.
    OrderedColumns []uint32 `protobuf:"varint,1,rep,name=ordered_columns,json=orderedColumns" json:"ordered_columns,omitempty"`
}
func (*DistinctSpec) Descriptor ¶
func (*DistinctSpec) Descriptor() ([]byte, []int)
func (*DistinctSpec) Marshal ¶
func (m *DistinctSpec) Marshal() (dAtA []byte, err error)
func (*DistinctSpec) ProtoMessage ¶
func (*DistinctSpec) ProtoMessage()
func (*DistinctSpec) Reset ¶
func (m *DistinctSpec) Reset()
func (*DistinctSpec) Size ¶
func (m *DistinctSpec) Size() (n int)
func (*DistinctSpec) String ¶
func (m *DistinctSpec) String() string
func (*DistinctSpec) Unmarshal ¶
func (m *DistinctSpec) Unmarshal(dAtA []byte) error
type EvaluatorSpec ¶
type EvaluatorSpec struct {
Exprs []Expression `protobuf:"bytes,1,rep,name=exprs" json:"exprs"`
}
EvaluatorSpec is the specification for an "evaluator", a fully programmable no-grouping aggregator. It runs a 'program' on each individual row and is restricted to operating on one row of data at a time. The 'program' is a set of expressions evaluated in order; the output schema therefore consists of the results of evaluating each of these expressions on the input row.
TODO(irfansharif): Add support for an optional output filter expression. The filter expression would reference the columns in the row via @1, @2, etc., possibly optimizing if filtering on expressions common to the 'program'.
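For illustration, a two-expression 'program' over rows with at least two columns (the expressions are hypothetical):

    spec := EvaluatorSpec{
        Exprs: []Expression{
            {Expr: "@1 + @2"}, // first output column: sum of input columns 1 and 2
            {Expr: "@1 > 10"}, // second output column: a boolean
        },
    }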
func (*EvaluatorSpec) Descriptor ¶
func (*EvaluatorSpec) Descriptor() ([]byte, []int)
func (*EvaluatorSpec) Marshal ¶
func (m *EvaluatorSpec) Marshal() (dAtA []byte, err error)
func (*EvaluatorSpec) ProtoMessage ¶
func (*EvaluatorSpec) ProtoMessage()
func (*EvaluatorSpec) Reset ¶
func (m *EvaluatorSpec) Reset()
func (*EvaluatorSpec) Size ¶
func (m *EvaluatorSpec) Size() (n int)
func (*EvaluatorSpec) String ¶
func (m *EvaluatorSpec) String() string
func (*EvaluatorSpec) Unmarshal ¶
func (m *EvaluatorSpec) Unmarshal(dAtA []byte) error
type Expression ¶
type Expression struct {
    // TODO(radu): TBD how this will be used
    Version string `protobuf:"bytes,1,opt,name=version" json:"version"`
    // SQL expressions are passed as a string, with ordinal references
    // (@1, @2, @3 ..) used for "input" variables.
    Expr string `protobuf:"bytes,2,opt,name=expr" json:"expr"`
}
func (*Expression) Descriptor ¶
func (*Expression) Descriptor() ([]byte, []int)
func (*Expression) Marshal ¶
func (m *Expression) Marshal() (dAtA []byte, err error)
func (*Expression) ProtoMessage ¶
func (*Expression) ProtoMessage()
func (*Expression) Reset ¶
func (m *Expression) Reset()
func (*Expression) Size ¶
func (m *Expression) Size() (n int)
func (*Expression) String ¶
func (m *Expression) String() string
func (*Expression) Unmarshal ¶
func (m *Expression) Unmarshal(dAtA []byte) error
type Flow ¶
type Flow struct {
    FlowCtx
    // contains filtered or unexported fields
}
Flow represents a flow which consists of processors and streams.
func (*Flow) Cleanup ¶
func (f *Flow) Cleanup()
Cleanup should be called when the flow completes (after all processors and mailboxes exited).
func (*Flow) RunSync ¶
func (f *Flow) RunSync()
RunSync runs the processors in the flow in order (serially), in the same context (no goroutines are spawned).
type FlowID ¶
FlowID identifies a flow. It is most importantly used when setting up streams between nodes.
type FlowSpec ¶
type FlowSpec struct {
    FlowID     FlowID          `protobuf:"bytes,1,opt,name=flow_id,json=flowId,customtype=FlowID" json:"flow_id"`
    Processors []ProcessorSpec `protobuf:"bytes,2,rep,name=processors" json:"processors"`
}
FlowSpec describes a "flow" which is a subgraph of a distributed SQL computation consisting of processors and streams.
func (*FlowSpec) Descriptor ¶
func (*FlowSpec) ProtoMessage ¶
func (*FlowSpec) ProtoMessage()
type HashJoinerSpec ¶
type HashJoinerSpec struct {
    // The join constrains certain columns from the left stream to equal
    // corresponding columns on the right stream. These must have the same length.
    LeftEqColumns  []uint32 `protobuf:"varint,1,rep,packed,name=left_eq_columns,json=leftEqColumns" json:"left_eq_columns,omitempty"`
    RightEqColumns []uint32 `protobuf:"varint,2,rep,packed,name=right_eq_columns,json=rightEqColumns" json:"right_eq_columns,omitempty"`
    // "ON" expression (in addition to the equality constraints captured by the
    // equality columns). Assuming that the left stream has N columns and the
    // right stream has M columns, in this expression variables @1 to @N refer to
    // columns of the left stream and variables @(N+1) to @(N+M) refer to columns
    // in the right stream.
    Expr Expression `protobuf:"bytes,5,opt,name=expr" json:"expr"`
    Type JoinType   `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.distsqlrun.JoinType" json:"type"`
    // Columns for the output stream. Assuming that the left stream has N columns
    // and the right stream has M columns, column indices 0 to (N-1) refer to left
    // stream columns and indices N to (N+M-1) refer to right stream columns.
    OutputColumns []uint32 `protobuf:"varint,7,rep,packed,name=output_columns,json=outputColumns" json:"output_columns,omitempty"`
}
HashJoinerSpec is the specification for a hash join processor. The processor has two inputs and one output.
The processor works by reading the entire right input and putting it in a hash table. Thus, there is no guarantee on the ordering of results that stem only from the right input (in the case of RIGHT_OUTER, FULL_OUTER). However, it is guaranteed that results that involve the left stream preserve the ordering; i.e. all results that stem from left row (i) precede results that stem from left row (i+1).
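For illustration, a sketch for a hypothetical inner join T1 JOIN T2 ON T1.a = T2.b, where a is left column 0 and b is right column 1:

    spec := HashJoinerSpec{
        LeftEqColumns:  []uint32{0},       // T1.a
        RightEqColumns: []uint32{1},       // T2.b
        Type:           JoinType_INNER,
        OutputColumns:  []uint32{0, 1, 2}, // an assumed projection over the combined columns
    }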
func (*HashJoinerSpec) Descriptor ¶
func (*HashJoinerSpec) Descriptor() ([]byte, []int)
func (*HashJoinerSpec) Marshal ¶
func (m *HashJoinerSpec) Marshal() (dAtA []byte, err error)
func (*HashJoinerSpec) ProtoMessage ¶
func (*HashJoinerSpec) ProtoMessage()
func (*HashJoinerSpec) Reset ¶
func (m *HashJoinerSpec) Reset()
func (*HashJoinerSpec) Size ¶
func (m *HashJoinerSpec) Size() (n int)
func (*HashJoinerSpec) String ¶
func (m *HashJoinerSpec) String() string
func (*HashJoinerSpec) Unmarshal ¶
func (m *HashJoinerSpec) Unmarshal(dAtA []byte) error
type InputSyncSpec ¶
type InputSyncSpec struct {
    Type     InputSyncSpec_Type   `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.InputSyncSpec_Type" json:"type"`
    Ordering Ordering             `protobuf:"bytes,2,opt,name=ordering" json:"ordering"`
    Streams  []StreamEndpointSpec `protobuf:"bytes,3,rep,name=streams" json:"streams"`
    // Schema for the streams entering this synchronizer.
    ColumnTypes []cockroach_sql_sqlbase1.ColumnType `protobuf:"bytes,4,rep,name=column_types,json=columnTypes" json:"column_types"`
}
InputSyncSpec is the specification for an input synchronizer; it decides how to interleave rows from multiple input streams.
func (*InputSyncSpec) Descriptor ¶
func (*InputSyncSpec) Descriptor() ([]byte, []int)
func (*InputSyncSpec) Marshal ¶
func (m *InputSyncSpec) Marshal() (dAtA []byte, err error)
func (*InputSyncSpec) ProtoMessage ¶
func (*InputSyncSpec) ProtoMessage()
func (*InputSyncSpec) Reset ¶
func (m *InputSyncSpec) Reset()
func (*InputSyncSpec) Size ¶
func (m *InputSyncSpec) Size() (n int)
func (*InputSyncSpec) String ¶
func (m *InputSyncSpec) String() string
func (*InputSyncSpec) Unmarshal ¶
func (m *InputSyncSpec) Unmarshal(dAtA []byte) error
type InputSyncSpec_Type ¶
type InputSyncSpec_Type int32
const (
    // Rows from the input streams are interleaved arbitrarily.
    InputSyncSpec_UNORDERED InputSyncSpec_Type = 0
    // The input streams are guaranteed to be ordered according to the column
    // ordering field; rows from the streams are interleaved to preserve that
    // ordering.
    InputSyncSpec_ORDERED InputSyncSpec_Type = 1
)
func (InputSyncSpec_Type) Enum ¶
func (x InputSyncSpec_Type) Enum() *InputSyncSpec_Type
func (InputSyncSpec_Type) EnumDescriptor ¶
func (InputSyncSpec_Type) EnumDescriptor() ([]byte, []int)
func (InputSyncSpec_Type) String ¶
func (x InputSyncSpec_Type) String() string
func (*InputSyncSpec_Type) UnmarshalJSON ¶
func (x *InputSyncSpec_Type) UnmarshalJSON(data []byte) error
type JoinReaderSpec ¶
type JoinReaderSpec struct {
    Table cockroach_sql_sqlbase1.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
    // If 0, we use the primary index; each row in the input stream has a value
    // for each primary key.
    // TODO(radu): figure out the correct semantics when joining with an index.
    IndexIdx uint32 `protobuf:"varint,2,opt,name=index_idx,json=indexIdx" json:"index_idx"`
    // The filter expression references the columns in the table (table.columns)
    // via ordinal references (@1, @2, etc). If a secondary index is used, the
    // columns that are not available as part of the index cannot be referenced.
    Filter Expression `protobuf:"bytes,3,opt,name=filter" json:"filter"`
    // The table reader will only produce values for these columns, referenced by
    // their indices in table.columns.
    OutputColumns []uint32 `protobuf:"varint,4,rep,packed,name=output_columns,json=outputColumns" json:"output_columns,omitempty"`
}
JoinReaderSpec is the specification for a "join reader". A join reader performs KV operations to retrieve specific rows that correspond to the values in the input stream (join by lookup).
func (*JoinReaderSpec) Descriptor ¶
func (*JoinReaderSpec) Descriptor() ([]byte, []int)
func (*JoinReaderSpec) Marshal ¶
func (m *JoinReaderSpec) Marshal() (dAtA []byte, err error)
func (*JoinReaderSpec) ProtoMessage ¶
func (*JoinReaderSpec) ProtoMessage()
func (*JoinReaderSpec) Reset ¶
func (m *JoinReaderSpec) Reset()
func (*JoinReaderSpec) Size ¶
func (m *JoinReaderSpec) Size() (n int)
func (*JoinReaderSpec) String ¶
func (m *JoinReaderSpec) String() string
func (*JoinReaderSpec) Unmarshal ¶
func (m *JoinReaderSpec) Unmarshal(dAtA []byte) error
type JoinType ¶
type JoinType int32
func (JoinType) EnumDescriptor ¶
func (*JoinType) UnmarshalJSON ¶
type MergeJoinerSpec ¶
type MergeJoinerSpec struct {
    // The streams must be ordered according to the columns that have equality
    // constraints. The first column of the left ordering is constrained to be
    // equal to the first column in the right ordering and so on. The ordering
    // lengths and directions must match.
    // In the example below, the left ordering describes C1+,C2- and the right
    // ordering describes C5+,C4-.
    LeftOrdering  Ordering `protobuf:"bytes,1,opt,name=left_ordering,json=leftOrdering" json:"left_ordering"`
    RightOrdering Ordering `protobuf:"bytes,2,opt,name=right_ordering,json=rightOrdering" json:"right_ordering"`
    // "ON" expression (in addition to the equality constraints captured by the
    // orderings). Assuming that the left stream has N columns and the right
    // stream has M columns, in this expression ordinal references @1 to @N refer
    // to columns of the left stream and variables @(N+1) to @(N+M) refer to
    // columns in the right stream.
    Expr Expression `protobuf:"bytes,5,opt,name=expr" json:"expr"`
    Type JoinType   `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.distsqlrun.JoinType" json:"type"`
    // Columns for the output stream. Assuming that the left stream has N columns
    // and the right stream has M columns, column indices 0 to (N-1) refer to left
    // stream columns and indices N to (N+M-1) refer to right stream columns.
    OutputColumns []uint32 `protobuf:"varint,7,rep,packed,name=output_columns,json=outputColumns" json:"output_columns,omitempty"`
}
MergeJoinerSpec is the specification for a merge join processor. The processor has two inputs and one output. The inputs must have the same ordering on the columns that have equality constraints. For example:
SELECT * FROM T1 INNER JOIN T2 ON T1.C1 = T2.C5 AND T1.C2 = T2.C4
To perform a merge join, the streams corresponding to T1 and T2 must have the same ordering on columns C1, C2 and C5, C4 respectively. For example: C1+,C2- and C5+,C4-.
It is guaranteed that the results preserve this ordering.
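Continuing the example above, a sketch of the orderings, assuming zero-based column positions (C1,C2 at left indices 0,1; C5,C4 at right indices 4,3):

    spec := MergeJoinerSpec{
        LeftOrdering: Ordering{Columns: []Ordering_Column{
            {ColIdx: 0, Direction: Ordering_Column_ASC},  // C1+
            {ColIdx: 1, Direction: Ordering_Column_DESC}, // C2-
        }},
        RightOrdering: Ordering{Columns: []Ordering_Column{
            {ColIdx: 4, Direction: Ordering_Column_ASC},  // C5+
            {ColIdx: 3, Direction: Ordering_Column_DESC}, // C4-
        }},
        Type: JoinType_INNER,
    }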
func (*MergeJoinerSpec) Descriptor ¶
func (*MergeJoinerSpec) Descriptor() ([]byte, []int)
func (*MergeJoinerSpec) Marshal ¶
func (m *MergeJoinerSpec) Marshal() (dAtA []byte, err error)
func (*MergeJoinerSpec) ProtoMessage ¶
func (*MergeJoinerSpec) ProtoMessage()
func (*MergeJoinerSpec) Reset ¶
func (m *MergeJoinerSpec) Reset()
func (*MergeJoinerSpec) Size ¶
func (m *MergeJoinerSpec) Size() (n int)
func (*MergeJoinerSpec) String ¶
func (m *MergeJoinerSpec) String() string
func (*MergeJoinerSpec) Unmarshal ¶
func (m *MergeJoinerSpec) Unmarshal(dAtA []byte) error
type MultiplexedRowChannel ¶
type MultiplexedRowChannel struct {
// contains filtered or unexported fields
}
MultiplexedRowChannel is a RowChannel wrapper which allows multiple row producers to push rows on the same channel.
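A minimal sketch with two producers; types and rows are assumed, and the channel is assumed to deliver all rows once each of the numSenders producers has called Close:

    var mrc MultiplexedRowChannel
    mrc.Init(2, types)
    for i := 0; i < 2; i++ {
        go func() {
            for _, row := range rows {
                if !mrc.PushRow(row) {
                    break
                }
            }
            mrc.Close(nil) // each sender closes when done
        }()
    }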
func (*MultiplexedRowChannel) Close ¶
func (mrc *MultiplexedRowChannel) Close(err error)
Close is part of the RowReceiver interface.
func (*MultiplexedRowChannel) Init ¶
func (mrc *MultiplexedRowChannel) Init(numSenders int, types []sqlbase.ColumnType)
Init initializes the MultiplexedRowChannel with the default buffer size.
func (*MultiplexedRowChannel) NextRow ¶
func (mrc *MultiplexedRowChannel) NextRow() (sqlbase.EncDatumRow, error)
NextRow is part of the RowSource interface.
func (*MultiplexedRowChannel) PushRow ¶
func (mrc *MultiplexedRowChannel) PushRow(row sqlbase.EncDatumRow) bool
PushRow is part of the RowReceiver interface.
func (*MultiplexedRowChannel) Types ¶
func (mrc *MultiplexedRowChannel) Types() []sqlbase.ColumnType
Types is part of the RowSource interface.
type NoopCoreSpec ¶
type NoopCoreSpec struct{}
NoopCoreSpec indicates a "no-op" processor core. This is used when only a synchronizer is required, e.g. at the final endpoint.
func (*NoopCoreSpec) Descriptor ¶
func (*NoopCoreSpec) Descriptor() ([]byte, []int)
func (*NoopCoreSpec) Marshal ¶
func (m *NoopCoreSpec) Marshal() (dAtA []byte, err error)
func (*NoopCoreSpec) ProtoMessage ¶
func (*NoopCoreSpec) ProtoMessage()
func (*NoopCoreSpec) Reset ¶
func (m *NoopCoreSpec) Reset()
func (*NoopCoreSpec) Size ¶
func (m *NoopCoreSpec) Size() (n int)
func (*NoopCoreSpec) String ¶
func (m *NoopCoreSpec) String() string
func (*NoopCoreSpec) Unmarshal ¶
func (m *NoopCoreSpec) Unmarshal(dAtA []byte) error
type Ordering ¶
type Ordering struct {
Columns []Ordering_Column `protobuf:"bytes,1,rep,name=columns" json:"columns"`
}
Ordering defines an order: a list of column indices and directions. See sqlbase.ColumnOrdering.
func (*Ordering) Descriptor ¶
func (*Ordering) ProtoMessage ¶
func (*Ordering) ProtoMessage()
type Ordering_Column ¶
type Ordering_Column struct {
    ColIdx    uint32                    `protobuf:"varint,1,opt,name=col_idx,json=colIdx" json:"col_idx"`
    Direction Ordering_Column_Direction `protobuf:"varint,2,opt,name=direction,enum=cockroach.sql.distsqlrun.Ordering_Column_Direction" json:"direction"`
}
func (*Ordering_Column) Descriptor ¶
func (*Ordering_Column) Descriptor() ([]byte, []int)
func (*Ordering_Column) Marshal ¶
func (m *Ordering_Column) Marshal() (dAtA []byte, err error)
func (*Ordering_Column) ProtoMessage ¶
func (*Ordering_Column) ProtoMessage()
func (*Ordering_Column) Reset ¶
func (m *Ordering_Column) Reset()
func (*Ordering_Column) Size ¶
func (m *Ordering_Column) Size() (n int)
func (*Ordering_Column) String ¶
func (m *Ordering_Column) String() string
func (*Ordering_Column) Unmarshal ¶
func (m *Ordering_Column) Unmarshal(dAtA []byte) error
type Ordering_Column_Direction ¶
type Ordering_Column_Direction int32
The direction of the desired ordering for a column.
const (
    Ordering_Column_ASC  Ordering_Column_Direction = 0
    Ordering_Column_DESC Ordering_Column_Direction = 1
)
func (Ordering_Column_Direction) Enum ¶
func (x Ordering_Column_Direction) Enum() *Ordering_Column_Direction
func (Ordering_Column_Direction) EnumDescriptor ¶
func (Ordering_Column_Direction) EnumDescriptor() ([]byte, []int)
func (Ordering_Column_Direction) String ¶
func (x Ordering_Column_Direction) String() string
func (*Ordering_Column_Direction) UnmarshalJSON ¶
func (x *Ordering_Column_Direction) UnmarshalJSON(data []byte) error
type OutputRouterSpec ¶
type OutputRouterSpec struct {
    Type    OutputRouterSpec_Type `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.OutputRouterSpec_Type" json:"type"`
    Streams []StreamEndpointSpec  `protobuf:"bytes,2,rep,name=streams" json:"streams"`
    // Only used for the BY_HASH type; these are the indexes of the columns we are
    // hashing.
    HashColumns []uint32 `protobuf:"varint,3,rep,name=hash_columns,json=hashColumns" json:"hash_columns,omitempty"`
}
OutputRouterSpec is the specification for the output router of a processor; it decides how to send results to multiple output streams.
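For illustration, a hash router that distributes rows between two remote streams (stream IDs and target addresses are hypothetical):

    router := OutputRouterSpec{
        Type:        OutputRouterSpec_BY_HASH,
        HashColumns: []uint32{0}, // route on the first column
        Streams: []StreamEndpointSpec{
            {Type: StreamEndpointSpec_REMOTE, StreamID: 1, TargetAddr: "node2:26257"},
            {Type: StreamEndpointSpec_REMOTE, StreamID: 2, TargetAddr: "node3:26257"},
        },
    }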
func (*OutputRouterSpec) Descriptor ¶
func (*OutputRouterSpec) Descriptor() ([]byte, []int)
func (*OutputRouterSpec) Marshal ¶
func (m *OutputRouterSpec) Marshal() (dAtA []byte, err error)
func (*OutputRouterSpec) ProtoMessage ¶
func (*OutputRouterSpec) ProtoMessage()
func (*OutputRouterSpec) Reset ¶
func (m *OutputRouterSpec) Reset()
func (*OutputRouterSpec) Size ¶
func (m *OutputRouterSpec) Size() (n int)
func (*OutputRouterSpec) String ¶
func (m *OutputRouterSpec) String() string
func (*OutputRouterSpec) Unmarshal ¶
func (m *OutputRouterSpec) Unmarshal(dAtA []byte) error
type OutputRouterSpec_Type ¶
type OutputRouterSpec_Type int32
const (
    // Single output stream.
    OutputRouterSpec_PASS_THROUGH OutputRouterSpec_Type = 0
    // Each row is sent to all output streams.
    OutputRouterSpec_MIRROR OutputRouterSpec_Type = 1
    // Each row is sent to one stream, chosen by hashing certain columns of
    // the row (specified by the hash_columns field).
    OutputRouterSpec_BY_HASH OutputRouterSpec_Type = 2
    // Each row is sent to one stream, chosen according to preset boundaries
    // for the values of certain columns of the row.
    // TODO(radu): an extra optional structure below for the range details.
    OutputRouterSpec_BY_RANGE OutputRouterSpec_Type = 3
)
func (OutputRouterSpec_Type) Enum ¶
func (x OutputRouterSpec_Type) Enum() *OutputRouterSpec_Type
func (OutputRouterSpec_Type) EnumDescriptor ¶
func (OutputRouterSpec_Type) EnumDescriptor() ([]byte, []int)
func (OutputRouterSpec_Type) String ¶
func (x OutputRouterSpec_Type) String() string
func (*OutputRouterSpec_Type) UnmarshalJSON ¶
func (x *OutputRouterSpec_Type) UnmarshalJSON(data []byte) error
type ProcessorCoreUnion ¶
type ProcessorCoreUnion struct {
    Noop        *NoopCoreSpec    `protobuf:"bytes,1,opt,name=noop" json:"noop,omitempty"`
    TableReader *TableReaderSpec `protobuf:"bytes,2,opt,name=tableReader" json:"tableReader,omitempty"`
    JoinReader  *JoinReaderSpec  `protobuf:"bytes,3,opt,name=joinReader" json:"joinReader,omitempty"`
    Sorter      *SorterSpec      `protobuf:"bytes,4,opt,name=sorter" json:"sorter,omitempty"`
    Aggregator  *AggregatorSpec  `protobuf:"bytes,5,opt,name=aggregator" json:"aggregator,omitempty"`
    Evaluator   *EvaluatorSpec   `protobuf:"bytes,6,opt,name=evaluator" json:"evaluator,omitempty"`
    Distinct    *DistinctSpec    `protobuf:"bytes,7,opt,name=distinct" json:"distinct,omitempty"`
    MergeJoiner *MergeJoinerSpec `protobuf:"bytes,8,opt,name=mergeJoiner" json:"mergeJoiner,omitempty"`
    HashJoiner  *HashJoinerSpec  `protobuf:"bytes,9,opt,name=hashJoiner" json:"hashJoiner,omitempty"`
}
func (*ProcessorCoreUnion) Descriptor ¶
func (*ProcessorCoreUnion) Descriptor() ([]byte, []int)
func (*ProcessorCoreUnion) GetValue ¶
func (this *ProcessorCoreUnion) GetValue() interface{}
func (*ProcessorCoreUnion) Marshal ¶
func (m *ProcessorCoreUnion) Marshal() (dAtA []byte, err error)
func (*ProcessorCoreUnion) MarshalTo ¶
func (m *ProcessorCoreUnion) MarshalTo(dAtA []byte) (int, error)
func (*ProcessorCoreUnion) ProtoMessage ¶
func (*ProcessorCoreUnion) ProtoMessage()
func (*ProcessorCoreUnion) Reset ¶
func (m *ProcessorCoreUnion) Reset()
func (*ProcessorCoreUnion) SetValue ¶
func (this *ProcessorCoreUnion) SetValue(value interface{}) bool
func (*ProcessorCoreUnion) Size ¶
func (m *ProcessorCoreUnion) Size() (n int)
func (*ProcessorCoreUnion) String ¶
func (m *ProcessorCoreUnion) String() string
func (*ProcessorCoreUnion) Unmarshal ¶
func (m *ProcessorCoreUnion) Unmarshal(dAtA []byte) error
type ProcessorSpec ¶
type ProcessorSpec struct {
    // In most cases, there is one input.
    Input []InputSyncSpec `protobuf:"bytes,1,rep,name=input" json:"input"`
    Core  ProcessorCoreUnion `protobuf:"bytes,2,opt,name=core" json:"core"`
    // In most cases, there is one output.
    Output []OutputRouterSpec `protobuf:"bytes,3,rep,name=output" json:"output"`
}
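For illustration, a no-op processor that merges one unordered local input and forwards rows to a sync response stream (the stream ID is hypothetical):

    spec := ProcessorSpec{
        Input: []InputSyncSpec{{
            Type:    InputSyncSpec_UNORDERED,
            Streams: []StreamEndpointSpec{{Type: StreamEndpointSpec_LOCAL, StreamID: 1}},
        }},
        Core: ProcessorCoreUnion{Noop: &NoopCoreSpec{}},
        Output: []OutputRouterSpec{{
            Type:    OutputRouterSpec_PASS_THROUGH,
            Streams: []StreamEndpointSpec{{Type: StreamEndpointSpec_SYNC_RESPONSE}},
        }},
    }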
func (*ProcessorSpec) Descriptor ¶
func (*ProcessorSpec) Descriptor() ([]byte, []int)
func (*ProcessorSpec) Marshal ¶
func (m *ProcessorSpec) Marshal() (dAtA []byte, err error)
func (*ProcessorSpec) ProtoMessage ¶
func (*ProcessorSpec) ProtoMessage()
func (*ProcessorSpec) Reset ¶
func (m *ProcessorSpec) Reset()
func (*ProcessorSpec) Size ¶
func (m *ProcessorSpec) Size() (n int)
func (*ProcessorSpec) String ¶
func (m *ProcessorSpec) String() string
func (*ProcessorSpec) Unmarshal ¶
func (m *ProcessorSpec) Unmarshal(dAtA []byte) error
type RowBuffer ¶
type RowBuffer struct {
    // Rows in this buffer. PushRow appends a row to the back, NextRow removes a
    // row from the front.
    Rows sqlbase.EncDatumRows
    // Err is used when the RowBuffer is used as a RowReceiver; it is the error
    // passed via Close().
    Err error
    // Closed is used when the RowBuffer is used as a RowReceiver; it is set to
    // true when the sender calls Close.
    Closed bool
    // Done is used when the RowBuffer is used as a RowSource; it is set to true
    // when the receiver has read all the rows.
    Done bool
    // contains filtered or unexported fields
}
RowBuffer is an implementation of RowReceiver that buffers (accumulates) results in memory, as well as an implementation of RowSource that returns rows from a row buffer.
func NewRowBuffer ¶
func NewRowBuffer(types []sqlbase.ColumnType, rows sqlbase.EncDatumRows) *RowBuffer
NewRowBuffer creates a RowBuffer with the given schema and initial rows. The types are optional if there is at least one row.
func (*RowBuffer) NextRow ¶
func (rb *RowBuffer) NextRow() (sqlbase.EncDatumRow, error)
NextRow is part of the RowSource interface.
func (*RowBuffer) PushRow ¶
func (rb *RowBuffer) PushRow(row sqlbase.EncDatumRow) bool
PushRow is part of the RowReceiver interface.
func (*RowBuffer) Types ¶
func (rb *RowBuffer) Types() []sqlbase.ColumnType
Types is part of the RowSource interface.
type RowChannel ¶
type RowChannel struct {
    // The channel on which rows are delivered.
    C <-chan StreamMsg
    // contains filtered or unexported fields
}
RowChannel is a thin layer over a StreamMsg channel, which can be used to transfer rows between goroutines.
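A hedged sketch of one producer goroutine and one consumer (types and rows are assumed):

    var rc RowChannel
    rc.Init(types)
    go func() {
        for _, row := range rows {
            if !rc.PushRow(row) {
                break // consumer needs no more rows
            }
        }
        rc.Close(nil) // Close must always be called
    }()
    for {
        row, err := rc.NextRow()
        if err != nil || row == nil {
            break // error, or no more rows
        }
        // use row
    }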
func (*RowChannel) Close ¶
func (rc *RowChannel) Close(err error)
Close is part of the RowReceiver interface.
func (*RowChannel) Init ¶
func (rc *RowChannel) Init(types []sqlbase.ColumnType)
Init initializes the RowChannel with the default buffer size.
func (*RowChannel) InitWithBufSize ¶
func (rc *RowChannel) InitWithBufSize(types []sqlbase.ColumnType, chanBufSize int)
InitWithBufSize initializes the RowChannel with a given buffer size.
func (*RowChannel) NextRow ¶
func (rc *RowChannel) NextRow() (sqlbase.EncDatumRow, error)
NextRow is part of the RowSource interface.
func (*RowChannel) NoMoreRows ¶
func (rc *RowChannel) NoMoreRows()
NoMoreRows causes future PushRow calls to return false. The caller should still drain the channel to make sure the sender is not blocked.
func (*RowChannel) PushRow ¶
func (rc *RowChannel) PushRow(row sqlbase.EncDatumRow) bool
PushRow is part of the RowReceiver interface.
func (*RowChannel) Types ¶
func (rc *RowChannel) Types() []sqlbase.ColumnType
Types is part of the RowSource interface.
type RowReceiver ¶
type RowReceiver interface {
    // PushRow sends a row to this receiver. May block.
    // Returns true if the row was sent, or false if the receiver does not need
    // any more rows. In all cases, Close() still needs to be called.
    // The sender must not modify the row after calling this function.
    PushRow(row sqlbase.EncDatumRow) bool
    // Close is called when we have no more rows; it causes the RowReceiver to
    // process all rows and clean up. If err is not nil, the error is sent to
    // the receiver (and the function may block).
    Close(err error)
}
RowReceiver is any component of a flow that receives rows from another component. It can be an input synchronizer, a router, or a mailbox.
type RowSource ¶
type RowSource interface {
    // Types returns the schema for the rows in this source.
    Types() []sqlbase.ColumnType
    // NextRow retrieves the next row. Returns a nil row if there are no more
    // rows. Depending on the implementation, it may block.
    // The caller must not modify the received row.
    NextRow() (sqlbase.EncDatumRow, error)
}
RowSource is any component of a flow that produces rows that can be consumed by another component.
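Together, the two interfaces allow a simple copy loop; pump is a hypothetical helper sketched against both contracts:

    func pump(src RowSource, dst RowReceiver) {
        var err error
        for {
            var row sqlbase.EncDatumRow
            row, err = src.NextRow()
            if err != nil || row == nil {
                break
            }
            if !dst.PushRow(row) {
                break // receiver needs no more rows; Close is still required
            }
        }
        dst.Close(err) // Close must be called in all cases
    }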
type ServerConfig ¶
type ServerConfig struct {
    log.AmbientContext
    DB         *client.DB
    RPCContext *rpc.Context
    Stopper    *stop.Stopper
}
ServerConfig encompasses the configuration required to create a DistSQLServer.
type ServerImpl ¶
type ServerImpl struct {
    ServerConfig
    // contains filtered or unexported fields
}
ServerImpl implements the server for the distributed SQL APIs.
func NewServer ¶
func NewServer(cfg ServerConfig) *ServerImpl
NewServer instantiates a DistSQLServer.
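A minimal wiring sketch; cfg is an assumed, fully populated ServerConfig:

    ds := NewServer(cfg)
    ds.Start()
    grpcServer := grpc.NewServer()
    RegisterDistSQLServer(grpcServer, ds)
    // grpcServer is then bound to a network listener elsewhere.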
func (*ServerImpl) FlowStream ¶
func (ds *ServerImpl) FlowStream(stream DistSQL_FlowStreamServer) error
FlowStream is part of the DistSQLServer interface.
func (*ServerImpl) RunSyncFlow ¶
func (ds *ServerImpl) RunSyncFlow(req *SetupFlowRequest, stream DistSQL_RunSyncFlowServer) error
RunSyncFlow is part of the DistSQLServer interface.
func (*ServerImpl) SetupFlow ¶
func (ds *ServerImpl) SetupFlow(_ context.Context, req *SetupFlowRequest) (*SimpleResponse, error)
SetupFlow is part of the DistSQLServer interface.
func (*ServerImpl) SetupSyncFlow ¶
func (ds *ServerImpl) SetupSyncFlow(
    ctx context.Context, req *SetupFlowRequest, output RowReceiver,
) (*Flow, error)
SetupSyncFlow sets up a synchronous flow, connecting the sync response output stream to the given RowReceiver. The flow is not started; it will be associated with the given context.
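A hedged end-to-end sketch, collecting results in a RowBuffer; ctx, req, and types are assumed, with types matching the flow's output schema:

    buf := NewRowBuffer(types, nil /* rows */)
    flow, err := ds.SetupSyncFlow(ctx, req, buf)
    if err != nil {
        // handle err
    }
    flow.RunSync()
    flow.Cleanup()
    // buf.Rows holds the results; buf.Err holds any error passed to Close.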
type SetupFlowRequest ¶
type SetupFlowRequest struct {
    Txn cockroach_roachpb1.Transaction `protobuf:"bytes,1,opt,name=txn" json:"txn"`
    // If set, the context of an active tracing span.
    TraceContext *cockroach_util_tracing.SpanContextCarrier `protobuf:"bytes,2,opt,name=trace_context,json=traceContext" json:"trace_context,omitempty"`
    Flow FlowSpec `protobuf:"bytes,3,opt,name=flow" json:"flow"`
}
func (*SetupFlowRequest) Descriptor ¶
func (*SetupFlowRequest) Descriptor() ([]byte, []int)
func (*SetupFlowRequest) Marshal ¶
func (m *SetupFlowRequest) Marshal() (dAtA []byte, err error)
func (*SetupFlowRequest) ProtoMessage ¶
func (*SetupFlowRequest) ProtoMessage()
func (*SetupFlowRequest) Reset ¶
func (m *SetupFlowRequest) Reset()
func (*SetupFlowRequest) Size ¶
func (m *SetupFlowRequest) Size() (n int)
func (*SetupFlowRequest) String ¶
func (m *SetupFlowRequest) String() string
func (*SetupFlowRequest) Unmarshal ¶
func (m *SetupFlowRequest) Unmarshal(dAtA []byte) error
type SimpleResponse ¶
type SimpleResponse struct {
    // TODO(radu): we should be using our own error instead of roachpb.Error.
    // One important error field for distsql is whether the error is
    // "authoritative": if a query is distributed on multiple nodes and one node
    // hits an error, the other nodes may hit errors as well as a consequence
    // (e.g. streams can't connect to the failed flow). The node that started the
    // flow needs to distinguish which errors are caused by non-availability of
    // other nodes so they don't obscure the real error.
    Error *cockroach_roachpb2.Error `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
}
func (*SimpleResponse) Descriptor ¶
func (*SimpleResponse) Descriptor() ([]byte, []int)
func (*SimpleResponse) Marshal ¶
func (m *SimpleResponse) Marshal() (dAtA []byte, err error)
func (*SimpleResponse) ProtoMessage ¶
func (*SimpleResponse) ProtoMessage()
func (*SimpleResponse) Reset ¶
func (m *SimpleResponse) Reset()
func (*SimpleResponse) Size ¶
func (m *SimpleResponse) Size() (n int)
func (*SimpleResponse) String ¶
func (m *SimpleResponse) String() string
func (*SimpleResponse) Unmarshal ¶
func (m *SimpleResponse) Unmarshal(dAtA []byte) error
type SorterSpec ¶
type SorterSpec struct {
    OutputOrdering Ordering `protobuf:"bytes,1,opt,name=output_ordering,json=outputOrdering" json:"output_ordering"`
    // Ordering match length, specifying that the input is already sorted by the
    // first 'n' output ordering columns, can be optionally specified for
    // possible speed-ups taking advantage of the partial orderings.
    OrderingMatchLen uint32 `protobuf:"varint,2,opt,name=ordering_match_len,json=orderingMatchLen" json:"ordering_match_len"`
    // A limit can be optionally specified to allow for further optimizations
    // taking advantage of the fact that only the top 'k' results are needed.
    Limit int64 `protobuf:"varint,3,opt,name=limit" json:"limit"`
}
SorterSpec is the specification for a "sorting aggregator". A sorting aggregator sorts elements in the input stream, providing a certain output order guarantee regardless of the input ordering. The output ordering is according to a configurable set of columns.
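For illustration, a top-10 sort on the first column, assuming no useful pre-existing ordering on the input:

    spec := SorterSpec{
        OutputOrdering: Ordering{Columns: []Ordering_Column{
            {ColIdx: 0, Direction: Ordering_Column_ASC},
        }},
        OrderingMatchLen: 0,  // input is not pre-sorted on any prefix
        Limit:            10, // only the top 10 rows are needed
    }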
func (*SorterSpec) Descriptor ¶
func (*SorterSpec) Descriptor() ([]byte, []int)
func (*SorterSpec) Marshal ¶
func (m *SorterSpec) Marshal() (dAtA []byte, err error)
func (*SorterSpec) ProtoMessage ¶
func (*SorterSpec) ProtoMessage()
func (*SorterSpec) Reset ¶
func (m *SorterSpec) Reset()
func (*SorterSpec) Size ¶
func (m *SorterSpec) Size() (n int)
func (*SorterSpec) String ¶
func (m *SorterSpec) String() string
func (*SorterSpec) Unmarshal ¶
func (m *SorterSpec) Unmarshal(dAtA []byte) error
type StreamData ¶
type StreamData struct {
    // Encodes one or more data rows. Each datum is encoded according to the
    // corresponding DatumInfo.
    RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes" json:"raw_bytes,omitempty"`
}
StreamData is a message that can be sent multiple times as part of a stream.
func (*StreamData) Descriptor ¶
func (*StreamData) Descriptor() ([]byte, []int)
func (*StreamData) Marshal ¶
func (m *StreamData) Marshal() (dAtA []byte, err error)
func (*StreamData) ProtoMessage ¶
func (*StreamData) ProtoMessage()
func (*StreamData) Reset ¶
func (m *StreamData) Reset()
func (*StreamData) Size ¶
func (m *StreamData) Size() (n int)
func (*StreamData) String ¶
func (m *StreamData) String() string
func (*StreamData) Unmarshal ¶
func (m *StreamData) Unmarshal(dAtA []byte) error
type StreamDecoder ¶
type StreamDecoder struct {
// contains filtered or unexported fields
}
StreamDecoder converts a sequence of StreamMessage to EncDatumRows.
Sample usage:
    sd := StreamDecoder{}
    var row sqlbase.EncDatumRow
    for each message in stream {
        err := sd.AddMessage(msg)
        if err != nil { ... }
        for {
            row, err := sd.GetRow(row)
            if err != nil { ... }
            if row == nil {
                // No more rows in this message.
                break
            }
            // Use <row> ...
        }
    }
AddMessage can be called multiple times before getting the rows, but this will cause data to accumulate internally.
func (*StreamDecoder) AddMessage ¶
func (sd *StreamDecoder) AddMessage(msg *StreamMessage) error
AddMessage adds the data in a StreamMessage to the decoder.
The StreamDecoder may keep a reference to msg.Data.RawBytes until all the rows in the message are retrieved with GetRow.
func (*StreamDecoder) GetRow ¶
func (sd *StreamDecoder) GetRow(rowBuf sqlbase.EncDatumRow) (sqlbase.EncDatumRow, error)
GetRow returns a row of EncDatums received in the stream. A row buffer can be provided optionally. Returns nil if there are no more rows received so far.
func (*StreamDecoder) IsDone ¶
func (sd *StreamDecoder) IsDone() (bool, error)
IsDone returns true if all the rows were returned and the stream trailer was received (in which case any error in the trailer is returned as well).
type StreamEncoder ¶
type StreamEncoder struct {
// contains filtered or unexported fields
}
StreamEncoder converts EncDatum rows into a sequence of StreamMessage.
Sample usage:
    se := StreamEncoder{}
    for {
        for ... {
            err := se.AddRow(...)
            ...
        }
        msg := se.FormMessage(false, nil)
        // Send out message.
        ...
    }
    msg := se.FormMessage(true, nil)
    // Send out final message
    ...
func (*StreamEncoder) AddRow ¶
func (se *StreamEncoder) AddRow(row sqlbase.EncDatumRow) error
AddRow encodes a row.
func (*StreamEncoder) FormMessage ¶
func (se *StreamEncoder) FormMessage(final bool, trailerErr error) *StreamMessage
FormMessage populates a message containing the rows added since the last call to FormMessage. The returned StreamMessage should be treated as immutable. If final is true, a message trailer is populated with the given error.
type StreamEndpointSpec ¶
type StreamEndpointSpec struct {
    Type StreamEndpointSpec_Type `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.StreamEndpointSpec_Type" json:"type"`
    // The ID of this stream.
    //
    // For LOCAL streams, both ends of the stream are part of the flow on this
    // machine (and there must be a corresponding endpoint with the same ID).
    //
    // For REMOTE streams, this ID is used in the StreamHeader when connecting to
    // the other host.
    //
    // For SYNC_RESPONSE streams, the ID is unused.
    StreamID StreamID `protobuf:"varint,2,opt,name=stream_id,json=streamId,casttype=StreamID" json:"stream_id"`
    // Serving address for the target host, only used for outgoing REMOTE streams.
    TargetAddr string `protobuf:"bytes,3,opt,name=target_addr,json=targetAddr" json:"target_addr"`
}
StreamEndpointSpec describes one of the endpoints (input or output) of a physical stream.
func (*StreamEndpointSpec) Descriptor ¶
func (*StreamEndpointSpec) Descriptor() ([]byte, []int)
func (*StreamEndpointSpec) Marshal ¶
func (m *StreamEndpointSpec) Marshal() (dAtA []byte, err error)
func (*StreamEndpointSpec) MarshalTo ¶
func (m *StreamEndpointSpec) MarshalTo(dAtA []byte) (int, error)
func (*StreamEndpointSpec) ProtoMessage ¶
func (*StreamEndpointSpec) ProtoMessage()
func (*StreamEndpointSpec) Reset ¶
func (m *StreamEndpointSpec) Reset()
func (*StreamEndpointSpec) Size ¶
func (m *StreamEndpointSpec) Size() (n int)
func (*StreamEndpointSpec) String ¶
func (m *StreamEndpointSpec) String() string
func (*StreamEndpointSpec) Unmarshal ¶
func (m *StreamEndpointSpec) Unmarshal(dAtA []byte) error
type StreamEndpointSpec_Type ¶
type StreamEndpointSpec_Type int32
const (
    // Stream that is part of the local flow.
    StreamEndpointSpec_LOCAL StreamEndpointSpec_Type = 0
    // Stream that has the other endpoint on a different node.
    StreamEndpointSpec_REMOTE StreamEndpointSpec_Type = 1
    // Special stream used when in "sync flow" mode. In this mode, we return
    // results directly as part of the RPC call that set up the flow. This saves
    // overhead (extra RPCs) compared to the normal mode where the RPC just sets
    // up the flow. This type can only be used with outbound endpoints.
    StreamEndpointSpec_SYNC_RESPONSE StreamEndpointSpec_Type = 2
)
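For illustration only (these literals are not part of the generated docs), a LOCAL stream connects two processors within one flow by sharing a StreamID, while a REMOTE stream additionally names the peer; the address below is hypothetical:

    // Output endpoint on one processor and input endpoint on another,
    // joined within the same flow by StreamID 7.
    out := StreamEndpointSpec{Type: StreamEndpointSpec_LOCAL, StreamID: 7}
    in := StreamEndpointSpec{Type: StreamEndpointSpec_LOCAL, StreamID: 7}

    // Outgoing remote endpoint: the StreamID is echoed in the StreamHeader
    // when dialing the target node.
    remote := StreamEndpointSpec{
        Type:       StreamEndpointSpec_REMOTE,
        StreamID:   8,
        TargetAddr: "node2:26257", // hypothetical serving address
    }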
func (StreamEndpointSpec_Type) Enum ¶
func (x StreamEndpointSpec_Type) Enum() *StreamEndpointSpec_Type
func (StreamEndpointSpec_Type) EnumDescriptor ¶
func (StreamEndpointSpec_Type) EnumDescriptor() ([]byte, []int)
func (StreamEndpointSpec_Type) String ¶
func (x StreamEndpointSpec_Type) String() string
func (*StreamEndpointSpec_Type) UnmarshalJSON ¶
func (x *StreamEndpointSpec_Type) UnmarshalJSON(data []byte) error
type StreamHeader ¶
type StreamHeader struct {
    FlowID   FlowID   `protobuf:"bytes,1,opt,name=flow_id,json=flowId,customtype=FlowID" json:"flow_id"`
    StreamID StreamID `protobuf:"varint,2,opt,name=stream_id,json=streamId,casttype=StreamID" json:"stream_id"`
    // There is one DatumInfo for each element in a row.
    Info []DatumInfo `protobuf:"bytes,3,rep,name=info" json:"info"`
}
StreamHeader is a message that is sent once at the beginning of a stream.
func (*StreamHeader) Descriptor ¶
func (*StreamHeader) Descriptor() ([]byte, []int)
func (*StreamHeader) Marshal ¶
func (m *StreamHeader) Marshal() (dAtA []byte, err error)
func (*StreamHeader) MarshalTo ¶
func (m *StreamHeader) MarshalTo(dAtA []byte) (int, error)
func (*StreamHeader) ProtoMessage ¶
func (*StreamHeader) ProtoMessage()
func (*StreamHeader) Reset ¶
func (m *StreamHeader) Reset()
func (*StreamHeader) Size ¶
func (m *StreamHeader) Size() (n int)
func (*StreamHeader) String ¶
func (m *StreamHeader) String() string
func (*StreamHeader) Unmarshal ¶
func (m *StreamHeader) Unmarshal(dAtA []byte) error
type StreamID ¶
type StreamID int
StreamID identifies a stream; it may be local to a flow or it may cross machine boundaries. The identifier can only be used in the context of a specific flow.
type StreamMessage ¶
type StreamMessage struct {
    // Header is present in the first message.
    Header *StreamHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
    // Data is present in all messages except possibly the first and last.
    Data StreamData `protobuf:"bytes,2,opt,name=data" json:"data"`
    // Trailer is present in the last message.
    Trailer *StreamTrailer `protobuf:"bytes,3,opt,name=trailer" json:"trailer,omitempty"`
}
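The field comments describe a simple framing scheme; as a sketch (hdr and the chunk values are assumed to exist), a stream of N messages has this shape:

    first := StreamMessage{Header: &hdr, Data: chunk0} // header only in the first message
    mid := StreamMessage{Data: chunk1}                 // interior messages carry data only
    last := StreamMessage{Data: chunkN, Trailer: &StreamTrailer{}} // trailer ends the stream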
func (*StreamMessage) Descriptor ¶
func (*StreamMessage) Descriptor() ([]byte, []int)
func (*StreamMessage) Marshal ¶
func (m *StreamMessage) Marshal() (dAtA []byte, err error)
func (*StreamMessage) MarshalTo ¶
func (m *StreamMessage) MarshalTo(dAtA []byte) (int, error)
func (*StreamMessage) ProtoMessage ¶
func (*StreamMessage) ProtoMessage()
func (*StreamMessage) Reset ¶
func (m *StreamMessage) Reset()
func (*StreamMessage) Size ¶
func (m *StreamMessage) Size() (n int)
func (*StreamMessage) String ¶
func (m *StreamMessage) String() string
func (*StreamMessage) Unmarshal ¶
func (m *StreamMessage) Unmarshal(dAtA []byte) error
type StreamMsg ¶
type StreamMsg struct {
    // Only one of these fields will be set.
    Row sqlbase.EncDatumRow
    Err error
}
StreamMsg is the message used in the channels that implement local physical streams.
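A consumer of such a channel checks the error field first, since only one of the two fields is set; rowChan below is an assumed chan StreamMsg:

    for msg := range rowChan {
        if msg.Err != nil {
            return msg.Err // producer reported an error; Row is not set
        }
        // Process msg.Row ...
    }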
type StreamTrailer ¶
type StreamTrailer struct {
Error *cockroach_roachpb2.Error `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
}
StreamTrailer is a message that is sent once at the end of a stream.
func (*StreamTrailer) Descriptor ¶
func (*StreamTrailer) Descriptor() ([]byte, []int)
func (*StreamTrailer) Marshal ¶
func (m *StreamTrailer) Marshal() (dAtA []byte, err error)
func (*StreamTrailer) MarshalTo ¶
func (m *StreamTrailer) MarshalTo(dAtA []byte) (int, error)
func (*StreamTrailer) ProtoMessage ¶
func (*StreamTrailer) ProtoMessage()
func (*StreamTrailer) Reset ¶
func (m *StreamTrailer) Reset()
func (*StreamTrailer) Size ¶
func (m *StreamTrailer) Size() (n int)
func (*StreamTrailer) String ¶
func (m *StreamTrailer) String() string
func (*StreamTrailer) Unmarshal ¶
func (m *StreamTrailer) Unmarshal(dAtA []byte) error
type TableReaderSpan ¶
type TableReaderSpan struct {
    // TODO(radu): the dist_sql APIs should be agnostic to how we map tables to
    // KVs. The span should be described as starting and ending lists of values
    // for a prefix of the index columns, along with inclusive/exclusive flags.
    Span cockroach_roachpb1.Span `protobuf:"bytes,1,opt,name=span" json:"span"`
}
func (*TableReaderSpan) Descriptor ¶
func (*TableReaderSpan) Descriptor() ([]byte, []int)
func (*TableReaderSpan) Marshal ¶
func (m *TableReaderSpan) Marshal() (dAtA []byte, err error)
func (*TableReaderSpan) MarshalTo ¶
func (m *TableReaderSpan) MarshalTo(dAtA []byte) (int, error)
func (*TableReaderSpan) ProtoMessage ¶
func (*TableReaderSpan) ProtoMessage()
func (*TableReaderSpan) Reset ¶
func (m *TableReaderSpan) Reset()
func (*TableReaderSpan) Size ¶
func (m *TableReaderSpan) Size() (n int)
func (*TableReaderSpan) String ¶
func (m *TableReaderSpan) String() string
func (*TableReaderSpan) Unmarshal ¶
func (m *TableReaderSpan) Unmarshal(dAtA []byte) error
type TableReaderSpec ¶
type TableReaderSpec struct {
    Table cockroach_sql_sqlbase1.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
    // If 0, we use the primary index. If non-zero, we use the index_idx-th index,
    // i.e. table.indexes[index_idx-1]
    IndexIdx uint32 `protobuf:"varint,2,opt,name=index_idx,json=indexIdx" json:"index_idx"`
    Reverse  bool   `protobuf:"varint,3,opt,name=reverse" json:"reverse"`
    Spans    []TableReaderSpan `protobuf:"bytes,4,rep,name=spans" json:"spans"`
    // The filter expression references the columns in the table (table.columns)
    // via ordinal references (@1, @2, etc). If a secondary index is used, the
    // columns that are not available as part of the index cannot be referenced.
    Filter Expression `protobuf:"bytes,5,opt,name=filter" json:"filter"`
    // The table reader will only produce values for these columns, referenced by
    // their indices in table.columns.
    OutputColumns []uint32 `protobuf:"varint,6,rep,packed,name=output_columns,json=outputColumns" json:"output_columns,omitempty"`
    // If nonzero, the table reader only needs to return this many rows.
    HardLimit int64 `protobuf:"varint,8,opt,name=hard_limit,json=hardLimit" json:"hard_limit"`
    // The soft limit is a hint for how many rows the consumer of the table reader
    // output might need. If both the hard limit and the soft limit are set, the
    // soft limit must be lower than the hard limit.
    SoftLimit int64 `protobuf:"varint,7,opt,name=soft_limit,json=softLimit" json:"soft_limit"`
}
TableReaderSpec is the specification for a "table reader". A table reader performs KV operations to retrieve rows for a table and outputs the desired columns of the rows that pass a filter expression.
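As an illustrative sketch (tableDesc and span are assumed to exist, and the Expression literal assumes an Expr string field holding the expression text), a reader over the primary index that returns the first two columns of rows satisfying @1 > 10, capped at 100 rows:

    spec := TableReaderSpec{
        Table:         tableDesc, // assumed: an existing TableDescriptor
        IndexIdx:      0,         // 0 selects the primary index
        Spans:         []TableReaderSpan{{Span: span}},
        Filter:        Expression{Expr: "@1 > 10"}, // ordinal reference to column 1
        OutputColumns: []uint32{0, 1},              // indices into table.columns
        HardLimit:     100,                         // produce at most 100 rows
    }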
func (*TableReaderSpec) Descriptor ¶
func (*TableReaderSpec) Descriptor() ([]byte, []int)
func (*TableReaderSpec) Marshal ¶
func (m *TableReaderSpec) Marshal() (dAtA []byte, err error)
func (*TableReaderSpec) MarshalTo ¶
func (m *TableReaderSpec) MarshalTo(dAtA []byte) (int, error)
func (*TableReaderSpec) ProtoMessage ¶
func (*TableReaderSpec) ProtoMessage()
func (*TableReaderSpec) Reset ¶
func (m *TableReaderSpec) Reset()
func (*TableReaderSpec) Size ¶
func (m *TableReaderSpec) Size() (n int)
func (*TableReaderSpec) String ¶
func (m *TableReaderSpec) String() string
func (*TableReaderSpec) Unmarshal ¶
func (m *TableReaderSpec) Unmarshal(dAtA []byte) error
Source Files ¶
- aggregator.go
- api.pb.go
- base.go
- data.go
- data.pb.go
- distinct.go
- evaluator.go
- expr.go
- flow.go
- flow_diagram.go
- flow_registry.go
- flow_scheduler.go
- hashjoiner.go
- inbound.go
- input_sync.go
- joinerbase.go
- joinreader.go
- mergejoiner.go
- outbox.go
- processors.pb.go
- readerbase.go
- routers.go
- server.go
- sorter.go
- sorterstrategy.go
- sortervalues.go
- stream_decoder.go
- stream_encoder.go
- stream_merger.go
- tablereader.go