Documentation ¶
Overview ¶
Package distsqlrun is a generated protocol buffer package.
It is generated from these files:
    cockroach/pkg/sql/distsqlrun/api.proto
    cockroach/pkg/sql/distsqlrun/data.proto
    cockroach/pkg/sql/distsqlrun/processors.proto
It has these top-level messages:
    SetupFlowRequest
    EvalContext
    SimpleResponse
    ConsumerSignal
    DrainRequest
    ConsumerHandshake
    Error
    Expression
    Ordering
    StreamEndpointSpec
    InputSyncSpec
    OutputRouterSpec
    DatumInfo
    ProducerHeader
    ProducerData
    ProducerMessage
    RemoteProducerMetadata
    DistSQLVersionGossipInfo
    ProcessorSpec
    PostProcessSpec
    ProcessorCoreUnion
    NoopCoreSpec
    ValuesCoreSpec
    TableReaderSpan
    TableReaderSpec
    JoinReaderSpec
    SorterSpec
    DistinctSpec
    MergeJoinerSpec
    HashJoinerSpec
    AggregatorSpec
    BackfillerSpec
    FlowSpec
    AlgebraicSetOpSpec
    ReadCSVSpec
    SSTWriterSpec
Index ¶
- Constants
- Variables
- func ColumnMutationFilter(m sqlbase.DescriptorMutation) bool
- func CompareEncDatumRowForMerge(lhs, rhs sqlbase.EncDatumRow, ...) (int, error)
- func ConvertBackfillError(ctx context.Context, tableDesc *sqlbase.TableDescriptor, b *client.Batch) error
- func DrainAndClose(ctx context.Context, dst RowReceiver, cause error, srcs ...RowSource)
- func DrainAndForwardMetadata(ctx context.Context, src RowSource, dst RowReceiver)
- func FlowVerIsCompatible(flowVer, minAcceptedVersion, serverVersion DistSQLVersion) bool
- func GeneratePlanDiagram(flows map[roachpb.NodeID]FlowSpec, w io.Writer) error
- func GeneratePlanDiagramWithURL(flows map[roachpb.NodeID]FlowSpec) (string, url.URL, error)
- func GetAggregateInfo(fn AggregatorSpec_Func, inputTypes ...sqlbase.ColumnType) (aggregateConstructor func(*parser.EvalContext) parser.AggregateFunc, ...)
- func GetResumeSpanIndexofMutationID(tableDesc *sqlbase.TableDescriptor, mutationIdx int) int
- func GetResumeSpansFromJob(ctx context.Context, jobsRegistry *jobs.Registry, txn *client.Txn, jobID int64, ...) ([]roachpb.Span, error)
- func IndexMutationFilter(m sqlbase.DescriptorMutation) bool
- func ProcessInboundStream(ctx context.Context, stream DistSQL_FlowStreamServer, ...) error
- func RegisterDistSQLServer(s *grpc.Server, srv DistSQLServer)
- func SetResumeSpansInJob(ctx context.Context, spans []roachpb.Span, jobsRegistry *jobs.Registry, ...) error
- func WriteResumeSpan(ctx context.Context, db *client.DB, id sqlbase.ID, origSpan roachpb.Span, ...) error
- type AggregatorSpec
- func (*AggregatorSpec) Descriptor() ([]byte, []int)
- func (m *AggregatorSpec) Marshal() (dAtA []byte, err error)
- func (m *AggregatorSpec) MarshalTo(dAtA []byte) (int, error)
- func (*AggregatorSpec) ProtoMessage()
- func (m *AggregatorSpec) Reset()
- func (m *AggregatorSpec) Size() (n int)
- func (m *AggregatorSpec) String() string
- func (m *AggregatorSpec) Unmarshal(dAtA []byte) error
- type AggregatorSpec_Aggregation
- func (*AggregatorSpec_Aggregation) Descriptor() ([]byte, []int)
- func (a AggregatorSpec_Aggregation) Equals(b AggregatorSpec_Aggregation) bool
- func (m *AggregatorSpec_Aggregation) Marshal() (dAtA []byte, err error)
- func (m *AggregatorSpec_Aggregation) MarshalTo(dAtA []byte) (int, error)
- func (*AggregatorSpec_Aggregation) ProtoMessage()
- func (m *AggregatorSpec_Aggregation) Reset()
- func (m *AggregatorSpec_Aggregation) Size() (n int)
- func (m *AggregatorSpec_Aggregation) String() string
- func (m *AggregatorSpec_Aggregation) Unmarshal(dAtA []byte) error
- type AggregatorSpec_Func
- type AlgebraicSetOpSpec
- func (*AlgebraicSetOpSpec) Descriptor() ([]byte, []int)
- func (m *AlgebraicSetOpSpec) Marshal() (dAtA []byte, err error)
- func (m *AlgebraicSetOpSpec) MarshalTo(dAtA []byte) (int, error)
- func (*AlgebraicSetOpSpec) ProtoMessage()
- func (m *AlgebraicSetOpSpec) Reset()
- func (m *AlgebraicSetOpSpec) Size() (n int)
- func (m *AlgebraicSetOpSpec) String() string
- func (m *AlgebraicSetOpSpec) Unmarshal(dAtA []byte) error
- type AlgebraicSetOpSpec_SetOpType
- type BackfillerSpec
- func (*BackfillerSpec) Descriptor() ([]byte, []int)
- func (m *BackfillerSpec) Marshal() (dAtA []byte, err error)
- func (m *BackfillerSpec) MarshalTo(dAtA []byte) (int, error)
- func (*BackfillerSpec) ProtoMessage()
- func (m *BackfillerSpec) Reset()
- func (m *BackfillerSpec) Size() (n int)
- func (m *BackfillerSpec) String() string
- func (m *BackfillerSpec) Unmarshal(dAtA []byte) error
- type BackfillerSpec_Type
- type BufferedRecord
- type CancellableRowReceiver
- type ConsumerHandshake
- func (*ConsumerHandshake) Descriptor() ([]byte, []int)
- func (m *ConsumerHandshake) Marshal() (dAtA []byte, err error)
- func (m *ConsumerHandshake) MarshalTo(dAtA []byte) (int, error)
- func (*ConsumerHandshake) ProtoMessage()
- func (m *ConsumerHandshake) Reset()
- func (m *ConsumerHandshake) Size() (n int)
- func (m *ConsumerHandshake) String() string
- func (m *ConsumerHandshake) Unmarshal(dAtA []byte) error
- type ConsumerSignal
- func (*ConsumerSignal) Descriptor() ([]byte, []int)
- func (this *ConsumerSignal) GetValue() interface{}
- func (m *ConsumerSignal) Marshal() (dAtA []byte, err error)
- func (m *ConsumerSignal) MarshalTo(dAtA []byte) (int, error)
- func (*ConsumerSignal) ProtoMessage()
- func (m *ConsumerSignal) Reset()
- func (this *ConsumerSignal) SetValue(value interface{}) bool
- func (m *ConsumerSignal) Size() (n int)
- func (m *ConsumerSignal) String() string
- func (m *ConsumerSignal) Unmarshal(dAtA []byte) error
- type ConsumerStatus
- type DatumInfo
- func (*DatumInfo) Descriptor() ([]byte, []int)
- func (m *DatumInfo) Marshal() (dAtA []byte, err error)
- func (m *DatumInfo) MarshalTo(dAtA []byte) (int, error)
- func (*DatumInfo) ProtoMessage()
- func (m *DatumInfo) Reset()
- func (m *DatumInfo) Size() (n int)
- func (m *DatumInfo) String() string
- func (m *DatumInfo) Unmarshal(dAtA []byte) error
- type DistSQLClient
- type DistSQLMetrics
- type DistSQLServer
- type DistSQLVersion
- type DistSQLVersionGossipInfo
- func (*DistSQLVersionGossipInfo) Descriptor() ([]byte, []int)
- func (m *DistSQLVersionGossipInfo) Marshal() (dAtA []byte, err error)
- func (m *DistSQLVersionGossipInfo) MarshalTo(dAtA []byte) (int, error)
- func (*DistSQLVersionGossipInfo) ProtoMessage()
- func (m *DistSQLVersionGossipInfo) Reset()
- func (m *DistSQLVersionGossipInfo) Size() (n int)
- func (m *DistSQLVersionGossipInfo) String() string
- func (m *DistSQLVersionGossipInfo) Unmarshal(dAtA []byte) error
- type DistSQL_FlowStreamClient
- type DistSQL_FlowStreamServer
- type DistSQL_RunSyncFlowClient
- type DistSQL_RunSyncFlowServer
- type DistinctSpec
- func (*DistinctSpec) Descriptor() ([]byte, []int)
- func (m *DistinctSpec) Marshal() (dAtA []byte, err error)
- func (m *DistinctSpec) MarshalTo(dAtA []byte) (int, error)
- func (*DistinctSpec) ProtoMessage()
- func (m *DistinctSpec) Reset()
- func (m *DistinctSpec) Size() (n int)
- func (m *DistinctSpec) String() string
- func (m *DistinctSpec) Unmarshal(dAtA []byte) error
- type DrainRequest
- func (*DrainRequest) Descriptor() ([]byte, []int)
- func (m *DrainRequest) Marshal() (dAtA []byte, err error)
- func (m *DrainRequest) MarshalTo(dAtA []byte) (int, error)
- func (*DrainRequest) ProtoMessage()
- func (m *DrainRequest) Reset()
- func (m *DrainRequest) Size() (n int)
- func (m *DrainRequest) String() string
- func (m *DrainRequest) Unmarshal(dAtA []byte) error
- type Error
- func (*Error) Descriptor() ([]byte, []int)
- func (e *Error) ErrorDetail() error
- func (m *Error) GetDetail() isError_Detail
- func (m *Error) GetPGError() *cockroach_pgerror.Error
- func (m *Error) GetRetryableTxnError() *cockroach_roachpb2.UnhandledRetryableError
- func (m *Error) Marshal() (dAtA []byte, err error)
- func (m *Error) MarshalTo(dAtA []byte) (int, error)
- func (*Error) ProtoMessage()
- func (m *Error) Reset()
- func (m *Error) Size() (n int)
- func (e *Error) String() string
- func (m *Error) Unmarshal(dAtA []byte) error
- func (*Error) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type Error_PGError
- type Error_RetryableTxnError
- type EvalContext
- func (*EvalContext) Descriptor() ([]byte, []int)
- func (m *EvalContext) Marshal() (dAtA []byte, err error)
- func (m *EvalContext) MarshalTo(dAtA []byte) (int, error)
- func (*EvalContext) ProtoMessage()
- func (m *EvalContext) Reset()
- func (m *EvalContext) Size() (n int)
- func (m *EvalContext) String() string
- func (m *EvalContext) Unmarshal(dAtA []byte) error
- type Expression
- func (*Expression) Descriptor() ([]byte, []int)
- func (m *Expression) Marshal() (dAtA []byte, err error)
- func (m *Expression) MarshalTo(dAtA []byte) (int, error)
- func (*Expression) ProtoMessage()
- func (m *Expression) Reset()
- func (m *Expression) Size() (n int)
- func (m *Expression) String() string
- func (m *Expression) Unmarshal(dAtA []byte) error
- type Flow
- type FlowCtx
- type FlowID
- type FlowSpec
- func (*FlowSpec) Descriptor() ([]byte, []int)
- func (m *FlowSpec) Marshal() (dAtA []byte, err error)
- func (m *FlowSpec) MarshalTo(dAtA []byte) (int, error)
- func (*FlowSpec) ProtoMessage()
- func (m *FlowSpec) Reset()
- func (m *FlowSpec) Size() (n int)
- func (m *FlowSpec) String() string
- func (m *FlowSpec) Unmarshal(dAtA []byte) error
- type HashJoinerSpec
- func (*HashJoinerSpec) Descriptor() ([]byte, []int)
- func (m *HashJoinerSpec) Marshal() (dAtA []byte, err error)
- func (m *HashJoinerSpec) MarshalTo(dAtA []byte) (int, error)
- func (*HashJoinerSpec) ProtoMessage()
- func (m *HashJoinerSpec) Reset()
- func (m *HashJoinerSpec) Size() (n int)
- func (m *HashJoinerSpec) String() string
- func (m *HashJoinerSpec) Unmarshal(dAtA []byte) error
- type InputSyncSpec
- func (*InputSyncSpec) Descriptor() ([]byte, []int)
- func (m *InputSyncSpec) Marshal() (dAtA []byte, err error)
- func (m *InputSyncSpec) MarshalTo(dAtA []byte) (int, error)
- func (*InputSyncSpec) ProtoMessage()
- func (m *InputSyncSpec) Reset()
- func (m *InputSyncSpec) Size() (n int)
- func (m *InputSyncSpec) String() string
- func (m *InputSyncSpec) Unmarshal(dAtA []byte) error
- type InputSyncSpec_Type
- type JoinReaderSpec
- func (*JoinReaderSpec) Descriptor() ([]byte, []int)
- func (m *JoinReaderSpec) Marshal() (dAtA []byte, err error)
- func (m *JoinReaderSpec) MarshalTo(dAtA []byte) (int, error)
- func (*JoinReaderSpec) ProtoMessage()
- func (m *JoinReaderSpec) Reset()
- func (m *JoinReaderSpec) Size() (n int)
- func (m *JoinReaderSpec) String() string
- func (m *JoinReaderSpec) Unmarshal(dAtA []byte) error
- type JoinType
- type MergeJoinerSpec
- func (*MergeJoinerSpec) Descriptor() ([]byte, []int)
- func (m *MergeJoinerSpec) Marshal() (dAtA []byte, err error)
- func (m *MergeJoinerSpec) MarshalTo(dAtA []byte) (int, error)
- func (*MergeJoinerSpec) ProtoMessage()
- func (m *MergeJoinerSpec) Reset()
- func (m *MergeJoinerSpec) Size() (n int)
- func (m *MergeJoinerSpec) String() string
- func (m *MergeJoinerSpec) Unmarshal(dAtA []byte) error
- type MultiplexedRowChannel
- func (mrc *MultiplexedRowChannel) ConsumerClosed()
- func (mrc *MultiplexedRowChannel) ConsumerDone()
- func (mrc *MultiplexedRowChannel) Init(numSenders int, types []sqlbase.ColumnType)
- func (mrc *MultiplexedRowChannel) Next() (row sqlbase.EncDatumRow, meta ProducerMetadata)
- func (mrc *MultiplexedRowChannel) ProducerDone()
- func (mrc *MultiplexedRowChannel) Push(row sqlbase.EncDatumRow, meta ProducerMetadata) ConsumerStatus
- func (mrc *MultiplexedRowChannel) Types() []sqlbase.ColumnType
- type MutationFilter
- type NoMetadataRowSource
- type NoopCoreSpec
- func (*NoopCoreSpec) Descriptor() ([]byte, []int)
- func (m *NoopCoreSpec) Marshal() (dAtA []byte, err error)
- func (m *NoopCoreSpec) MarshalTo(dAtA []byte) (int, error)
- func (*NoopCoreSpec) ProtoMessage()
- func (m *NoopCoreSpec) Reset()
- func (m *NoopCoreSpec) Size() (n int)
- func (m *NoopCoreSpec) String() string
- func (m *NoopCoreSpec) Unmarshal(dAtA []byte) error
- type Ordering
- func (*Ordering) Descriptor() ([]byte, []int)
- func (m *Ordering) Marshal() (dAtA []byte, err error)
- func (m *Ordering) MarshalTo(dAtA []byte) (int, error)
- func (*Ordering) ProtoMessage()
- func (m *Ordering) Reset()
- func (m *Ordering) Size() (n int)
- func (m *Ordering) String() string
- func (m *Ordering) Unmarshal(dAtA []byte) error
- type Ordering_Column
- func (*Ordering_Column) Descriptor() ([]byte, []int)
- func (m *Ordering_Column) Marshal() (dAtA []byte, err error)
- func (m *Ordering_Column) MarshalTo(dAtA []byte) (int, error)
- func (*Ordering_Column) ProtoMessage()
- func (m *Ordering_Column) Reset()
- func (m *Ordering_Column) Size() (n int)
- func (m *Ordering_Column) String() string
- func (m *Ordering_Column) Unmarshal(dAtA []byte) error
- type Ordering_Column_Direction
- type OutputRouterSpec
- func (*OutputRouterSpec) Descriptor() ([]byte, []int)
- func (m *OutputRouterSpec) Marshal() (dAtA []byte, err error)
- func (m *OutputRouterSpec) MarshalTo(dAtA []byte) (int, error)
- func (*OutputRouterSpec) ProtoMessage()
- func (m *OutputRouterSpec) Reset()
- func (m *OutputRouterSpec) Size() (n int)
- func (m *OutputRouterSpec) String() string
- func (m *OutputRouterSpec) Unmarshal(dAtA []byte) error
- type OutputRouterSpec_RangeRouterSpec
- func (*OutputRouterSpec_RangeRouterSpec) Descriptor() ([]byte, []int)
- func (m *OutputRouterSpec_RangeRouterSpec) Marshal() (dAtA []byte, err error)
- func (m *OutputRouterSpec_RangeRouterSpec) MarshalTo(dAtA []byte) (int, error)
- func (*OutputRouterSpec_RangeRouterSpec) ProtoMessage()
- func (m *OutputRouterSpec_RangeRouterSpec) Reset()
- func (m *OutputRouterSpec_RangeRouterSpec) Size() (n int)
- func (m *OutputRouterSpec_RangeRouterSpec) String() string
- func (m *OutputRouterSpec_RangeRouterSpec) Unmarshal(dAtA []byte) error
- type OutputRouterSpec_RangeRouterSpec_ColumnEncoding
- func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Descriptor() ([]byte, []int)
- func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Marshal() (dAtA []byte, err error)
- func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) MarshalTo(dAtA []byte) (int, error)
- func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) ProtoMessage()
- func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Reset()
- func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Size() (n int)
- func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) String() string
- func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Unmarshal(dAtA []byte) error
- type OutputRouterSpec_RangeRouterSpec_Span
- func (*OutputRouterSpec_RangeRouterSpec_Span) Descriptor() ([]byte, []int)
- func (m *OutputRouterSpec_RangeRouterSpec_Span) Marshal() (dAtA []byte, err error)
- func (m *OutputRouterSpec_RangeRouterSpec_Span) MarshalTo(dAtA []byte) (int, error)
- func (*OutputRouterSpec_RangeRouterSpec_Span) ProtoMessage()
- func (m *OutputRouterSpec_RangeRouterSpec_Span) Reset()
- func (m *OutputRouterSpec_RangeRouterSpec_Span) Size() (n int)
- func (m *OutputRouterSpec_RangeRouterSpec_Span) String() string
- func (m *OutputRouterSpec_RangeRouterSpec_Span) Unmarshal(dAtA []byte) error
- type OutputRouterSpec_Type
- type PostProcessSpec
- func (*PostProcessSpec) Descriptor() ([]byte, []int)
- func (m *PostProcessSpec) Marshal() (dAtA []byte, err error)
- func (m *PostProcessSpec) MarshalTo(dAtA []byte) (int, error)
- func (*PostProcessSpec) ProtoMessage()
- func (m *PostProcessSpec) Reset()
- func (m *PostProcessSpec) Size() (n int)
- func (m *PostProcessSpec) String() string
- func (m *PostProcessSpec) Unmarshal(dAtA []byte) error
- type ProcOutputHelper
- type Processor
- type ProcessorCoreUnion
- func (*ProcessorCoreUnion) Descriptor() ([]byte, []int)
- func (this *ProcessorCoreUnion) GetValue() interface{}
- func (m *ProcessorCoreUnion) Marshal() (dAtA []byte, err error)
- func (m *ProcessorCoreUnion) MarshalTo(dAtA []byte) (int, error)
- func (*ProcessorCoreUnion) ProtoMessage()
- func (m *ProcessorCoreUnion) Reset()
- func (this *ProcessorCoreUnion) SetValue(value interface{}) bool
- func (m *ProcessorCoreUnion) Size() (n int)
- func (m *ProcessorCoreUnion) String() string
- func (m *ProcessorCoreUnion) Unmarshal(dAtA []byte) error
- type ProcessorSpec
- func (*ProcessorSpec) Descriptor() ([]byte, []int)
- func (m *ProcessorSpec) Marshal() (dAtA []byte, err error)
- func (m *ProcessorSpec) MarshalTo(dAtA []byte) (int, error)
- func (*ProcessorSpec) ProtoMessage()
- func (m *ProcessorSpec) Reset()
- func (m *ProcessorSpec) Size() (n int)
- func (m *ProcessorSpec) String() string
- func (m *ProcessorSpec) Unmarshal(dAtA []byte) error
- type ProducerData
- func (*ProducerData) Descriptor() ([]byte, []int)
- func (m *ProducerData) Marshal() (dAtA []byte, err error)
- func (m *ProducerData) MarshalTo(dAtA []byte) (int, error)
- func (*ProducerData) ProtoMessage()
- func (m *ProducerData) Reset()
- func (m *ProducerData) Size() (n int)
- func (m *ProducerData) String() string
- func (m *ProducerData) Unmarshal(dAtA []byte) error
- type ProducerHeader
- func (*ProducerHeader) Descriptor() ([]byte, []int)
- func (m *ProducerHeader) Marshal() (dAtA []byte, err error)
- func (m *ProducerHeader) MarshalTo(dAtA []byte) (int, error)
- func (*ProducerHeader) ProtoMessage()
- func (m *ProducerHeader) Reset()
- func (m *ProducerHeader) Size() (n int)
- func (m *ProducerHeader) String() string
- func (m *ProducerHeader) Unmarshal(dAtA []byte) error
- type ProducerMessage
- func (*ProducerMessage) Descriptor() ([]byte, []int)
- func (m *ProducerMessage) Marshal() (dAtA []byte, err error)
- func (m *ProducerMessage) MarshalTo(dAtA []byte) (int, error)
- func (*ProducerMessage) ProtoMessage()
- func (m *ProducerMessage) Reset()
- func (m *ProducerMessage) Size() (n int)
- func (m *ProducerMessage) String() string
- func (m *ProducerMessage) Unmarshal(dAtA []byte) error
- type ProducerMetadata
- type ReadCSVSpec
- func (*ReadCSVSpec) Descriptor() ([]byte, []int)
- func (m *ReadCSVSpec) Marshal() (dAtA []byte, err error)
- func (m *ReadCSVSpec) MarshalTo(dAtA []byte) (int, error)
- func (*ReadCSVSpec) ProtoMessage()
- func (m *ReadCSVSpec) Reset()
- func (m *ReadCSVSpec) Size() (n int)
- func (m *ReadCSVSpec) String() string
- func (m *ReadCSVSpec) Unmarshal(dAtA []byte) error
- type RemoteProducerMetadata
- func (*RemoteProducerMetadata) Descriptor() ([]byte, []int)
- func (m *RemoteProducerMetadata) GetError() *Error
- func (m *RemoteProducerMetadata) GetRangeInfo() *RemoteProducerMetadata_RangeInfos
- func (m *RemoteProducerMetadata) GetTraceData() *RemoteProducerMetadata_TraceData
- func (m *RemoteProducerMetadata) GetValue() isRemoteProducerMetadata_Value
- func (m *RemoteProducerMetadata) Marshal() (dAtA []byte, err error)
- func (m *RemoteProducerMetadata) MarshalTo(dAtA []byte) (int, error)
- func (*RemoteProducerMetadata) ProtoMessage()
- func (m *RemoteProducerMetadata) Reset()
- func (m *RemoteProducerMetadata) Size() (n int)
- func (m *RemoteProducerMetadata) String() string
- func (m *RemoteProducerMetadata) Unmarshal(dAtA []byte) error
- func (*RemoteProducerMetadata) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, ...)
- type RemoteProducerMetadata_Error
- type RemoteProducerMetadata_RangeInfo
- type RemoteProducerMetadata_RangeInfos
- func (*RemoteProducerMetadata_RangeInfos) Descriptor() ([]byte, []int)
- func (m *RemoteProducerMetadata_RangeInfos) Marshal() (dAtA []byte, err error)
- func (m *RemoteProducerMetadata_RangeInfos) MarshalTo(dAtA []byte) (int, error)
- func (*RemoteProducerMetadata_RangeInfos) ProtoMessage()
- func (m *RemoteProducerMetadata_RangeInfos) Reset()
- func (m *RemoteProducerMetadata_RangeInfos) Size() (n int)
- func (m *RemoteProducerMetadata_RangeInfos) String() string
- func (m *RemoteProducerMetadata_RangeInfos) Unmarshal(dAtA []byte) error
- type RemoteProducerMetadata_TraceData
- func (*RemoteProducerMetadata_TraceData) Descriptor() ([]byte, []int)
- func (m *RemoteProducerMetadata_TraceData) Marshal() (dAtA []byte, err error)
- func (m *RemoteProducerMetadata_TraceData) MarshalTo(dAtA []byte) (int, error)
- func (*RemoteProducerMetadata_TraceData) ProtoMessage()
- func (m *RemoteProducerMetadata_TraceData) Reset()
- func (m *RemoteProducerMetadata_TraceData) Size() (n int)
- func (m *RemoteProducerMetadata_TraceData) String() string
- func (m *RemoteProducerMetadata_TraceData) Unmarshal(dAtA []byte) error
- type RemoteProducerMetadata_TraceData_
- type RepeatableRowSource
- type RowBuffer
- func (rb *RowBuffer) ConsumerClosed()
- func (rb *RowBuffer) ConsumerDone()
- func (rb *RowBuffer) Next() (sqlbase.EncDatumRow, ProducerMetadata)
- func (rb *RowBuffer) ProducerDone()
- func (rb *RowBuffer) Push(row sqlbase.EncDatumRow, meta ProducerMetadata) ConsumerStatus
- func (rb *RowBuffer) Types() []sqlbase.ColumnType
- type RowBufferArgs
- type RowChannel
- func (rc *RowChannel) ConsumerClosed()
- func (rc *RowChannel) ConsumerDone()
- func (rc *RowChannel) Init(types []sqlbase.ColumnType)
- func (rc *RowChannel) InitWithBufSize(types []sqlbase.ColumnType, chanBufSize int)
- func (rc *RowChannel) Next() (sqlbase.EncDatumRow, ProducerMetadata)
- func (rc *RowChannel) ProducerDone()
- func (rc *RowChannel) Push(row sqlbase.EncDatumRow, meta ProducerMetadata) ConsumerStatus
- func (rc *RowChannel) Types() []sqlbase.ColumnType
- type RowChannelMsg
- type RowDisposer
- type RowReceiver
- type RowSource
- type SSTWriterSpec
- func (*SSTWriterSpec) Descriptor() ([]byte, []int)
- func (m *SSTWriterSpec) Marshal() (dAtA []byte, err error)
- func (m *SSTWriterSpec) MarshalTo(dAtA []byte) (int, error)
- func (*SSTWriterSpec) ProtoMessage()
- func (m *SSTWriterSpec) Reset()
- func (m *SSTWriterSpec) Size() (n int)
- func (m *SSTWriterSpec) String() string
- func (m *SSTWriterSpec) Unmarshal(dAtA []byte) error
- type ServerConfig
- type ServerImpl
- func (ds *ServerImpl) FlowStream(stream DistSQL_FlowStreamServer) error
- func (ds *ServerImpl) RunSyncFlow(stream DistSQL_RunSyncFlowServer) error
- func (ds *ServerImpl) SetupFlow(ctx context.Context, req *SetupFlowRequest) (*SimpleResponse, error)
- func (ds *ServerImpl) SetupSyncFlow(ctx context.Context, req *SetupFlowRequest, output RowReceiver) (context.Context, *Flow, error)
- func (ds *ServerImpl) Start()
- type SetupFlowRequest
- func (*SetupFlowRequest) Descriptor() ([]byte, []int)
- func (m *SetupFlowRequest) Marshal() (dAtA []byte, err error)
- func (m *SetupFlowRequest) MarshalTo(dAtA []byte) (int, error)
- func (*SetupFlowRequest) ProtoMessage()
- func (m *SetupFlowRequest) Reset()
- func (m *SetupFlowRequest) Size() (n int)
- func (m *SetupFlowRequest) String() string
- func (m *SetupFlowRequest) Unmarshal(dAtA []byte) error
- type SimpleResponse
- func (*SimpleResponse) Descriptor() ([]byte, []int)
- func (m *SimpleResponse) Marshal() (dAtA []byte, err error)
- func (m *SimpleResponse) MarshalTo(dAtA []byte) (int, error)
- func (*SimpleResponse) ProtoMessage()
- func (m *SimpleResponse) Reset()
- func (m *SimpleResponse) Size() (n int)
- func (m *SimpleResponse) String() string
- func (m *SimpleResponse) Unmarshal(dAtA []byte) error
- type SorterSpec
- func (*SorterSpec) Descriptor() ([]byte, []int)
- func (m *SorterSpec) Marshal() (dAtA []byte, err error)
- func (m *SorterSpec) MarshalTo(dAtA []byte) (int, error)
- func (*SorterSpec) ProtoMessage()
- func (m *SorterSpec) Reset()
- func (m *SorterSpec) Size() (n int)
- func (m *SorterSpec) String() string
- func (m *SorterSpec) Unmarshal(dAtA []byte) error
- type StreamDecoder
- type StreamEncoder
- type StreamEndpointSpec
- func (*StreamEndpointSpec) Descriptor() ([]byte, []int)
- func (m *StreamEndpointSpec) Marshal() (dAtA []byte, err error)
- func (m *StreamEndpointSpec) MarshalTo(dAtA []byte) (int, error)
- func (*StreamEndpointSpec) ProtoMessage()
- func (m *StreamEndpointSpec) Reset()
- func (m *StreamEndpointSpec) Size() (n int)
- func (m *StreamEndpointSpec) String() string
- func (m *StreamEndpointSpec) Unmarshal(dAtA []byte) error
- type StreamEndpointSpec_Type
- type StreamID
- type TableReaderSpan
- func (*TableReaderSpan) Descriptor() ([]byte, []int)
- func (m *TableReaderSpan) Marshal() (dAtA []byte, err error)
- func (m *TableReaderSpan) MarshalTo(dAtA []byte) (int, error)
- func (*TableReaderSpan) ProtoMessage()
- func (m *TableReaderSpan) Reset()
- func (m *TableReaderSpan) Size() (n int)
- func (m *TableReaderSpan) String() string
- func (m *TableReaderSpan) Unmarshal(dAtA []byte) error
- type TableReaderSpec
- func (*TableReaderSpec) Descriptor() ([]byte, []int)
- func (m *TableReaderSpec) Marshal() (dAtA []byte, err error)
- func (m *TableReaderSpec) MarshalTo(dAtA []byte) (int, error)
- func (*TableReaderSpec) ProtoMessage()
- func (m *TableReaderSpec) Reset()
- func (m *TableReaderSpec) Size() (n int)
- func (m *TableReaderSpec) String() string
- func (m *TableReaderSpec) Unmarshal(dAtA []byte) error
- type TestingKnobs
- type ValuesCoreSpec
- func (*ValuesCoreSpec) Descriptor() ([]byte, []int)
- func (m *ValuesCoreSpec) Marshal() (dAtA []byte, err error)
- func (m *ValuesCoreSpec) MarshalTo(dAtA []byte) (int, error)
- func (*ValuesCoreSpec) ProtoMessage()
- func (m *ValuesCoreSpec) Reset()
- func (m *ValuesCoreSpec) Size() (n int)
- func (m *ValuesCoreSpec) String() string
- func (m *ValuesCoreSpec) Unmarshal(dAtA []byte) error
Constants ¶
const (
    FlowNotStarted flowStatus = iota
    FlowRunning
    FlowFinished
)
Flow status indicators.
Variables ¶
var (
    ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowApi   = fmt.Errorf("proto: integer overflow")
)
var (
    ErrInvalidLengthData = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowData   = fmt.Errorf("proto: integer overflow")
)
var (
    ErrInvalidLengthProcessors = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowProcessors   = fmt.Errorf("proto: integer overflow")
)
var AggregatorSpec_Func_name = map[int32]string{
0: "IDENT",
1: "AVG",
2: "BOOL_AND",
3: "BOOL_OR",
4: "CONCAT_AGG",
5: "COUNT",
7: "MAX",
8: "MIN",
9: "STDDEV",
10: "SUM",
11: "SUM_INT",
12: "VARIANCE",
13: "XOR_AGG",
14: "COUNT_ROWS",
}
var AggregatorSpec_Func_value = map[string]int32{
"IDENT": 0,
"AVG": 1,
"BOOL_AND": 2,
"BOOL_OR": 3,
"CONCAT_AGG": 4,
"COUNT": 5,
"MAX": 7,
"MIN": 8,
"STDDEV": 9,
"SUM": 10,
"SUM_INT": 11,
"VARIANCE": 12,
"XOR_AGG": 13,
"COUNT_ROWS": 14,
}
var AlgebraicSetOpSpec_SetOpType_name = map[int32]string{
0: "Except_all",
}
var AlgebraicSetOpSpec_SetOpType_value = map[string]int32{
"Except_all": 0,
}
var BackfillerSpec_Type_name = map[int32]string{
0: "Invalid",
1: "Column",
2: "Index",
}
var BackfillerSpec_Type_value = map[string]int32{
"Invalid": 0,
"Column": 1,
"Index": 2,
}
var InputSyncSpec_Type_name = map[int32]string{
0: "UNORDERED",
1: "ORDERED",
}
var InputSyncSpec_Type_value = map[string]int32{
"UNORDERED": 0,
"ORDERED": 1,
}
var JoinType_name = map[int32]string{
0: "INNER",
1: "LEFT_OUTER",
2: "RIGHT_OUTER",
3: "FULL_OUTER",
}
var JoinType_value = map[string]int32{
"INNER": 0,
"LEFT_OUTER": 1,
"RIGHT_OUTER": 2,
"FULL_OUTER": 3,
}
var NewReadCSVProcessor func(*FlowCtx, ReadCSVSpec, RowReceiver) (Processor, error)
NewReadCSVProcessor is externally implemented and registered by ccl/sqlccl/csv.go.
var NewSSTWriterProcessor func(*FlowCtx, SSTWriterSpec, RowSource, RowReceiver) (Processor, error)
NewSSTWriterProcessor is externally implemented and registered by ccl/sqlccl/csv.go.
var Ordering_Column_Direction_name = map[int32]string{
0: "ASC",
1: "DESC",
}
var Ordering_Column_Direction_value = map[string]int32{
"ASC": 0,
"DESC": 1,
}
var OutputRouterSpec_Type_name = map[int32]string{
0: "PASS_THROUGH",
1: "MIRROR",
2: "BY_HASH",
3: "BY_RANGE",
}
var OutputRouterSpec_Type_value = map[string]int32{
"PASS_THROUGH": 0,
"MIRROR": 1,
"BY_HASH": 2,
"BY_RANGE": 3,
}
var StreamEndpointSpec_Type_name = map[int32]string{
0: "LOCAL",
1: "REMOTE",
2: "SYNC_RESPONSE",
}
var StreamEndpointSpec_Type_value = map[string]int32{
"LOCAL": 0,
"REMOTE": 1,
"SYNC_RESPONSE": 2,
}
Functions ¶
func ColumnMutationFilter ¶
func ColumnMutationFilter(m sqlbase.DescriptorMutation) bool
ColumnMutationFilter is a filter that allows mutations that add or drop columns.
func CompareEncDatumRowForMerge ¶
func CompareEncDatumRowForMerge(
    lhs, rhs sqlbase.EncDatumRow,
    leftOrdering, rightOrdering sqlbase.ColumnOrdering,
    da *sqlbase.DatumAlloc,
) (int, error)
CompareEncDatumRowForMerge compares two EncDatumRows for merging. When merging two streams while preserving the order (as in a MergeSort or a MergeJoin), compare the heads of the streams, emitting the one that sorts first. It allows either EncDatumRow to be nil if one of the streams is exhausted (and hence nil). CompareEncDatumRowForMerge returns 0 when both rows are nil, and a nil row is considered greater than any non-nil row. CompareEncDatumRowForMerge assumes that the two rows have the same columns in the same order, but can handle different ordering directions. It takes a DatumAlloc which is used for decoding if any underlying EncDatum is not yet decoded.
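A minimal sketch of the merge step described above; leftRow, rightRow, the two orderings, and the emit helper are hypothetical stand-ins for a processor's actual inputs:

    var da sqlbase.DatumAlloc
    // Compare the heads of the two streams; a nil head means that stream is
    // exhausted and compares as greater than any non-nil row.
    cmp, err := CompareEncDatumRowForMerge(leftRow, rightRow, leftOrdering, rightOrdering, &da)
    if err != nil {
        return err
    }
    if cmp <= 0 {
        emit(leftRow) // the left row sorts first (or the two are equal)
    } else {
        emit(rightRow)
    }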
func ConvertBackfillError ¶
func ConvertBackfillError(
    ctx context.Context, tableDesc *sqlbase.TableDescriptor, b *client.Batch,
) error
ConvertBackfillError returns a cleaner SQL error for a failed Batch.
func DrainAndClose ¶
func DrainAndClose(ctx context.Context, dst RowReceiver, cause error, srcs ...RowSource)
DrainAndClose is a version of DrainAndForwardMetadata that drains multiple sources. These sources are assumed to be the only producers left for dst, so dst is closed once they're all exhausted (this is different from DrainAndForwardMetadata).
If cause is specified, it is forwarded to the consumer before all the drain metadata. This is intended to have been the error, if any, that caused the draining.
srcs can be nil.
All errors are forwarded to the consumer.
func DrainAndForwardMetadata ¶
func DrainAndForwardMetadata(ctx context.Context, src RowSource, dst RowReceiver)
DrainAndForwardMetadata calls src.ConsumerDone() (thus asking src for draining metadata) and then forwards all the metadata to dst.
When this returns, src has been properly closed (regardless of the presence or absence of an error). dst, however, has not been closed; someone else must call dst.ProducerDone() when all producers have finished draining.
It is OK to call DrainAndForwardMetadata() multiple times concurrently on the same dst (as RowReceiver.Push() is guaranteed to be thread safe).
TODO(andrei): errors seen while draining should be reported to the gateway, but they shouldn't fail a SQL query.
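A sketch of the typical call pattern over hypothetical srcs and dst; this is essentially what DrainAndClose automates (minus forwarding a cause):

    // Drain each remaining source into dst, then close dst ourselves, since
    // DrainAndForwardMetadata deliberately leaves the receiver open.
    for _, src := range srcs {
        DrainAndForwardMetadata(ctx, src, dst)
    }
    dst.ProducerDone()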
func FlowVerIsCompatible ¶
func FlowVerIsCompatible(flowVer, minAcceptedVersion, serverVersion DistSQLVersion) bool
FlowVerIsCompatible checks whether a flow's version is compatible with this node's DistSQL version.
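For example, a server-side check might gate flow setup on this node's version window; req is a hypothetical *SetupFlowRequest carrying the planner-populated version:

    if !FlowVerIsCompatible(req.Version, MinAcceptedVersion, Version) {
        return fmt.Errorf("version mismatch in flow request: %d; this node accepts %d through %d",
            req.Version, MinAcceptedVersion, Version)
    }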
func GeneratePlanDiagram ¶
func GeneratePlanDiagram(flows map[roachpb.NodeID]FlowSpec, w io.Writer) error
GeneratePlanDiagram generates the JSON data for a flow diagram. There should be one FlowSpec per node. The function assumes that StreamIDs are unique across all flows.
func GeneratePlanDiagramWithURL ¶
func GeneratePlanDiagramWithURL(flows map[roachpb.NodeID]FlowSpec) (string, url.URL, error)
GeneratePlanDiagramWithURL generates the JSON data for a flow diagram and a URL which encodes the diagram. There should be one FlowSpec per node. The function assumes that StreamIDs are unique across all flows.
func GetAggregateInfo ¶
func GetAggregateInfo(
    fn AggregatorSpec_Func, inputTypes ...sqlbase.ColumnType,
) (
    aggregateConstructor func(*parser.EvalContext) parser.AggregateFunc,
    returnType sqlbase.ColumnType,
    err error,
)
GetAggregateInfo returns the aggregate constructor and the return type for the given aggregate function when applied on the given type.
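A sketch of looking up and instantiating an aggregate; intType (a sqlbase.ColumnType for the input column) and evalCtx (a *parser.EvalContext from the flow) are assumed to exist:

    constructor, returnType, err := GetAggregateInfo(AggregatorSpec_SUM, intType)
    if err != nil {
        return err
    }
    aggFunc := constructor(evalCtx) // one parser.AggregateFunc instance per group
    _ = returnType                  // the ColumnType of the aggregate's result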
func GetResumeSpanIndexofMutationID ¶
func GetResumeSpanIndexofMutationID(tableDesc *sqlbase.TableDescriptor, mutationIdx int) int
GetResumeSpanIndexofMutationID returns the index of a resume span within a job that corresponds to the given mutation index and table descriptor.
func GetResumeSpansFromJob ¶
func GetResumeSpansFromJob(
    ctx context.Context,
    jobsRegistry *jobs.Registry,
    txn *client.Txn,
    jobID int64,
    mutationIdx int,
) ([]roachpb.Span, error)
GetResumeSpansFromJob returns a ResumeSpanList from a job, given a job ID and mutation index.
func IndexMutationFilter ¶
func IndexMutationFilter(m sqlbase.DescriptorMutation) bool
IndexMutationFilter is a filter that allows mutations that add indexes.
func ProcessInboundStream ¶
func ProcessInboundStream(
    ctx context.Context,
    stream DistSQL_FlowStreamServer,
    firstMsg *ProducerMessage,
    dst RowReceiver,
    f *Flow,
) error
ProcessInboundStream receives rows from a DistSQL_FlowStreamServer and sends them to a RowReceiver. Optionally processes an initial ProducerMessage that was already received (because the first message contains the flow and stream IDs, it needs to be received before we can get here).
func RegisterDistSQLServer ¶
func RegisterDistSQLServer(s *grpc.Server, srv DistSQLServer)
func SetResumeSpansInJob ¶
func SetResumeSpansInJob(
    ctx context.Context,
    spans []roachpb.Span,
    jobsRegistry *jobs.Registry,
    mutationIdx int,
    txn *client.Txn,
    jobID int64,
) error
SetResumeSpansInJob adds a list of resume spans to a job's details field.
func WriteResumeSpan ¶
func WriteResumeSpan(
    ctx context.Context,
    db *client.DB,
    id sqlbase.ID,
    origSpan roachpb.Span,
    resume roachpb.Span,
    mutationIdx int,
    jobsRegistry *jobs.Registry,
) error
WriteResumeSpan writes a checkpoint for the backfill work on origSpan. origSpan is the span of keys that were assigned to be backfilled; resume is the leftover work from origSpan.
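A sketch of checkpointing after a backfill chunk; db, tableDesc, origSpan, resume, mutationIdx, and jobsRegistry are hypothetical stand-ins for the backfiller's actual state:

    // resume covers the keys within origSpan that still need to be backfilled.
    if err := WriteResumeSpan(ctx, db, tableDesc.ID, origSpan, resume, mutationIdx, jobsRegistry); err != nil {
        return err
    }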
Types ¶
type AggregatorSpec ¶
type AggregatorSpec struct {
    // The group key is a subset of the columns in the input stream schema on the
    // basis of which we define our groups.
    GroupCols    []uint32                     `protobuf:"varint,2,rep,packed,name=group_cols,json=groupCols" json:"group_cols,omitempty"`
    Aggregations []AggregatorSpec_Aggregation `protobuf:"bytes,3,rep,name=aggregations" json:"aggregations"`
}
AggregatorSpec is the specification for an "aggregator" (processor core type, not the logical plan computation stage). An aggregator performs 'aggregation' in the SQL sense in that it groups rows and computes an aggregate for each group. The group is configured using the group key. The aggregator can be configured with one or more aggregation functions.
The "internal columns" of an Aggregator map 1-1 to the aggregations.
func (*AggregatorSpec) Descriptor ¶
func (*AggregatorSpec) Descriptor() ([]byte, []int)
func (*AggregatorSpec) Marshal ¶
func (m *AggregatorSpec) Marshal() (dAtA []byte, err error)
func (*AggregatorSpec) ProtoMessage ¶
func (*AggregatorSpec) ProtoMessage()
func (*AggregatorSpec) Reset ¶
func (m *AggregatorSpec) Reset()
func (*AggregatorSpec) Size ¶
func (m *AggregatorSpec) Size() (n int)
func (*AggregatorSpec) String ¶
func (m *AggregatorSpec) String() string
func (*AggregatorSpec) Unmarshal ¶
func (m *AggregatorSpec) Unmarshal(dAtA []byte) error
type AggregatorSpec_Aggregation ¶
type AggregatorSpec_Aggregation struct {
    Func AggregatorSpec_Func `protobuf:"varint,1,opt,name=func,enum=cockroach.sql.distsqlrun.AggregatorSpec_Func" json:"func"`
    // Aggregation functions with distinct = true operate like you would
    // expect '<FUNC> DISTINCT' to operate; the default behavior is the
    // '<FUNC> ALL' operation.
    Distinct bool `protobuf:"varint,2,opt,name=distinct" json:"distinct"`
    // The column index specifies the argument(s) to the aggregator function.
    //
    // Currently no functions take more than one argument.
    // COUNT_ROWS takes no arguments.
    ColIdx []uint32 `protobuf:"varint,5,rep,name=col_idx,json=colIdx" json:"col_idx,omitempty"`
    // If set, this column index specifies a boolean argument; rows for which
    // this value is not true don't contribute to this aggregation. This enables
    // the filter clause, e.g.:
    //   SELECT SUM(x) FILTER (WHERE y > 1), SUM(x) FILTER (WHERE y < 1) FROM t
    FilterColIdx *uint32 `protobuf:"varint,4,opt,name=filter_col_idx,json=filterColIdx" json:"filter_col_idx,omitempty"`
    // CRDB version 1.0 nodes were sending this field instead of the newer
    // col_idx. Generally, queries coming from these nodes are rejected through
    // the MinAcceptedVersion mechanism and so we shouldn't have to care about
    // this field, except when the --extra-1.0-compatibility flag is set; in
    // that case, this field is supported too.
    ColIdx_1_0 *uint32 `protobuf:"varint,3,opt,name=col_idx_1_0,json=colIdx10" json:"col_idx_1_0,omitempty"`
}
func (*AggregatorSpec_Aggregation) Descriptor ¶
func (*AggregatorSpec_Aggregation) Descriptor() ([]byte, []int)
func (AggregatorSpec_Aggregation) Equals ¶
func (a AggregatorSpec_Aggregation) Equals(b AggregatorSpec_Aggregation) bool
Equals returns true if two aggregation specifiers are identical (and thus will always yield the same result).
func (*AggregatorSpec_Aggregation) Marshal ¶
func (m *AggregatorSpec_Aggregation) Marshal() (dAtA []byte, err error)
func (*AggregatorSpec_Aggregation) MarshalTo ¶
func (m *AggregatorSpec_Aggregation) MarshalTo(dAtA []byte) (int, error)
func (*AggregatorSpec_Aggregation) ProtoMessage ¶
func (*AggregatorSpec_Aggregation) ProtoMessage()
func (*AggregatorSpec_Aggregation) Reset ¶
func (m *AggregatorSpec_Aggregation) Reset()
func (*AggregatorSpec_Aggregation) Size ¶
func (m *AggregatorSpec_Aggregation) Size() (n int)
func (*AggregatorSpec_Aggregation) String ¶
func (m *AggregatorSpec_Aggregation) String() string
func (*AggregatorSpec_Aggregation) Unmarshal ¶
func (m *AggregatorSpec_Aggregation) Unmarshal(dAtA []byte) error
type AggregatorSpec_Func ¶
type AggregatorSpec_Func int32
These mirror the aggregate functions supported by sql/parser. See sql/parser/aggregate_builtins.go.
const (
    // The identity function is set to be the default zero-value function,
    // returning the last value added.
    AggregatorSpec_IDENT      AggregatorSpec_Func = 0
    AggregatorSpec_AVG        AggregatorSpec_Func = 1
    AggregatorSpec_BOOL_AND   AggregatorSpec_Func = 2
    AggregatorSpec_BOOL_OR    AggregatorSpec_Func = 3
    AggregatorSpec_CONCAT_AGG AggregatorSpec_Func = 4
    AggregatorSpec_COUNT      AggregatorSpec_Func = 5
    AggregatorSpec_MAX        AggregatorSpec_Func = 7
    AggregatorSpec_MIN        AggregatorSpec_Func = 8
    AggregatorSpec_STDDEV     AggregatorSpec_Func = 9
    AggregatorSpec_SUM        AggregatorSpec_Func = 10
    AggregatorSpec_SUM_INT    AggregatorSpec_Func = 11
    AggregatorSpec_VARIANCE   AggregatorSpec_Func = 12
    AggregatorSpec_XOR_AGG    AggregatorSpec_Func = 13
    AggregatorSpec_COUNT_ROWS AggregatorSpec_Func = 14
)
func (AggregatorSpec_Func) Enum ¶
func (x AggregatorSpec_Func) Enum() *AggregatorSpec_Func
func (AggregatorSpec_Func) EnumDescriptor ¶
func (AggregatorSpec_Func) EnumDescriptor() ([]byte, []int)
func (AggregatorSpec_Func) String ¶
func (x AggregatorSpec_Func) String() string
func (*AggregatorSpec_Func) UnmarshalJSON ¶
func (x *AggregatorSpec_Func) UnmarshalJSON(data []byte) error
type AlgebraicSetOpSpec ¶
type AlgebraicSetOpSpec struct {
    // If the two input streams are both ordered by a common column ordering,
    // that ordering can be used to optimize resource usage in the processor.
    Ordering Ordering                     `protobuf:"bytes,1,opt,name=ordering" json:"ordering"`
    OpType   AlgebraicSetOpSpec_SetOpType `protobuf:"varint,2,opt,name=op_type,json=opType,enum=cockroach.sql.distsqlrun.AlgebraicSetOpSpec_SetOpType" json:"op_type"`
}
AlgebraicSetOpSpec is a specification for algebraic set operations: currently only the EXCEPT ALL set operation, but extensible to other set operations. INTERSECT ALL is implemented with HashJoinerSpec, and UNION ALL with a no-op processor. EXCEPT/INTERSECT/UNION use a DISTINCT processor at the end. The two input streams should have the same schema. The ordering of the left stream will be preserved in the output stream.
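A sketch of an EXCEPT ALL spec over two inputs with identical schemas and no exploitable common ordering:

    spec := AlgebraicSetOpSpec{
        // Ordering is left zero-valued: the inputs share no common ordering.
        OpType: AlgebraicSetOpSpec_Except_all,
    }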
func (*AlgebraicSetOpSpec) Descriptor ¶
func (*AlgebraicSetOpSpec) Descriptor() ([]byte, []int)
func (*AlgebraicSetOpSpec) Marshal ¶
func (m *AlgebraicSetOpSpec) Marshal() (dAtA []byte, err error)
func (*AlgebraicSetOpSpec) MarshalTo ¶
func (m *AlgebraicSetOpSpec) MarshalTo(dAtA []byte) (int, error)
func (*AlgebraicSetOpSpec) ProtoMessage ¶
func (*AlgebraicSetOpSpec) ProtoMessage()
func (*AlgebraicSetOpSpec) Reset ¶
func (m *AlgebraicSetOpSpec) Reset()
func (*AlgebraicSetOpSpec) Size ¶
func (m *AlgebraicSetOpSpec) Size() (n int)
func (*AlgebraicSetOpSpec) String ¶
func (m *AlgebraicSetOpSpec) String() string
func (*AlgebraicSetOpSpec) Unmarshal ¶
func (m *AlgebraicSetOpSpec) Unmarshal(dAtA []byte) error
type AlgebraicSetOpSpec_SetOpType ¶
type AlgebraicSetOpSpec_SetOpType int32
const (
    AlgebraicSetOpSpec_Except_all AlgebraicSetOpSpec_SetOpType = 0
)
func (AlgebraicSetOpSpec_SetOpType) Enum ¶
func (x AlgebraicSetOpSpec_SetOpType) Enum() *AlgebraicSetOpSpec_SetOpType
func (AlgebraicSetOpSpec_SetOpType) EnumDescriptor ¶
func (AlgebraicSetOpSpec_SetOpType) EnumDescriptor() ([]byte, []int)
func (AlgebraicSetOpSpec_SetOpType) String ¶
func (x AlgebraicSetOpSpec_SetOpType) String() string
func (*AlgebraicSetOpSpec_SetOpType) UnmarshalJSON ¶
func (x *AlgebraicSetOpSpec_SetOpType) UnmarshalJSON(data []byte) error
type BackfillerSpec ¶
type BackfillerSpec struct {
    Type  BackfillerSpec_Type                    `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.BackfillerSpec_Type" json:"type"`
    Table cockroach_sql_sqlbase1.TableDescriptor `protobuf:"bytes,2,opt,name=table" json:"table"`
    // Sections of the table to be backfilled.
    Spans []TableReaderSpan `protobuf:"bytes,3,rep,name=spans" json:"spans"`
    // Run the backfill for approximately this duration.
    // The backfill will always process at least one backfill chunk.
    Duration time.Duration `protobuf:"varint,4,opt,name=duration,casttype=time.Duration" json:"duration"`
    // The backfill involves a complete table scan in chunks,
    // where each chunk is a transactional read of a set of rows
    // along with a backfill for the rows. This is the maximum number
    // of entries backfilled per chunk.
    ChunkSize int64 `protobuf:"varint,5,opt,name=chunk_size,json=chunkSize" json:"chunk_size"`
    // Any other (leased) table descriptors necessary for the
    // backfiller to do its job, such as the descriptors for tables with fk
    // relationships to the table being modified.
    OtherTables []cockroach_sql_sqlbase1.TableDescriptor `protobuf:"bytes,6,rep,name=other_tables,json=otherTables" json:"other_tables"`
    // The timestamp to perform index backfill historical scans at.
    ReadAsOf cockroach_util_hlc.Timestamp `protobuf:"bytes,7,opt,name=readAsOf" json:"readAsOf"`
}
BackfillerSpec is the specification for a "schema change backfiller". The created backfill processor runs a backfill for the first mutations in the table descriptor mutation list with the same mutation id and type. A backfiller processor performs KV operations to retrieve rows for a table and backfills the new indexes/columns contained in the table descriptor. It checkpoints its progress by updating the table descriptor in the database; it emits no rows and supports no post-processing.
func (*BackfillerSpec) Descriptor ¶
func (*BackfillerSpec) Descriptor() ([]byte, []int)
func (*BackfillerSpec) Marshal ¶
func (m *BackfillerSpec) Marshal() (dAtA []byte, err error)
func (*BackfillerSpec) ProtoMessage ¶
func (*BackfillerSpec) ProtoMessage()
func (*BackfillerSpec) Reset ¶
func (m *BackfillerSpec) Reset()
func (*BackfillerSpec) Size ¶
func (m *BackfillerSpec) Size() (n int)
func (*BackfillerSpec) String ¶
func (m *BackfillerSpec) String() string
func (*BackfillerSpec) Unmarshal ¶
func (m *BackfillerSpec) Unmarshal(dAtA []byte) error
type BackfillerSpec_Type ¶
type BackfillerSpec_Type int32
const (
    BackfillerSpec_Invalid BackfillerSpec_Type = 0
    BackfillerSpec_Column  BackfillerSpec_Type = 1
    BackfillerSpec_Index   BackfillerSpec_Type = 2
)
func (BackfillerSpec_Type) Enum ¶
func (x BackfillerSpec_Type) Enum() *BackfillerSpec_Type
func (BackfillerSpec_Type) EnumDescriptor ¶
func (BackfillerSpec_Type) EnumDescriptor() ([]byte, []int)
func (BackfillerSpec_Type) String ¶
func (x BackfillerSpec_Type) String() string
func (*BackfillerSpec_Type) UnmarshalJSON ¶
func (x *BackfillerSpec_Type) UnmarshalJSON(data []byte) error
type BufferedRecord ¶
type BufferedRecord struct {
    Row  sqlbase.EncDatumRow
    Meta ProducerMetadata
}
BufferedRecord represents a row or metadata record that has been buffered inside a RowBuffer.
type CancellableRowReceiver ¶
type CancellableRowReceiver interface {
    // SetCancelled sets this RowReceiver as cancelled. Subsequent Push()es (if
    // any) return a ConsumerStatus of ConsumerClosed.
    SetCancelled()
}
CancellableRowReceiver is a special type of RowReceiver that can be cancelled asynchronously (i.e. concurrently with or after Push()es and ProducerDone()s). Once cancelled, subsequent Push()es return ConsumerClosed. Implemented by distSQLReceiver, which is the final RowReceiver and the origin point for propagation of ConsumerClosed consumer statuses.
type ConsumerHandshake ¶
type ConsumerHandshake struct {
    // consumer_scheduled is true if the flow that's consuming this stream has
    // already been scheduled and so it is ready to consume data. If this is
    // false, then the consumer has not yet been scheduled. In this case, the
    // server (i.e. the consumer node) will send another ConsumerHandshake with
    // consumer_scheduled = true when the consumer is finally scheduled (unless
    // the scheduling timeout fires first, in which case the stream will be
    // closed server-side).
    ConsumerScheduled bool `protobuf:"varint,1,opt,name=consumer_scheduled,json=consumerScheduled" json:"consumer_scheduled"`
    // If consumer_scheduled is false, then this indicates the deadline for the
    // scheduling of the consumer flow. If the flow is not scheduled within that
    // deadline, this stream will be disconnected by the server-side.
    ConsumerScheduleDeadline *time.Time `` /* 136-byte string literal not displayed */
    // The server's DistSQL version range.
    Version            DistSQLVersion `protobuf:"varint,3,opt,name=version,casttype=DistSQLVersion" json:"version"`
    MinAcceptedVersion DistSQLVersion `protobuf:"varint,4,opt,name=min_accepted_version,json=minAcceptedVersion,casttype=DistSQLVersion" json:"min_accepted_version"`
}
ConsumerHandshake is the first one or two messages sent in the consumer->producer direction on a stream. It informs the producer about the status of the consumer flow. Introduced in version 1.1 for future use by producers.
func (*ConsumerHandshake) Descriptor ¶
func (*ConsumerHandshake) Descriptor() ([]byte, []int)
func (*ConsumerHandshake) Marshal ¶
func (m *ConsumerHandshake) Marshal() (dAtA []byte, err error)
func (*ConsumerHandshake) MarshalTo ¶
func (m *ConsumerHandshake) MarshalTo(dAtA []byte) (int, error)
func (*ConsumerHandshake) ProtoMessage ¶
func (*ConsumerHandshake) ProtoMessage()
func (*ConsumerHandshake) Reset ¶
func (m *ConsumerHandshake) Reset()
func (*ConsumerHandshake) Size ¶
func (m *ConsumerHandshake) Size() (n int)
func (*ConsumerHandshake) String ¶
func (m *ConsumerHandshake) String() string
func (*ConsumerHandshake) Unmarshal ¶
func (m *ConsumerHandshake) Unmarshal(dAtA []byte) error
type ConsumerSignal ¶
type ConsumerSignal struct {
    // The consumer is done (doesn't need to consume any more rows) and is
    // asking the producer to push whatever trailing metadata it has and close
    // its stream.
    DrainRequest *DrainRequest `protobuf:"bytes,1,opt,name=drain_request,json=drainRequest" json:"drain_request,omitempty"`
    // Used in the RunSyncFlow case; the first message on the client stream
    // must contain this message.
    SetupFlowRequest *SetupFlowRequest `protobuf:"bytes,2,opt,name=setup_flow_request,json=setupFlowRequest" json:"setup_flow_request,omitempty"`
    // Consumer->Producer handshake messages. See message definition.
    Handshake *ConsumerHandshake `protobuf:"bytes,3,opt,name=handshake" json:"handshake,omitempty"`
}
ConsumerSignal messages flow from consumer to producer (i.e. from the RPC server to the client) for the FlowStream RPC.
func (*ConsumerSignal) Descriptor ¶
func (*ConsumerSignal) Descriptor() ([]byte, []int)
func (*ConsumerSignal) GetValue ¶
func (this *ConsumerSignal) GetValue() interface{}
func (*ConsumerSignal) Marshal ¶
func (m *ConsumerSignal) Marshal() (dAtA []byte, err error)
func (*ConsumerSignal) ProtoMessage ¶
func (*ConsumerSignal) ProtoMessage()
func (*ConsumerSignal) Reset ¶
func (m *ConsumerSignal) Reset()
func (*ConsumerSignal) SetValue ¶
func (this *ConsumerSignal) SetValue(value interface{}) bool
func (*ConsumerSignal) Size ¶
func (m *ConsumerSignal) Size() (n int)
func (*ConsumerSignal) String ¶
func (m *ConsumerSignal) String() string
func (*ConsumerSignal) Unmarshal ¶
func (m *ConsumerSignal) Unmarshal(dAtA []byte) error
type ConsumerStatus ¶
type ConsumerStatus uint32
ConsumerStatus is the type returned by RowReceiver.Push(), informing a producer of a consumer's state.
const (
    // NeedMoreRows indicates that the consumer is still expecting more rows.
    NeedMoreRows ConsumerStatus = iota
    // DrainRequested indicates that the consumer will not process any more data
    // rows, but will accept trailing metadata from the producer.
    DrainRequested
    // ConsumerClosed indicates that the consumer will not process any more data
    // rows or metadata. This is also commonly returned in case the consumer has
    // encountered an error.
    ConsumerClosed
)
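A typical producer loop reacts to the status returned by Push; in this sketch, dst is a RowReceiver, and nextRow and sendTrailingMetadata are hypothetical helpers standing in for a processor's actual row source and metadata handling:

    for {
        row := nextRow()
        if row == nil {
            break // no more rows to produce
        }
        switch dst.Push(row, ProducerMetadata{}) {
        case NeedMoreRows:
            continue
        case DrainRequested:
            sendTrailingMetadata(dst) // from here on, push only trailing metadata
            dst.ProducerDone()
            return
        case ConsumerClosed:
            dst.ProducerDone() // the consumer accepts nothing further
            return
        }
    }
    dst.ProducerDone()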
type DatumInfo ¶
type DatumInfo struct {
    Encoding cockroach_sql_sqlbase2.DatumEncoding `protobuf:"varint,1,opt,name=encoding,enum=cockroach.sql.sqlbase.DatumEncoding" json:"encoding"`
    Type     cockroach_sql_sqlbase1.ColumnType    `protobuf:"bytes,2,opt,name=type" json:"type"`
}
func (*DatumInfo) Descriptor ¶
func (*DatumInfo) Descriptor() ([]byte, []int)
func (*DatumInfo) ProtoMessage ¶
func (*DatumInfo) ProtoMessage()
type DistSQLClient ¶
type DistSQLClient interface {
    // RunSyncFlow instantiates a flow and streams back results of that flow.
    // The request must contain one flow, and that flow must have a single
    // mailbox of the special sync response type.
    RunSyncFlow(ctx context.Context, opts ...grpc.CallOption) (DistSQL_RunSyncFlowClient, error)
    // SetupFlow instantiates a flow (subgraphs of a distributed SQL
    // computation) on the receiving node.
    SetupFlow(ctx context.Context, in *SetupFlowRequest, opts ...grpc.CallOption) (*SimpleResponse, error)
    // FlowStream is used to push a stream of messages that is part of a flow.
    // The first message will have a StreamHeader which identifies the flow and
    // the stream (mailbox).
    //
    // The response is a stream that the consumer uses to perform a handshake
    // and to signal the producer when it wants it to start draining. The client
    // (i.e. the producer) will read from this consumer->producer stream until
    // it has sent everything it needs to send and it performs CloseSend() on
    // the producer->consumer stream; after that point the producer isn't
    // listening for consumer signals any more.
    FlowStream(ctx context.Context, opts ...grpc.CallOption) (DistSQL_FlowStreamClient, error)
}
func NewDistSQLClient ¶
func NewDistSQLClient(cc *grpc.ClientConn) DistSQLClient
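A sketch of the producer side of the FlowStream protocol described in the interface comment; conn (an existing *grpc.ClientConn) and firstMsg (a *ProducerMessage whose header identifies the flow and stream) are assumed to exist:

    client := NewDistSQLClient(conn)
    stream, err := client.FlowStream(ctx)
    if err != nil {
        return err
    }
    // The first message must carry the header; subsequent messages carry rows.
    if err := stream.Send(firstMsg); err != nil {
        return err
    }
    // ... send further ProducerMessages with row data, watching stream.Recv()
    // for drain requests ...
    // After CloseSend, the producer no longer listens for consumer signals.
    if err := stream.CloseSend(); err != nil {
        return err
    }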
type DistSQLMetrics ¶
type DistSQLMetrics struct {
    QueriesActive *metric.Gauge
    QueriesTotal  *metric.Counter
    FlowsActive   *metric.Gauge
    FlowsTotal    *metric.Counter
    MaxBytesHist  *metric.Histogram
    CurBytesCount *metric.Counter
}
DistSQLMetrics contains pointers to the metrics for monitoring DistSQL processing.
func MakeDistSQLMetrics ¶
func MakeDistSQLMetrics(histogramWindow time.Duration) DistSQLMetrics
MakeDistSQLMetrics instantiates the metrics holder for DistSQL monitoring.
func (*DistSQLMetrics) FlowStart ¶
func (m *DistSQLMetrics) FlowStart()
FlowStart registers the start of a new DistSQL flow.
func (*DistSQLMetrics) FlowStop ¶
func (m *DistSQLMetrics) FlowStop()
FlowStop registers the end of a DistSQL flow.
func (DistSQLMetrics) MetricStruct ¶
func (DistSQLMetrics) MetricStruct()
MetricStruct implements the metrics.Struct interface.
func (*DistSQLMetrics) QueryStart ¶
func (m *DistSQLMetrics) QueryStart()
QueryStart registers the start of a new DistSQL query.
func (*DistSQLMetrics) QueryStop ¶
func (m *DistSQLMetrics) QueryStop()
QueryStop registers the end of a DistSQL query.
type DistSQLServer ¶
type DistSQLServer interface {
    // RunSyncFlow instantiates a flow and streams back results of that flow.
    // The request must contain one flow, and that flow must have a single
    // mailbox of the special sync response type.
    RunSyncFlow(DistSQL_RunSyncFlowServer) error
    // SetupFlow instantiates a flow (subgraphs of a distributed SQL
    // computation) on the receiving node.
    SetupFlow(context.Context, *SetupFlowRequest) (*SimpleResponse, error)
    // FlowStream is used to push a stream of messages that is part of a flow.
    // The first message will have a StreamHeader which identifies the flow and
    // the stream (mailbox).
    //
    // The response is a stream that the consumer uses to perform a handshake
    // and to signal the producer when it wants it to start draining. The client
    // (i.e. the producer) will read from this consumer->producer stream until
    // it has sent everything it needs to send and it performs CloseSend() on
    // the producer->consumer stream; after that point the producer isn't
    // listening for consumer signals any more.
    FlowStream(DistSQL_FlowStreamServer) error
}
type DistSQLVersion ¶
type DistSQLVersion uint32
DistSQLVersion identifies DistSQL engine versions.
const Version DistSQLVersion = 6
Version identifies the distsqlrun protocol version.
This version is separate from the main CockroachDB version numbering; it is only changed when the distsqlrun API changes.
The planner populates the version in SetupFlowRequest. A server only accepts requests with versions in the range MinAcceptedVersion to Version.
This makes it possible to provide a "window" of compatibility when new features are added. Example:
- we start with Version=1; distsqlrun servers with version 1 only accept requests with version 1.
- a new distsqlrun feature is added; Version is bumped to 2. The planner does not yet use this feature by default; it still issues requests with version 1.
- MinAcceptedVersion is still 1, i.e. servers with version 2 accept both versions 1 and 2.
- after an upgrade cycle, we can enable the feature in the planner, requiring version 2.
- at some later point, we can choose to deprecate version 1 and have servers only accept versions >= 2 (by setting MinAcceptedVersion to 2).
ATTENTION: When updating these fields, add to version_history.txt explaining what changed.
var MinAcceptedVersion DistSQLVersion = 6
MinAcceptedVersion is the oldest version that the server is compatible with; see above.
type DistSQLVersionGossipInfo ¶
type DistSQLVersionGossipInfo struct {
    Version            DistSQLVersion `protobuf:"varint,1,opt,name=version,casttype=DistSQLVersion" json:"version"`
    MinAcceptedVersion DistSQLVersion `protobuf:"varint,2,opt,name=min_accepted_version,json=minAcceptedVersion,casttype=DistSQLVersion" json:"min_accepted_version"`
}
DistSQLVersionGossipInfo represents the DistSQL server version information that gets gossiped for each node. This is used by planners to avoid planning on nodes with incompatible versions during rolling cluster updates.
For the meaning of the fields, see the corresponding constants in distsqlrun/server.go.
func (*DistSQLVersionGossipInfo) Descriptor ¶
func (*DistSQLVersionGossipInfo) Descriptor() ([]byte, []int)
func (*DistSQLVersionGossipInfo) Marshal ¶
func (m *DistSQLVersionGossipInfo) Marshal() (dAtA []byte, err error)
func (*DistSQLVersionGossipInfo) MarshalTo ¶
func (m *DistSQLVersionGossipInfo) MarshalTo(dAtA []byte) (int, error)
func (*DistSQLVersionGossipInfo) ProtoMessage ¶
func (*DistSQLVersionGossipInfo) ProtoMessage()
func (*DistSQLVersionGossipInfo) Reset ¶
func (m *DistSQLVersionGossipInfo) Reset()
func (*DistSQLVersionGossipInfo) Size ¶
func (m *DistSQLVersionGossipInfo) Size() (n int)
func (*DistSQLVersionGossipInfo) String ¶
func (m *DistSQLVersionGossipInfo) String() string
func (*DistSQLVersionGossipInfo) Unmarshal ¶
func (m *DistSQLVersionGossipInfo) Unmarshal(dAtA []byte) error
type DistSQL_FlowStreamClient ¶
type DistSQL_FlowStreamClient interface {
    Send(*ProducerMessage) error
    Recv() (*ConsumerSignal, error)
    grpc.ClientStream
}
type DistSQL_FlowStreamServer ¶
type DistSQL_FlowStreamServer interface {
    Send(*ConsumerSignal) error
    Recv() (*ProducerMessage, error)
    grpc.ServerStream
}
type DistSQL_RunSyncFlowClient ¶
type DistSQL_RunSyncFlowClient interface {
    Send(*ConsumerSignal) error
    Recv() (*ProducerMessage, error)
    grpc.ClientStream
}
type DistSQL_RunSyncFlowServer ¶
type DistSQL_RunSyncFlowServer interface {
    Send(*ProducerMessage) error
    Recv() (*ConsumerSignal, error)
    grpc.ServerStream
}
type DistinctSpec ¶
type DistinctSpec struct {
    // The ordered columns in the input stream can be optionally specified for
    // possible optimizations. The specific ordering (ascending/descending) of
    // the column itself is not important nor is the order in which the columns
    // are specified.
    OrderedColumns []uint32 `protobuf:"varint,1,rep,name=ordered_columns,json=orderedColumns" json:"ordered_columns,omitempty"`
    // The distinct columns in the input stream are those columns on which we
    // check for distinct rows. If A,B,C are in distinct_columns and there is a
    // 4th column D which is not included in distinct_columns, its values are
    // not considered, so rows A1,B1,C1,D1 and A1,B1,C1,D2 are considered equal
    // and only one of them (the first) is output.
    DistinctColumns []uint32 `protobuf:"varint,2,rep,name=distinct_columns,json=distinctColumns" json:"distinct_columns,omitempty"`
}
func (*DistinctSpec) Descriptor ¶
func (*DistinctSpec) Descriptor() ([]byte, []int)
func (*DistinctSpec) Marshal ¶
func (m *DistinctSpec) Marshal() (dAtA []byte, err error)
func (*DistinctSpec) ProtoMessage ¶
func (*DistinctSpec) ProtoMessage()
func (*DistinctSpec) Reset ¶
func (m *DistinctSpec) Reset()
func (*DistinctSpec) Size ¶
func (m *DistinctSpec) Size() (n int)
func (*DistinctSpec) String ¶
func (m *DistinctSpec) String() string
func (*DistinctSpec) Unmarshal ¶
func (m *DistinctSpec) Unmarshal(dAtA []byte) error
type DrainRequest ¶
type DrainRequest struct { }
func (*DrainRequest) Descriptor ¶
func (*DrainRequest) Descriptor() ([]byte, []int)
func (*DrainRequest) Marshal ¶
func (m *DrainRequest) Marshal() (dAtA []byte, err error)
func (*DrainRequest) ProtoMessage ¶
func (*DrainRequest) ProtoMessage()
func (*DrainRequest) Reset ¶
func (m *DrainRequest) Reset()
func (*DrainRequest) Size ¶
func (m *DrainRequest) Size() (n int)
func (*DrainRequest) String ¶
func (m *DrainRequest) String() string
func (*DrainRequest) Unmarshal ¶
func (m *DrainRequest) Unmarshal(dAtA []byte) error
type Error ¶
type Error struct {
    // Types that are valid to be assigned to Detail:
    //	*Error_PGError
    //	*Error_RetryableTxnError
    Detail isError_Detail `protobuf_oneof:"detail"`
}
Error is a generic representation including a string message.
func NewError ¶
NewError creates an Error from an error, to be sent on the wire. It will recognize certain errors and marshal them accordingly; everything unrecognized is turned into a PGError with code "internal".
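A minimal sketch of the round trip (the exact signatures are abbreviated in this listing, so treat the calls below as assumptions):

    // Producer side: errors.New(...) is not a recognized error type, so it
    // becomes a PGError with code "internal".
    wireErr := NewError(errors.New("row encoding failed"))

    // Consumer side: convert the wire payload back into a Go error.
    err := wireErr.ErrorDetail()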
func (*Error) Descriptor ¶
func (*Error) ErrorDetail ¶
ErrorDetail returns the payload as a Go error.
func (*Error) GetPGError ¶
func (m *Error) GetPGError() *cockroach_pgerror.Error
func (*Error) GetRetryableTxnError ¶
func (m *Error) GetRetryableTxnError() *cockroach_roachpb2.UnhandledRetryableError
func (*Error) ProtoMessage ¶
func (*Error) ProtoMessage()
type Error_PGError ¶
type Error_PGError struct {
PGError *cockroach_pgerror.Error `protobuf:"bytes,1,opt,name=pg_error,json=pgError,oneof"`
}
func (*Error_PGError) Size ¶
func (m *Error_PGError) Size() (n int)
type Error_RetryableTxnError ¶
type Error_RetryableTxnError struct {
RetryableTxnError *cockroach_roachpb2.UnhandledRetryableError `protobuf:"bytes,2,opt,name=retryableTxnError,oneof"`
}
func (*Error_RetryableTxnError) MarshalTo ¶
func (m *Error_RetryableTxnError) MarshalTo(dAtA []byte) (int, error)
func (*Error_RetryableTxnError) Size ¶
func (m *Error_RetryableTxnError) Size() (n int)
type EvalContext ¶
type EvalContext struct {
    StmtTimestampNanos int64 `protobuf:"varint,1,opt,name=stmtTimestampNanos" json:"stmtTimestampNanos"`
    TxnTimestampNanos int64 `protobuf:"varint,2,opt,name=txnTimestampNanos" json:"txnTimestampNanos"`
    ClusterTimestamp cockroach_util_hlc.Timestamp `protobuf:"bytes,3,opt,name=clusterTimestamp" json:"clusterTimestamp"`
    // The name of the location according to whose current timezone we're going to
    // parse timestamps. Used to init EvalContext.Location.
    Location string `protobuf:"bytes,4,opt,name=location" json:"location"`
    Database string `protobuf:"bytes,5,opt,name=database" json:"database"`
    SearchPath []string `protobuf:"bytes,6,rep,name=searchPath" json:"searchPath,omitempty"`
    User string `protobuf:"bytes,7,opt,name=user" json:"user"`
}
EvalContext is used to marshal some planner.EvalContext members.
func MakeEvalContext ¶
func MakeEvalContext(evalCtx parser.EvalContext) EvalContext
MakeEvalContext serializes some of the fields of a parser.EvalContext into a distsqlrun.EvalContext proto.
func (*EvalContext) Descriptor ¶
func (*EvalContext) Descriptor() ([]byte, []int)
func (*EvalContext) Marshal ¶
func (m *EvalContext) Marshal() (dAtA []byte, err error)
func (*EvalContext) ProtoMessage ¶
func (*EvalContext) ProtoMessage()
func (*EvalContext) Reset ¶
func (m *EvalContext) Reset()
func (*EvalContext) Size ¶
func (m *EvalContext) Size() (n int)
func (*EvalContext) String ¶
func (m *EvalContext) String() string
func (*EvalContext) Unmarshal ¶
func (m *EvalContext) Unmarshal(dAtA []byte) error
type Expression ¶
type Expression struct {
    // TODO(radu): TBD how this will be used
    Version string `protobuf:"bytes,1,opt,name=version" json:"version"`
    // SQL expressions are passed as a string, with ordinal references
    // (@1, @2, @3 ..) used for "input" variables.
    Expr string `protobuf:"bytes,2,opt,name=expr" json:"expr"`
}
func (*Expression) Descriptor ¶
func (*Expression) Descriptor() ([]byte, []int)
func (*Expression) Marshal ¶
func (m *Expression) Marshal() (dAtA []byte, err error)
func (*Expression) ProtoMessage ¶
func (*Expression) ProtoMessage()
func (*Expression) Reset ¶
func (m *Expression) Reset()
func (*Expression) Size ¶
func (m *Expression) Size() (n int)
func (*Expression) String ¶
func (m *Expression) String() string
func (*Expression) Unmarshal ¶
func (m *Expression) Unmarshal(dAtA []byte) error
type Flow ¶
type Flow struct {
    FlowCtx
    // contains filtered or unexported fields
}
Flow represents a flow which consists of processors and streams.
func (*Flow) Cleanup ¶
Cleanup should be called when the flow completes (after all processors and mailboxes exited).
func (*Flow) RunSync ¶
RunSync runs the processors in the flow in order (serially), in the same context (no goroutines are spawned).
func (*Flow) Start ¶
Start starts the flow (each processor runs in its own goroutine).
Generally if errors are encountered during the setup part, they're returned. But if the flow is a synchronous one, then no error is returned; instead the setup error is pushed to the syncFlowConsumer. In this case, a subsequent call to f.Wait() will not block.
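A hedged sketch of the lifecycle described above; the exact Start/Wait/Cleanup signatures are not reproduced in this listing, so the calls below are assumptions based on the surrounding prose.

    // Asynchronous flow: each processor runs in its own goroutine.
    if err := flow.Start(ctx, func() {}); err != nil { // signature assumed
        return err
    }
    flow.Wait()       // blocks until the processors are done
    flow.Cleanup(ctx) // per Cleanup's doc: after all processors and mailboxes exited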
type FlowCtx ¶
type FlowCtx struct {
    log.AmbientContext

    Settings *cluster.Settings
    // EvalCtx is used by all the processors in the flow to evaluate expressions.
    EvalCtx parser.EvalContext
    // TempStorage is used by some DistSQL processors to store Rows when the
    // working set is larger than can be stored in memory.
    TempStorage engine.Engine
    // JobRegistry is used during backfill to load jobs which keep state.
    JobRegistry *jobs.Registry
    // contains filtered or unexported fields
}
FlowCtx encompasses the contexts needed for various flow components.
type FlowID ¶
FlowID identifies a flow. It is most importantly used when setting up streams between nodes.
type FlowSpec ¶
type FlowSpec struct {
    FlowID FlowID `protobuf:"bytes,1,opt,name=flow_id,json=flowId,customtype=FlowID" json:"flow_id"`
    // The NodeID of the gateway that planned this Flow. Used for debugging.
    Gateway github_com_cockroachdb_cockroach_pkg_roachpb.NodeID `protobuf:"varint,3,opt,name=gateway,casttype=github.com/cockroachdb/cockroach/pkg/roachpb.NodeID" json:"gateway"`
    Processors []ProcessorSpec `protobuf:"bytes,2,rep,name=processors" json:"processors"`
}
FlowSpec describes a "flow" which is a subgraph of a distributed SQL computation consisting of processors and streams.
func (*FlowSpec) Descriptor ¶
func (*FlowSpec) ProtoMessage ¶
func (*FlowSpec) ProtoMessage()
type HashJoinerSpec ¶
type HashJoinerSpec struct {
    // The join constrains certain columns from the left stream to equal
    // corresponding columns on the right stream. These must have the same length.
    LeftEqColumns []uint32 `protobuf:"varint,1,rep,packed,name=left_eq_columns,json=leftEqColumns" json:"left_eq_columns,omitempty"`
    RightEqColumns []uint32 `protobuf:"varint,2,rep,packed,name=right_eq_columns,json=rightEqColumns" json:"right_eq_columns,omitempty"`
    // "ON" expression (in addition to the equality constraints captured by the
    // orderings). Assuming that the left stream has N columns and the right
    // stream has M columns, in this expression variables @1 to @N refer to
    // columns of the left stream and variables @(N+1) to @(N+M) refer to
    // columns in the right stream.
    // Having an "ON" expression implies no merged columns.
    OnExpr Expression `protobuf:"bytes,5,opt,name=on_expr,json=onExpr" json:"on_expr"`
    Type JoinType `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.distsqlrun.JoinType" json:"type"`
    // Extra merged columns that are added in case of OUTER JOINs. These
    // columns occupy the first positions in a row, and each holds the left
    // value if it is not NULL, otherwise the right value. In the INNER JOIN
    // case no merged columns are needed since left stream values are
    // guaranteed to be not NULL.
    MergedColumns bool `protobuf:"varint,7,opt,name=merged_columns,json=mergedColumns" json:"merged_columns"`
}
HashJoinerSpec is the specification for a hash join processor. The processor has two inputs and one output.
The processor works by reading the entire right input and putting it in a hash table. Thus, there is no guarantee on the ordering of results that stem only from the right input (in the case of RIGHT_OUTER, FULL_OUTER). However, it is guaranteed that results that involve the left stream preserve the ordering; i.e. all results that stem from left row (i) precede results that stem from left row (i+1).
The "internal columns" of a HashJoiner (see ProcessorSpec) are the concatenation of merged columns (if present), left input columns and right input columns. Each merged column corresponds to a left and a right equality column; its value is the value on the left if it is not NULL, otherwise it is the value on the right. There are either zero or E=len(left_eq_columns)=len(right_eq_columns) merged columns.
If the left input has N columns and the right input has M columns, the first N columns contain values from the left side and the following M columns contain values from the right side. If merged columns are present, they occupy first E positions followed by N values from the left side and M values from the right side.
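A worked example of the layout just described, under the assumption of N=2 left columns, M=3 right columns, one equality pair, and merged columns enabled (E=1):

    // Left schema:  [L0, L1]        (N = 2)
    // Right schema: [R0, R1, R2]    (M = 3)
    // One equality pair (L0 = R0) with merged columns on, so E = 1.
    //
    // Internal columns: [merged0, L0, L1, R0, R1, R2]
    //         position:     0      1   2   3   4   5
    //
    // A PostProcessSpec that wants the merged key and R1 uses the 1-based
    // ordinal references @1 and @5.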
func (*HashJoinerSpec) Descriptor ¶
func (*HashJoinerSpec) Descriptor() ([]byte, []int)
func (*HashJoinerSpec) Marshal ¶
func (m *HashJoinerSpec) Marshal() (dAtA []byte, err error)
func (*HashJoinerSpec) ProtoMessage ¶
func (*HashJoinerSpec) ProtoMessage()
func (*HashJoinerSpec) Reset ¶
func (m *HashJoinerSpec) Reset()
func (*HashJoinerSpec) Size ¶
func (m *HashJoinerSpec) Size() (n int)
func (*HashJoinerSpec) String ¶
func (m *HashJoinerSpec) String() string
func (*HashJoinerSpec) Unmarshal ¶
func (m *HashJoinerSpec) Unmarshal(dAtA []byte) error
type InputSyncSpec ¶
type InputSyncSpec struct {
    Type InputSyncSpec_Type `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.InputSyncSpec_Type" json:"type"`
    Ordering Ordering `protobuf:"bytes,2,opt,name=ordering" json:"ordering"`
    Streams []StreamEndpointSpec `protobuf:"bytes,3,rep,name=streams" json:"streams"`
    // Schema for the streams entering this synchronizer.
    ColumnTypes []cockroach_sql_sqlbase1.ColumnType `protobuf:"bytes,4,rep,name=column_types,json=columnTypes" json:"column_types"`
}
InputSyncSpec is the specification for an input synchronizer; it decides how to interleave rows from multiple input streams.
func (*InputSyncSpec) Descriptor ¶
func (*InputSyncSpec) Descriptor() ([]byte, []int)
func (*InputSyncSpec) Marshal ¶
func (m *InputSyncSpec) Marshal() (dAtA []byte, err error)
func (*InputSyncSpec) ProtoMessage ¶
func (*InputSyncSpec) ProtoMessage()
func (*InputSyncSpec) Reset ¶
func (m *InputSyncSpec) Reset()
func (*InputSyncSpec) Size ¶
func (m *InputSyncSpec) Size() (n int)
func (*InputSyncSpec) String ¶
func (m *InputSyncSpec) String() string
func (*InputSyncSpec) Unmarshal ¶
func (m *InputSyncSpec) Unmarshal(dAtA []byte) error
type InputSyncSpec_Type ¶
type InputSyncSpec_Type int32
const (
    // Rows from the input streams are interleaved arbitrarily.
    InputSyncSpec_UNORDERED InputSyncSpec_Type = 0
    // The input streams are guaranteed to be ordered according to the column
    // ordering field; rows from the streams are interleaved to preserve that
    // ordering.
    InputSyncSpec_ORDERED InputSyncSpec_Type = 1
)
func (InputSyncSpec_Type) Enum ¶
func (x InputSyncSpec_Type) Enum() *InputSyncSpec_Type
func (InputSyncSpec_Type) EnumDescriptor ¶
func (InputSyncSpec_Type) EnumDescriptor() ([]byte, []int)
func (InputSyncSpec_Type) String ¶
func (x InputSyncSpec_Type) String() string
func (*InputSyncSpec_Type) UnmarshalJSON ¶
func (x *InputSyncSpec_Type) UnmarshalJSON(data []byte) error
type JoinReaderSpec ¶
type JoinReaderSpec struct {
    Table cockroach_sql_sqlbase1.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
    // If 0, we use the primary index; each row in the input stream has a value
    // for each primary key.
    // TODO(radu): figure out the correct semantics when joining with an index.
    IndexIdx uint32 `protobuf:"varint,2,opt,name=index_idx,json=indexIdx" json:"index_idx"`
}
JoinReaderSpec is the specification for a "join reader". A join reader performs KV operations to retrieve specific rows that correspond to the values in the input stream (join by lookup).
The "internal columns" of a JoinReader (see ProcessorSpec) are all the columns of the table. Internally, only the values for the columns needed by the post-processing stage are be populated.
func (*JoinReaderSpec) Descriptor ¶
func (*JoinReaderSpec) Descriptor() ([]byte, []int)
func (*JoinReaderSpec) Marshal ¶
func (m *JoinReaderSpec) Marshal() (dAtA []byte, err error)
func (*JoinReaderSpec) ProtoMessage ¶
func (*JoinReaderSpec) ProtoMessage()
func (*JoinReaderSpec) Reset ¶
func (m *JoinReaderSpec) Reset()
func (*JoinReaderSpec) Size ¶
func (m *JoinReaderSpec) Size() (n int)
func (*JoinReaderSpec) String ¶
func (m *JoinReaderSpec) String() string
func (*JoinReaderSpec) Unmarshal ¶
func (m *JoinReaderSpec) Unmarshal(dAtA []byte) error
type JoinType ¶
type JoinType int32
func (JoinType) EnumDescriptor ¶
func (*JoinType) UnmarshalJSON ¶
type MergeJoinerSpec ¶
type MergeJoinerSpec struct {
    // The streams must be ordered according to the columns that have equality
    // constraints. The first column of the left ordering is constrained to be
    // equal to the first column in the right ordering and so on. The ordering
    // lengths and directions must match.
    // In the example below, the left ordering describes C1+,C2- and the right
    // ordering describes C5+,C4-.
    LeftOrdering Ordering `protobuf:"bytes,1,opt,name=left_ordering,json=leftOrdering" json:"left_ordering"`
    RightOrdering Ordering `protobuf:"bytes,2,opt,name=right_ordering,json=rightOrdering" json:"right_ordering"`
    // "ON" expression (in addition to the equality constraints captured by the
    // orderings). Assuming that the left stream has N columns and the right
    // stream has M columns, in this expression ordinal references @1 to @N refer
    // to columns of the left stream and variables @(N+1) to @(N+M) refer to
    // columns in the right stream.
    OnExpr Expression `protobuf:"bytes,5,opt,name=on_expr,json=onExpr" json:"on_expr"`
    Type JoinType `protobuf:"varint,6,opt,name=type,enum=cockroach.sql.distsqlrun.JoinType" json:"type"`
}
MergeJoinerSpec is the specification for a merge join processor. The processor has two inputs and one output. The inputs must have the same ordering on the columns that have equality constraints. For example:
SELECT * FROM T1 INNER JOIN T2 ON T1.C1 = T2.C5 AND T1.C2 = T2.C4
To perform a merge join, the streams corresponding to T1 and T2 must have the same ordering on columns C1, C2 and C5, C4 respectively. For example: C1+,C2- and C5+,C4-.
The "internal columns" of a MergeJoiner (see ProcessorSpec) are the concatenation of left input columns and right input columns. If the left input has N columns and the right input has M columns, the first N columns contain values from the left side and the following M columns contain values from the right side.
func (*MergeJoinerSpec) Descriptor ¶
func (*MergeJoinerSpec) Descriptor() ([]byte, []int)
func (*MergeJoinerSpec) Marshal ¶
func (m *MergeJoinerSpec) Marshal() (dAtA []byte, err error)
func (*MergeJoinerSpec) ProtoMessage ¶
func (*MergeJoinerSpec) ProtoMessage()
func (*MergeJoinerSpec) Reset ¶
func (m *MergeJoinerSpec) Reset()
func (*MergeJoinerSpec) Size ¶
func (m *MergeJoinerSpec) Size() (n int)
func (*MergeJoinerSpec) String ¶
func (m *MergeJoinerSpec) String() string
func (*MergeJoinerSpec) Unmarshal ¶
func (m *MergeJoinerSpec) Unmarshal(dAtA []byte) error
type MultiplexedRowChannel ¶
type MultiplexedRowChannel struct {
// contains filtered or unexported fields
}
MultiplexedRowChannel is a RowChannel wrapper which allows multiple row producers to push rows on the same channel.
func (*MultiplexedRowChannel) ConsumerClosed ¶
func (mrc *MultiplexedRowChannel) ConsumerClosed()
ConsumerClosed is part of the RowSource interface.
func (*MultiplexedRowChannel) ConsumerDone ¶
func (mrc *MultiplexedRowChannel) ConsumerDone()
ConsumerDone is part of the RowSource interface.
func (*MultiplexedRowChannel) Init ¶
func (mrc *MultiplexedRowChannel) Init(numSenders int, types []sqlbase.ColumnType)
Init initializes the MultiplexedRowChannel with the default buffer size.
func (*MultiplexedRowChannel) Next ¶
func (mrc *MultiplexedRowChannel) Next() (row sqlbase.EncDatumRow, meta ProducerMetadata)
Next is part of the RowSource interface.
func (*MultiplexedRowChannel) ProducerDone ¶
func (mrc *MultiplexedRowChannel) ProducerDone()
ProducerDone is part of the RowReceiver interface.
func (*MultiplexedRowChannel) Push ¶
func (mrc *MultiplexedRowChannel) Push(row sqlbase.EncDatumRow, meta ProducerMetadata) ConsumerStatus
Push is part of the RowReceiver interface.
func (*MultiplexedRowChannel) Types ¶
func (mrc *MultiplexedRowChannel) Types() []sqlbase.ColumnType
Types is part of the RowSource interface.
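A sketch of the fan-in pattern this type enables; types, leftRows and rightRows are placeholders, and the ConsumerStatus value name NeedMoreRows is an assumption not shown in this listing:

    var mrc MultiplexedRowChannel
    mrc.Init(2, types) // two producers will push into the same channel

    produce := func(rows sqlbase.EncDatumRows) {
        for _, row := range rows {
            if mrc.Push(row, ProducerMetadata{}) != NeedMoreRows {
                break // consumer asked us to drain or stop; simplified here
            }
        }
        mrc.ProducerDone() // each of the numSenders producers calls this
    }
    go produce(leftRows)
    go produce(rightRows)

    // Single consumer: Next behaves like any RowSource.
    for {
        row, meta := mrc.Next()
        if row == nil && meta.Empty() {
            break
        }
        // handle row or metadata
    }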
type MutationFilter ¶
type MutationFilter func(sqlbase.DescriptorMutation) bool
MutationFilter is the type of a simple predicate on a mutation.
type NoMetadataRowSource ¶
type NoMetadataRowSource struct {
// contains filtered or unexported fields
}
NoMetadataRowSource is a wrapper on top of a RowSource that automatically forwards metadata to a RowReceiver. Data rows are returned through an interface similar to RowSource, except that, since metadata is taken care of, only the data rows are returned.
The point of this struct is that it'd be burdensome for some row consumers to have to deal with metadata.
func MakeNoMetadataRowSource ¶
func MakeNoMetadataRowSource(src RowSource, sink RowReceiver) NoMetadataRowSource
MakeNoMetadataRowSource builds a NoMetadataRowSource.
func (*NoMetadataRowSource) NextRow ¶
func (rs *NoMetadataRowSource) NextRow() (sqlbase.EncDatumRow, error)
NextRow is analogous to RowSource.Next. If the producer sends an error, we can't just forward it to metadataSink. We need to let the consumer know so that it's not under the impression that everything is hunky-dory and it can continue consuming rows. So, this interface returns the error. Just like with a raw RowSource, the consumer should generally call ConsumerDone() and drain.
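A sketch of the consumption loop implied above, assuming src (a RowSource) and sink (a RowReceiver) already exist:

    rs := MakeNoMetadataRowSource(src, sink)
    for {
        row, err := rs.NextRow()
        if err != nil {
            // The producer reported an error; per the contract above, stop
            // asking for rows and let draining take its course.
            break
        }
        if row == nil {
            break // exhausted; metadata was forwarded to sink for us
        }
        // process the data row
    }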
type NoopCoreSpec ¶
type NoopCoreSpec struct { }
NoopCoreSpec indicates a "no-op" processor core. This is used when we just need post-processing or when only a synchronizer is required (e.g. at the final endpoint).
func (*NoopCoreSpec) Descriptor ¶
func (*NoopCoreSpec) Descriptor() ([]byte, []int)
func (*NoopCoreSpec) Marshal ¶
func (m *NoopCoreSpec) Marshal() (dAtA []byte, err error)
func (*NoopCoreSpec) ProtoMessage ¶
func (*NoopCoreSpec) ProtoMessage()
func (*NoopCoreSpec) Reset ¶
func (m *NoopCoreSpec) Reset()
func (*NoopCoreSpec) Size ¶
func (m *NoopCoreSpec) Size() (n int)
func (*NoopCoreSpec) String ¶
func (m *NoopCoreSpec) String() string
func (*NoopCoreSpec) Unmarshal ¶
func (m *NoopCoreSpec) Unmarshal(dAtA []byte) error
type Ordering ¶
type Ordering struct {
Columns []Ordering_Column `protobuf:"bytes,1,rep,name=columns" json:"columns"`
}
Ordering defines an order - specifically a list of column indices and directions. See sqlbase.ColumnOrdering.
func (*Ordering) Descriptor ¶
func (*Ordering) ProtoMessage ¶
func (*Ordering) ProtoMessage()
type Ordering_Column ¶
type Ordering_Column struct {
    ColIdx uint32 `protobuf:"varint,1,opt,name=col_idx,json=colIdx" json:"col_idx"`
    Direction Ordering_Column_Direction `protobuf:"varint,2,opt,name=direction,enum=cockroach.sql.distsqlrun.Ordering_Column_Direction" json:"direction"`
}
func (*Ordering_Column) Descriptor ¶
func (*Ordering_Column) Descriptor() ([]byte, []int)
func (*Ordering_Column) Marshal ¶
func (m *Ordering_Column) Marshal() (dAtA []byte, err error)
func (*Ordering_Column) ProtoMessage ¶
func (*Ordering_Column) ProtoMessage()
func (*Ordering_Column) Reset ¶
func (m *Ordering_Column) Reset()
func (*Ordering_Column) Size ¶
func (m *Ordering_Column) Size() (n int)
func (*Ordering_Column) String ¶
func (m *Ordering_Column) String() string
func (*Ordering_Column) Unmarshal ¶
func (m *Ordering_Column) Unmarshal(dAtA []byte) error
type Ordering_Column_Direction ¶
type Ordering_Column_Direction int32
The direction of the desired ordering for a column.
const (
    Ordering_Column_ASC Ordering_Column_Direction = 0
    Ordering_Column_DESC Ordering_Column_Direction = 1
)
func (Ordering_Column_Direction) Enum ¶
func (x Ordering_Column_Direction) Enum() *Ordering_Column_Direction
func (Ordering_Column_Direction) EnumDescriptor ¶
func (Ordering_Column_Direction) EnumDescriptor() ([]byte, []int)
func (Ordering_Column_Direction) String ¶
func (x Ordering_Column_Direction) String() string
func (*Ordering_Column_Direction) UnmarshalJSON ¶
func (x *Ordering_Column_Direction) UnmarshalJSON(data []byte) error
type OutputRouterSpec ¶
type OutputRouterSpec struct {
    Type OutputRouterSpec_Type `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.OutputRouterSpec_Type" json:"type"`
    Streams []StreamEndpointSpec `protobuf:"bytes,2,rep,name=streams" json:"streams"`
    // Only used for the BY_HASH type; these are the indexes of the columns we are
    // hashing.
    HashColumns []uint32 `protobuf:"varint,3,rep,name=hash_columns,json=hashColumns" json:"hash_columns,omitempty"`
    RangeRouterSpec OutputRouterSpec_RangeRouterSpec `protobuf:"bytes,4,opt,name=range_router_spec,json=rangeRouterSpec" json:"range_router_spec"`
}
OutputRouterSpec is the specification for the output router of a processor; it decides how to send results to multiple output streams.
func (*OutputRouterSpec) Descriptor ¶
func (*OutputRouterSpec) Descriptor() ([]byte, []int)
func (*OutputRouterSpec) Marshal ¶
func (m *OutputRouterSpec) Marshal() (dAtA []byte, err error)
func (*OutputRouterSpec) ProtoMessage ¶
func (*OutputRouterSpec) ProtoMessage()
func (*OutputRouterSpec) Reset ¶
func (m *OutputRouterSpec) Reset()
func (*OutputRouterSpec) Size ¶
func (m *OutputRouterSpec) Size() (n int)
func (*OutputRouterSpec) String ¶
func (m *OutputRouterSpec) String() string
func (*OutputRouterSpec) Unmarshal ¶
func (m *OutputRouterSpec) Unmarshal(dAtA []byte) error
type OutputRouterSpec_RangeRouterSpec ¶
type OutputRouterSpec_RangeRouterSpec struct {
    // spans is a slice of Span. Input matching the Nth span will be routed to the
    // Nth stream (and thus the number of spans must match the number of streams).
    Spans []OutputRouterSpec_RangeRouterSpec_Span `protobuf:"bytes,1,rep,name=spans" json:"spans"`
    // default_dest, if not nil, is the index of the stream to send rows that do
    // not match any span. If nil, a row that does not match a span will produce
    // an error in the router.
    DefaultDest *int32 `protobuf:"varint,2,opt,name=default_dest,json=defaultDest" json:"default_dest,omitempty"`
    // encodings is a slice of columns and encodings. Each will be appended to a
    // []byte, which is used as input to the spans. Columns from the input rows
    // potentially need to be recoded to match the encoding used for the spans.
    Encodings []OutputRouterSpec_RangeRouterSpec_ColumnEncoding `protobuf:"bytes,3,rep,name=encodings" json:"encodings"`
}
func (*OutputRouterSpec_RangeRouterSpec) Descriptor ¶
func (*OutputRouterSpec_RangeRouterSpec) Descriptor() ([]byte, []int)
func (*OutputRouterSpec_RangeRouterSpec) Marshal ¶
func (m *OutputRouterSpec_RangeRouterSpec) Marshal() (dAtA []byte, err error)
func (*OutputRouterSpec_RangeRouterSpec) MarshalTo ¶
func (m *OutputRouterSpec_RangeRouterSpec) MarshalTo(dAtA []byte) (int, error)
func (*OutputRouterSpec_RangeRouterSpec) ProtoMessage ¶
func (*OutputRouterSpec_RangeRouterSpec) ProtoMessage()
func (*OutputRouterSpec_RangeRouterSpec) Reset ¶
func (m *OutputRouterSpec_RangeRouterSpec) Reset()
func (*OutputRouterSpec_RangeRouterSpec) Size ¶
func (m *OutputRouterSpec_RangeRouterSpec) Size() (n int)
func (*OutputRouterSpec_RangeRouterSpec) String ¶
func (m *OutputRouterSpec_RangeRouterSpec) String() string
func (*OutputRouterSpec_RangeRouterSpec) Unmarshal ¶
func (m *OutputRouterSpec_RangeRouterSpec) Unmarshal(dAtA []byte) error
type OutputRouterSpec_RangeRouterSpec_ColumnEncoding ¶
type OutputRouterSpec_RangeRouterSpec_ColumnEncoding struct {
    // column is the index of a column to encode.
    Column uint32 `protobuf:"varint,1,opt,name=column" json:"column"`
    // encoding specifies how a particular column is to be encoded for
    // generating the sort key for a row. This needs to correspond to the way
    // the Span.{start,end} keys have been generated.
    Encoding cockroach_sql_sqlbase2.DatumEncoding `protobuf:"varint,2,opt,name=encoding,enum=cockroach.sql.sqlbase.DatumEncoding" json:"encoding"`
}
func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Descriptor ¶
func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Descriptor() ([]byte, []int)
func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Marshal ¶
func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Marshal() (dAtA []byte, err error)
func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) MarshalTo ¶
func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) MarshalTo(dAtA []byte) (int, error)
func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) ProtoMessage ¶
func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) ProtoMessage()
func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Reset ¶
func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Reset()
func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Size ¶
func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Size() (n int)
func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) String ¶
func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) String() string
func (*OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Unmarshal ¶
func (m *OutputRouterSpec_RangeRouterSpec_ColumnEncoding) Unmarshal(dAtA []byte) error
type OutputRouterSpec_RangeRouterSpec_Span ¶
type OutputRouterSpec_RangeRouterSpec_Span struct {
    Start []byte `protobuf:"bytes,1,opt,name=start" json:"start,omitempty"`
    End []byte `protobuf:"bytes,2,opt,name=end" json:"end,omitempty"`
}
Span matches bytes in [start, end).
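A sketch of a two-way range router with a fallback stream; the DatumEncoding value name is an assumption:

    dest := int32(2)
    spec := OutputRouterSpec_RangeRouterSpec{
        // Rows whose encoded key is in [start, end) of span i go to stream i.
        Spans: []OutputRouterSpec_RangeRouterSpec_Span{
            {Start: []byte("a"), End: []byte("m")}, // -> stream 0
            {Start: []byte("m"), End: []byte("z")}, // -> stream 1
        },
        // Rows matching no span go to stream 2 rather than erroring.
        DefaultDest: &dest,
        // Encode column 0 the same way the span keys were generated; the
        // enum value name below is assumed.
        Encodings: []OutputRouterSpec_RangeRouterSpec_ColumnEncoding{
            {Column: 0, Encoding: sqlbase.DatumEncoding_ASCENDING_KEY},
        },
    }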
func (*OutputRouterSpec_RangeRouterSpec_Span) Descriptor ¶
func (*OutputRouterSpec_RangeRouterSpec_Span) Descriptor() ([]byte, []int)
func (*OutputRouterSpec_RangeRouterSpec_Span) Marshal ¶
func (m *OutputRouterSpec_RangeRouterSpec_Span) Marshal() (dAtA []byte, err error)
func (*OutputRouterSpec_RangeRouterSpec_Span) MarshalTo ¶
func (m *OutputRouterSpec_RangeRouterSpec_Span) MarshalTo(dAtA []byte) (int, error)
func (*OutputRouterSpec_RangeRouterSpec_Span) ProtoMessage ¶
func (*OutputRouterSpec_RangeRouterSpec_Span) ProtoMessage()
func (*OutputRouterSpec_RangeRouterSpec_Span) Reset ¶
func (m *OutputRouterSpec_RangeRouterSpec_Span) Reset()
func (*OutputRouterSpec_RangeRouterSpec_Span) Size ¶
func (m *OutputRouterSpec_RangeRouterSpec_Span) Size() (n int)
func (*OutputRouterSpec_RangeRouterSpec_Span) String ¶
func (m *OutputRouterSpec_RangeRouterSpec_Span) String() string
func (*OutputRouterSpec_RangeRouterSpec_Span) Unmarshal ¶
func (m *OutputRouterSpec_RangeRouterSpec_Span) Unmarshal(dAtA []byte) error
type OutputRouterSpec_Type ¶
type OutputRouterSpec_Type int32
const (
    // Single output stream.
    OutputRouterSpec_PASS_THROUGH OutputRouterSpec_Type = 0
    // Each row is sent to all output streams.
    OutputRouterSpec_MIRROR OutputRouterSpec_Type = 1
    // Each row is sent to one stream, chosen by hashing certain columns of
    // the row (specified by the hash_columns field).
    OutputRouterSpec_BY_HASH OutputRouterSpec_Type = 2
    // Each row is sent to one stream, chosen according to preset boundaries
    // for the values of certain columns of the row.
    OutputRouterSpec_BY_RANGE OutputRouterSpec_Type = 3
)
func (OutputRouterSpec_Type) Enum ¶
func (x OutputRouterSpec_Type) Enum() *OutputRouterSpec_Type
func (OutputRouterSpec_Type) EnumDescriptor ¶
func (OutputRouterSpec_Type) EnumDescriptor() ([]byte, []int)
func (OutputRouterSpec_Type) String ¶
func (x OutputRouterSpec_Type) String() string
func (*OutputRouterSpec_Type) UnmarshalJSON ¶
func (x *OutputRouterSpec_Type) UnmarshalJSON(data []byte) error
type PostProcessSpec ¶
type PostProcessSpec struct {
    // A filtering expression which references the internal columns of the
    // processor via ordinal references (@1, @2, etc).
    Filter Expression `protobuf:"bytes,1,opt,name=filter" json:"filter"`
    // If true, output_columns describes a projection. Used to differentiate
    // between an empty projection and no projection.
    //
    // Cannot be set at the same time with render expressions.
    Projection bool `protobuf:"varint,2,opt,name=projection" json:"projection"`
    // The output columns describe a projection on the internal set of columns;
    // only the columns in this list will be emitted.
    //
    // Can only be set if projection is true. Cannot be set at the same time with
    // render expressions.
    OutputColumns []uint32 `protobuf:"varint,3,rep,packed,name=output_columns,json=outputColumns" json:"output_columns,omitempty"`
    // If set, the output is the result of rendering these expressions. The
    // expressions reference the internal columns of the processor.
    //
    // Cannot be set at the same time with output columns.
    RenderExprs []Expression `protobuf:"bytes,4,rep,name=render_exprs,json=renderExprs" json:"render_exprs"`
    // If nonzero, the first <offset> rows will be suppressed.
    Offset uint64 `protobuf:"varint,5,opt,name=offset" json:"offset"`
    // If nonzero, the processor will stop after emitting this many rows. The rows
    // suppressed by <offset>, if any, do not count towards this limit.
    Limit uint64 `protobuf:"varint,6,opt,name=limit" json:"limit"`
}
PostProcessSpec describes the processing required to obtain the output (filtering, projection). It operates on the internal schema of the processor (see ProcessorSpec).
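As a sketch, a PostProcessSpec that filters on the second internal column, projects the first and third, and paginates:

    post := PostProcessSpec{
        // Keep rows where the second internal column is positive (@2 is the
        // 1-based ordinal reference for internal column 1).
        Filter: Expression{Expr: "@2 > 0"},
        // Project internal columns 0 and 2; Projection distinguishes this
        // from "no projection at all".
        Projection:    true,
        OutputColumns: []uint32{0, 2},
        // Skip the first 5 qualifying rows, then emit at most 100.
        Offset: 5,
        Limit:  100,
    }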
func (*PostProcessSpec) Descriptor ¶
func (*PostProcessSpec) Descriptor() ([]byte, []int)
func (*PostProcessSpec) Marshal ¶
func (m *PostProcessSpec) Marshal() (dAtA []byte, err error)
func (*PostProcessSpec) ProtoMessage ¶
func (*PostProcessSpec) ProtoMessage()
func (*PostProcessSpec) Reset ¶
func (m *PostProcessSpec) Reset()
func (*PostProcessSpec) Size ¶
func (m *PostProcessSpec) Size() (n int)
func (*PostProcessSpec) String ¶
func (m *PostProcessSpec) String() string
func (*PostProcessSpec) Unmarshal ¶
func (m *PostProcessSpec) Unmarshal(dAtA []byte) error
type ProcOutputHelper ¶
type ProcOutputHelper struct {
// contains filtered or unexported fields
}
ProcOutputHelper is a helper type that performs filtering and projection on the output of a processor.
func (*ProcOutputHelper) Close ¶
func (h *ProcOutputHelper) Close()
Close signals to the output that there will be no more rows.
func (*ProcOutputHelper) EmitRow ¶
func (h *ProcOutputHelper) EmitRow(ctx context.Context, row sqlbase.EncDatumRow) (ConsumerStatus, error)
EmitRow sends a row through the post-processing stage. The same row can be reused.
It returns the consumer's status that was observed when pushing this row. If an error is returned, it's coming from the ProcOutputHelper's filtering or rendering processing; the output has not been closed.
Note: check out emitHelper() for a useful wrapper.
func (*ProcOutputHelper) Init ¶
func (h *ProcOutputHelper) Init(post *PostProcessSpec, types []sqlbase.ColumnType, evalCtx *parser.EvalContext, output RowReceiver) error
Init sets up a ProcOutputHelper. The types describe the internal schema of the processor (as described for each processor core spec); they can be omitted if there is no filtering expression. Note that the types slice may be stored directly; the caller should not modify it.
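A sketch of the Init/EmitRow/Close cycle (the ConsumerStatus value name NeedMoreRows is an assumption; post, types, evalCtx, output and rows are placeholders):

    var h ProcOutputHelper
    if err := h.Init(&post, types, &evalCtx, output); err != nil {
        return err
    }
    for _, row := range rows {
        status, err := h.EmitRow(ctx, row)
        if err != nil || status != NeedMoreRows {
            // On error the output has not been closed; on a non-OK status
            // the consumer no longer wants data rows.
            break
        }
    }
    h.Close()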
type Processor ¶
type Processor interface {
    // OutputTypes returns the column types of the results (that are to be fed
    // through an output router).
    OutputTypes() []sqlbase.ColumnType

    // Run is the main loop of the processor.
    // If wg is non-nil, wg.Done is called before exiting.
    Run(ctx context.Context, wg *sync.WaitGroup)
}
Processor is a common interface implemented by all processors, used by the higher-level flow orchestration code.
type ProcessorCoreUnion ¶
type ProcessorCoreUnion struct {
    Noop *NoopCoreSpec `protobuf:"bytes,1,opt,name=noop" json:"noop,omitempty"`
    TableReader *TableReaderSpec `protobuf:"bytes,2,opt,name=tableReader" json:"tableReader,omitempty"`
    JoinReader *JoinReaderSpec `protobuf:"bytes,3,opt,name=joinReader" json:"joinReader,omitempty"`
    Sorter *SorterSpec `protobuf:"bytes,4,opt,name=sorter" json:"sorter,omitempty"`
    Aggregator *AggregatorSpec `protobuf:"bytes,5,opt,name=aggregator" json:"aggregator,omitempty"`
    Distinct *DistinctSpec `protobuf:"bytes,7,opt,name=distinct" json:"distinct,omitempty"`
    MergeJoiner *MergeJoinerSpec `protobuf:"bytes,8,opt,name=mergeJoiner" json:"mergeJoiner,omitempty"`
    HashJoiner *HashJoinerSpec `protobuf:"bytes,9,opt,name=hashJoiner" json:"hashJoiner,omitempty"`
    Values *ValuesCoreSpec `protobuf:"bytes,10,opt,name=values" json:"values,omitempty"`
    Backfiller *BackfillerSpec `protobuf:"bytes,11,opt,name=backfiller" json:"backfiller,omitempty"`
    SetOp *AlgebraicSetOpSpec `protobuf:"bytes,12,opt,name=setOp" json:"setOp,omitempty"`
    ReadCSV *ReadCSVSpec `protobuf:"bytes,13,opt,name=readCSV" json:"readCSV,omitempty"`
    SSTWriter *SSTWriterSpec `protobuf:"bytes,14,opt,name=SSTWriter" json:"SSTWriter,omitempty"`
}
func (*ProcessorCoreUnion) Descriptor ¶
func (*ProcessorCoreUnion) Descriptor() ([]byte, []int)
func (*ProcessorCoreUnion) GetValue ¶
func (this *ProcessorCoreUnion) GetValue() interface{}
func (*ProcessorCoreUnion) Marshal ¶
func (m *ProcessorCoreUnion) Marshal() (dAtA []byte, err error)
func (*ProcessorCoreUnion) MarshalTo ¶
func (m *ProcessorCoreUnion) MarshalTo(dAtA []byte) (int, error)
func (*ProcessorCoreUnion) ProtoMessage ¶
func (*ProcessorCoreUnion) ProtoMessage()
func (*ProcessorCoreUnion) Reset ¶
func (m *ProcessorCoreUnion) Reset()
func (*ProcessorCoreUnion) SetValue ¶
func (this *ProcessorCoreUnion) SetValue(value interface{}) bool
func (*ProcessorCoreUnion) Size ¶
func (m *ProcessorCoreUnion) Size() (n int)
func (*ProcessorCoreUnion) String ¶
func (m *ProcessorCoreUnion) String() string
func (*ProcessorCoreUnion) Unmarshal ¶
func (m *ProcessorCoreUnion) Unmarshal(dAtA []byte) error
type ProcessorSpec ¶
type ProcessorSpec struct {
    // In most cases, there is one input.
    Input []InputSyncSpec `protobuf:"bytes,1,rep,name=input" json:"input"`
    Core ProcessorCoreUnion `protobuf:"bytes,2,opt,name=core" json:"core"`
    Post PostProcessSpec `protobuf:"bytes,4,opt,name=post" json:"post"`
    // In most cases, there is one output.
    Output []OutputRouterSpec `protobuf:"bytes,3,rep,name=output" json:"output"`
    // An optional identifier that can be used to correlate processors that are
    // part of the same stage (e.g. multiple joiners that are part of a
    // distributed join). This has no consequence on the running of flows, but is
    // useful for plan diagrams.
    StageID int32 `protobuf:"varint,5,opt,name=stage_id,json=stageId" json:"stage_id"`
}
Each processor has the following components:
- one or more input synchronizers; each one merges rows between one or more input streams;
- a processor "core" which encapsulates the inner logic of each processor;
- a post-processing stage which allows "inline" post-processing on results (like projection or filtering);
- one or more output synchronizers; each one directs rows to one or more output streams.
== Internal columns ==
The core outputs rows of a certain schema to the post-processing stage. We call this the "internal schema" (or "internal columns") and it differs for each type of core. Column indices in a PostProcessSpec refer to these internal columns. Some columns may be unused by the post-processing stage; processor implementations are internally optimized to not produce values for such unneeded columns.
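A sketch tying the pieces together: a TableReader core (its fields are elided here), the post-processing spec from above operating on the core's internal columns, and a pass-through output router:

    spec := ProcessorSpec{
        // A TableReader produces its own rows, so no input synchronizer.
        Core: ProcessorCoreUnion{
            TableReader: &TableReaderSpec{ /* table, spans, ... */ },
        },
        // Post operates on the core's internal columns.
        Post: post,
        Output: []OutputRouterSpec{
            {Type: OutputRouterSpec_PASS_THROUGH},
        },
        StageID: 1, // purely informational, for plan diagrams
    }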
func (*ProcessorSpec) Descriptor ¶
func (*ProcessorSpec) Descriptor() ([]byte, []int)
func (*ProcessorSpec) Marshal ¶
func (m *ProcessorSpec) Marshal() (dAtA []byte, err error)
func (*ProcessorSpec) ProtoMessage ¶
func (*ProcessorSpec) ProtoMessage()
func (*ProcessorSpec) Reset ¶
func (m *ProcessorSpec) Reset()
func (*ProcessorSpec) Size ¶
func (m *ProcessorSpec) Size() (n int)
func (*ProcessorSpec) String ¶
func (m *ProcessorSpec) String() string
func (*ProcessorSpec) Unmarshal ¶
func (m *ProcessorSpec) Unmarshal(dAtA []byte) error
type ProducerData ¶
type ProducerData struct {
    // A bunch of rows, encoded. Each datum is encoded according to the
    // corresponding DatumInfo.
    RawBytes []byte `protobuf:"bytes,1,opt,name=raw_bytes,json=rawBytes" json:"raw_bytes,omitempty"`
    // In the special case when the stream contains empty rows, the count is
    // passed instead.
    NumEmptyRows int32 `protobuf:"varint,3,opt,name=num_empty_rows,json=numEmptyRows" json:"num_empty_rows"`
    // A bunch of metadata messages.
    Metadata []RemoteProducerMetadata `protobuf:"bytes,2,rep,name=metadata" json:"metadata"`
}
ProducerData is a message that can be sent multiple times as part of a stream from a producer to a consumer. It contains 0 or more rows and/or 0 or more metadata messages.
func (*ProducerData) Descriptor ¶
func (*ProducerData) Descriptor() ([]byte, []int)
func (*ProducerData) Marshal ¶
func (m *ProducerData) Marshal() (dAtA []byte, err error)
func (*ProducerData) ProtoMessage ¶
func (*ProducerData) ProtoMessage()
func (*ProducerData) Reset ¶
func (m *ProducerData) Reset()
func (*ProducerData) Size ¶
func (m *ProducerData) Size() (n int)
func (*ProducerData) String ¶
func (m *ProducerData) String() string
func (*ProducerData) Unmarshal ¶
func (m *ProducerData) Unmarshal(dAtA []byte) error
type ProducerHeader ¶
type ProducerHeader struct {
    FlowID FlowID `protobuf:"bytes,1,opt,name=flow_id,json=flowId,customtype=FlowID" json:"flow_id"`
    StreamID StreamID `protobuf:"varint,2,opt,name=stream_id,json=streamId,casttype=StreamID" json:"stream_id"`
}
ProducerHeader is a message that is sent once at the beginning of a stream.
func (*ProducerHeader) Descriptor ¶
func (*ProducerHeader) Descriptor() ([]byte, []int)
func (*ProducerHeader) Marshal ¶
func (m *ProducerHeader) Marshal() (dAtA []byte, err error)
func (*ProducerHeader) ProtoMessage ¶
func (*ProducerHeader) ProtoMessage()
func (*ProducerHeader) Reset ¶
func (m *ProducerHeader) Reset()
func (*ProducerHeader) Size ¶
func (m *ProducerHeader) Size() (n int)
func (*ProducerHeader) String ¶
func (m *ProducerHeader) String() string
func (*ProducerHeader) Unmarshal ¶
func (m *ProducerHeader) Unmarshal(dAtA []byte) error
type ProducerMessage ¶
type ProducerMessage struct {
    Header *ProducerHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
    // Typing information. There will be one DatumInfo for each element in a row.
    // This field has to be populated on, or before, a ProducerMessage with data
    // in it, and can only be populated once.
    // TODO(andrei): It'd be nice if the typing information for streams would be
    // configured statically at plan creation time, instead of being discovered
    // dynamically through the first rows that flow.
    Typing []DatumInfo `protobuf:"bytes,2,rep,name=typing" json:"typing"`
    Data ProducerData `protobuf:"bytes,3,opt,name=data" json:"data"`
}
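A sketch of the message sequence implied by the field comments; flowID, streamID, typing and the encoded byte slices are placeholders:

    // First message on the stream: header plus typing, optionally with data.
    first := ProducerMessage{
        Header: &ProducerHeader{FlowID: flowID, StreamID: streamID},
        // Typing is set once, no later than the first message carrying data.
        Typing: typing,
        Data:   ProducerData{RawBytes: encodedRows},
    }
    // Subsequent messages carry only data and/or metadata.
    rest := ProducerMessage{
        Data: ProducerData{RawBytes: moreEncodedRows},
    }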
func (*ProducerMessage) Descriptor ¶
func (*ProducerMessage) Descriptor() ([]byte, []int)
func (*ProducerMessage) Marshal ¶
func (m *ProducerMessage) Marshal() (dAtA []byte, err error)
func (*ProducerMessage) ProtoMessage ¶
func (*ProducerMessage) ProtoMessage()
func (*ProducerMessage) Reset ¶
func (m *ProducerMessage) Reset()
func (*ProducerMessage) Size ¶
func (m *ProducerMessage) Size() (n int)
func (*ProducerMessage) String ¶
func (m *ProducerMessage) String() string
func (*ProducerMessage) Unmarshal ¶
func (m *ProducerMessage) Unmarshal(dAtA []byte) error
type ProducerMetadata ¶
type ProducerMetadata struct {
    // Only one of these fields will be set. If this ever changes, note that
    // there are consumers out there that extract the error and, if there is
    // one, forward it in isolation and drop the rest of the record.
    Ranges []roachpb.RangeInfo
    // TODO(vivek): change to type Error
    Err error
    // TraceData is sent if snowball tracing is enabled.
    TraceData []tracing.RecordedSpan
}
ProducerMetadata represents a metadata record flowing through a DistSQL flow.
func (ProducerMetadata) Empty ¶
func (meta ProducerMetadata) Empty() bool
Empty returns true if none of the fields in metadata are populated.
type ReadCSVSpec ¶
type ReadCSVSpec struct {
    Options cockroach_roachpb1.CSVOptions `protobuf:"bytes,1,opt,name=options" json:"options"`
    // sample_size is the rate at which to output rows, based on an input row's size.
    SampleSize int32 `protobuf:"varint,2,opt,name=sample_size,json=sampleSize" json:"sample_size"`
    // table_desc is the table descriptor of the CSV file.
    TableDesc cockroach_sql_sqlbase1.TableDescriptor `protobuf:"bytes,3,opt,name=table_desc,json=tableDesc" json:"table_desc"`
    // uri is a storageccl.ExportStorage URI pointing to the CSV file to be read.
    Uri string `protobuf:"bytes,4,opt,name=uri" json:"uri"`
}
ReadCSVSpec is the specification for a processor that reads a CSV file at uri with descriptor table_desc, a specified delimiter (comma), an optional comment rune, and an optional nullif string, which is nullable to differentiate between not set (nil) and the empty string (which could be used as the null marker). It outputs rows that are a sampling of the file at a rate of (row size) / sample_size. See ccl/sqlccl/csv.go for the implementation.
func (*ReadCSVSpec) Descriptor ¶
func (*ReadCSVSpec) Descriptor() ([]byte, []int)
func (*ReadCSVSpec) Marshal ¶
func (m *ReadCSVSpec) Marshal() (dAtA []byte, err error)
func (*ReadCSVSpec) ProtoMessage ¶
func (*ReadCSVSpec) ProtoMessage()
func (*ReadCSVSpec) Reset ¶
func (m *ReadCSVSpec) Reset()
func (*ReadCSVSpec) Size ¶
func (m *ReadCSVSpec) Size() (n int)
func (*ReadCSVSpec) String ¶
func (m *ReadCSVSpec) String() string
func (*ReadCSVSpec) Unmarshal ¶
func (m *ReadCSVSpec) Unmarshal(dAtA []byte) error
type RemoteProducerMetadata ¶
type RemoteProducerMetadata struct {
    // Types that are valid to be assigned to Value:
    //	*RemoteProducerMetadata_RangeInfo
    //	*RemoteProducerMetadata_Error
    //	*RemoteProducerMetadata_TraceData_
    Value isRemoteProducerMetadata_Value `protobuf_oneof:"value"`
}
RemoteProducerMetadata represents records that a producer wants to pass to a consumer, other than data rows. It is named RemoteProducerMetadata so as not to clash with ProducerMetadata, which is used internally within a node and carries a Go error rather than a proto error.
func (*RemoteProducerMetadata) Descriptor ¶
func (*RemoteProducerMetadata) Descriptor() ([]byte, []int)
func (*RemoteProducerMetadata) GetError ¶
func (m *RemoteProducerMetadata) GetError() *Error
func (*RemoteProducerMetadata) GetRangeInfo ¶
func (m *RemoteProducerMetadata) GetRangeInfo() *RemoteProducerMetadata_RangeInfos
func (*RemoteProducerMetadata) GetTraceData ¶
func (m *RemoteProducerMetadata) GetTraceData() *RemoteProducerMetadata_TraceData
func (*RemoteProducerMetadata) GetValue ¶
func (m *RemoteProducerMetadata) GetValue() isRemoteProducerMetadata_Value
func (*RemoteProducerMetadata) Marshal ¶
func (m *RemoteProducerMetadata) Marshal() (dAtA []byte, err error)
func (*RemoteProducerMetadata) MarshalTo ¶
func (m *RemoteProducerMetadata) MarshalTo(dAtA []byte) (int, error)
func (*RemoteProducerMetadata) ProtoMessage ¶
func (*RemoteProducerMetadata) ProtoMessage()
func (*RemoteProducerMetadata) Reset ¶
func (m *RemoteProducerMetadata) Reset()
func (*RemoteProducerMetadata) Size ¶
func (m *RemoteProducerMetadata) Size() (n int)
func (*RemoteProducerMetadata) String ¶
func (m *RemoteProducerMetadata) String() string
func (*RemoteProducerMetadata) Unmarshal ¶
func (m *RemoteProducerMetadata) Unmarshal(dAtA []byte) error
func (*RemoteProducerMetadata) XXX_OneofFuncs ¶
func (*RemoteProducerMetadata) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})
XXX_OneofFuncs is for the internal use of the proto package.
type RemoteProducerMetadata_Error ¶
type RemoteProducerMetadata_Error struct {
Error *Error `protobuf:"bytes,2,opt,name=error,oneof"`
}
func (*RemoteProducerMetadata_Error) MarshalTo ¶
func (m *RemoteProducerMetadata_Error) MarshalTo(dAtA []byte) (int, error)
func (*RemoteProducerMetadata_Error) Size ¶
func (m *RemoteProducerMetadata_Error) Size() (n int)
type RemoteProducerMetadata_RangeInfo ¶
type RemoteProducerMetadata_RangeInfo struct {
RangeInfo *RemoteProducerMetadata_RangeInfos `protobuf:"bytes,1,opt,name=range_info,json=rangeInfo,oneof"`
}
func (*RemoteProducerMetadata_RangeInfo) MarshalTo ¶
func (m *RemoteProducerMetadata_RangeInfo) MarshalTo(dAtA []byte) (int, error)
func (*RemoteProducerMetadata_RangeInfo) Size ¶
func (m *RemoteProducerMetadata_RangeInfo) Size() (n int)
type RemoteProducerMetadata_RangeInfos ¶
type RemoteProducerMetadata_RangeInfos struct {
RangeInfo []cockroach_roachpb3.RangeInfo `protobuf:"bytes,1,rep,name=range_info,json=rangeInfo" json:"range_info"`
}
func (*RemoteProducerMetadata_RangeInfos) Descriptor ¶
func (*RemoteProducerMetadata_RangeInfos) Descriptor() ([]byte, []int)
func (*RemoteProducerMetadata_RangeInfos) Marshal ¶
func (m *RemoteProducerMetadata_RangeInfos) Marshal() (dAtA []byte, err error)
func (*RemoteProducerMetadata_RangeInfos) MarshalTo ¶
func (m *RemoteProducerMetadata_RangeInfos) MarshalTo(dAtA []byte) (int, error)
func (*RemoteProducerMetadata_RangeInfos) ProtoMessage ¶
func (*RemoteProducerMetadata_RangeInfos) ProtoMessage()
func (*RemoteProducerMetadata_RangeInfos) Reset ¶
func (m *RemoteProducerMetadata_RangeInfos) Reset()
func (*RemoteProducerMetadata_RangeInfos) Size ¶
func (m *RemoteProducerMetadata_RangeInfos) Size() (n int)
func (*RemoteProducerMetadata_RangeInfos) String ¶
func (m *RemoteProducerMetadata_RangeInfos) String() string
func (*RemoteProducerMetadata_RangeInfos) Unmarshal ¶
func (m *RemoteProducerMetadata_RangeInfos) Unmarshal(dAtA []byte) error
type RemoteProducerMetadata_TraceData ¶
type RemoteProducerMetadata_TraceData struct {
CollectedSpans []cockroach_util_tracing.RecordedSpan `protobuf:"bytes,1,rep,name=collected_spans,json=collectedSpans" json:"collected_spans"`
}
func (*RemoteProducerMetadata_TraceData) Descriptor ¶
func (*RemoteProducerMetadata_TraceData) Descriptor() ([]byte, []int)
func (*RemoteProducerMetadata_TraceData) Marshal ¶
func (m *RemoteProducerMetadata_TraceData) Marshal() (dAtA []byte, err error)
func (*RemoteProducerMetadata_TraceData) MarshalTo ¶
func (m *RemoteProducerMetadata_TraceData) MarshalTo(dAtA []byte) (int, error)
func (*RemoteProducerMetadata_TraceData) ProtoMessage ¶
func (*RemoteProducerMetadata_TraceData) ProtoMessage()
func (*RemoteProducerMetadata_TraceData) Reset ¶
func (m *RemoteProducerMetadata_TraceData) Reset()
func (*RemoteProducerMetadata_TraceData) Size ¶
func (m *RemoteProducerMetadata_TraceData) Size() (n int)
func (*RemoteProducerMetadata_TraceData) String ¶
func (m *RemoteProducerMetadata_TraceData) String() string
func (*RemoteProducerMetadata_TraceData) Unmarshal ¶
func (m *RemoteProducerMetadata_TraceData) Unmarshal(dAtA []byte) error
type RemoteProducerMetadata_TraceData_ ¶
type RemoteProducerMetadata_TraceData_ struct {
TraceData *RemoteProducerMetadata_TraceData `protobuf:"bytes,3,opt,name=trace_data,json=traceData,oneof"`
}
func (*RemoteProducerMetadata_TraceData_) MarshalTo ¶
func (m *RemoteProducerMetadata_TraceData_) MarshalTo(dAtA []byte) (int, error)
func (*RemoteProducerMetadata_TraceData_) Size ¶
func (m *RemoteProducerMetadata_TraceData_) Size() (n int)
type RepeatableRowSource ¶
type RepeatableRowSource struct {
// contains filtered or unexported fields
}
RepeatableRowSource is a RowSource used in benchmarks to avoid having to reinitialize a new RowSource every time during multiple passes of the input. It is intended to be initialized with all rows.
func NewRepeatableRowSource ¶
func NewRepeatableRowSource(types []sqlbase.ColumnType, rows sqlbase.EncDatumRows) *RepeatableRowSource
NewRepeatableRowSource creates a RepeatableRowSource with the given schema and rows. types is optional if at least one row is provided.
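A sketch of the benchmark pattern this type exists for; b is a *testing.B, and runProcessorUnderTest is a hypothetical benchmark body that consumes the source to exhaustion:

    src := NewRepeatableRowSource(types, rows)
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        runProcessorUnderTest(src) // hypothetical
        src.Reset()                // rewind so the next pass sees all rows again
    }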
func (*RepeatableRowSource) ConsumerClosed ¶
func (r *RepeatableRowSource) ConsumerClosed()
ConsumerClosed is part of the RowSource interface.
func (*RepeatableRowSource) ConsumerDone ¶
func (r *RepeatableRowSource) ConsumerDone()
ConsumerDone is part of the RowSource interface.
func (*RepeatableRowSource) Next ¶
func (r *RepeatableRowSource) Next() (sqlbase.EncDatumRow, ProducerMetadata)
Next is part of the RowSource interface.
func (*RepeatableRowSource) Reset ¶
func (r *RepeatableRowSource) Reset()
Reset resets the RepeatableRowSource such that a subsequent call to Next() returns the first row.
func (*RepeatableRowSource) Types ¶
func (r *RepeatableRowSource) Types() []sqlbase.ColumnType
Types is part of the RowSource interface.
type RowBuffer ¶
type RowBuffer struct {
    // ProducerClosed is used when the RowBuffer is used as a RowReceiver; it is
    // set to true when the sender calls ProducerDone().
    ProducerClosed bool

    // Done is used when the RowBuffer is used as a RowSource; it is set to true
    // when the receiver has read all the rows.
    Done bool

    ConsumerStatus ConsumerStatus
    // contains filtered or unexported fields
}
RowBuffer is an implementation of RowReceiver that buffers (accumulates) results in memory, as well as an implementation of RowSource that returns records from a record buffer. Just for tests.
func NewRowBuffer ¶
func NewRowBuffer(types []sqlbase.ColumnType, rows sqlbase.EncDatumRows, hooks RowBufferArgs) *RowBuffer
NewRowBuffer creates a RowBuffer with the given schema and initial rows. The types are optional if there is at least one row.
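A sketch of the test pattern: one RowBuffer as the processor's input, another as its output, then read the output back through its RowSource side (runProcessorUnderTest and the types/rows variables are placeholders):

    in := NewRowBuffer(inputTypes, inputRows, RowBufferArgs{})
    out := NewRowBuffer(outputTypes, nil /* rows */, RowBufferArgs{})

    runProcessorUnderTest(in, out) // hypothetical: pushes results into out

    for {
        row, meta := out.Next()
        if row == nil && meta.Empty() {
            break
        }
        // assert on row / meta
    }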
func (*RowBuffer) ConsumerClosed ¶
func (rb *RowBuffer) ConsumerClosed()
ConsumerClosed is part of the RowSource interface.
func (*RowBuffer) ConsumerDone ¶
func (rb *RowBuffer) ConsumerDone()
ConsumerDone is part of the RowSource interface.
func (*RowBuffer) Next ¶
func (rb *RowBuffer) Next() (sqlbase.EncDatumRow, ProducerMetadata)
Next is part of the RowSource interface.
There's no synchronization here with Push(). The assumption is that these two methods are not called concurrently.
func (*RowBuffer) ProducerDone ¶
func (rb *RowBuffer) ProducerDone()
ProducerDone is part of the RowReceiver interface.
func (*RowBuffer) Push ¶
func (rb *RowBuffer) Push(row sqlbase.EncDatumRow, meta ProducerMetadata) ConsumerStatus
Push is part of the RowReceiver interface.
func (*RowBuffer) Types ¶
func (rb *RowBuffer) Types() []sqlbase.ColumnType
Types is part of the RowSource interface.
type RowBufferArgs ¶
type RowBufferArgs struct {
    // If not set, then the RowBuffer will behave like a RowChannel and not
    // accumulate rows after it's been put in draining mode. If set, rows will
    // still be accumulated. Useful for tests that want to observe what rows
    // have been pushed after draining.
    AccumulateRowsWhileDraining bool
    // OnConsumerDone, if specified, is called as the first thing in the
    // ConsumerDone() method.
    OnConsumerDone func(*RowBuffer)
    // OnConsumerClosed, if specified, is called as the first thing in the
    // ConsumerClosed() method.
    OnConsumerClosed func(*RowBuffer)
    // OnNext, if specified, is called as the first thing in the Next() method.
    // If it returns an empty row and metadata, then RowBuffer.Next() is allowed
    // to run normally. Otherwise, the values are returned from RowBuffer.Next().
    OnNext func(*RowBuffer) (sqlbase.EncDatumRow, ProducerMetadata)
}
RowBufferArgs contains testing-oriented parameters for a RowBuffer.
type RowChannel ¶
type RowChannel struct {
    // The channel on which rows are delivered.
    C <-chan RowChannelMsg
    // contains filtered or unexported fields
}
RowChannel is a thin layer over a RowChannelMsg channel, which can be used to transfer rows between goroutines.
func (*RowChannel) ConsumerClosed ¶
func (rc *RowChannel) ConsumerClosed()
ConsumerClosed is part of the RowSource interface.
func (*RowChannel) ConsumerDone ¶
func (rc *RowChannel) ConsumerDone()
ConsumerDone is part of the RowSource interface.
func (*RowChannel) Init ¶
func (rc *RowChannel) Init(types []sqlbase.ColumnType)
Init initializes the RowChannel with the default buffer size.
func (*RowChannel) InitWithBufSize ¶
func (rc *RowChannel) InitWithBufSize(types []sqlbase.ColumnType, chanBufSize int)
InitWithBufSize initializes the RowChannel with a given buffer size.
func (*RowChannel) Next ¶
func (rc *RowChannel) Next() (sqlbase.EncDatumRow, ProducerMetadata)
Next is part of the RowSource interface.
func (*RowChannel) ProducerDone ¶
func (rc *RowChannel) ProducerDone()
ProducerDone is part of the RowReceiver interface.
func (*RowChannel) Push ¶
func (rc *RowChannel) Push(row sqlbase.EncDatumRow, meta ProducerMetadata) ConsumerStatus
Push is part of the RowReceiver interface.
func (*RowChannel) Types ¶
func (rc *RowChannel) Types() []sqlbase.ColumnType
Types is part of the RowSource interface.
type RowChannelMsg ¶
type RowChannelMsg struct {
    // Only one of these fields will be set.
    Row sqlbase.EncDatumRow
    Meta ProducerMetadata
}
RowChannelMsg is the message used in the channels that implement local physical streams (i.e. the RowChannel's).
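A sketch of a local stream between two goroutines (rowsToSend and types are placeholders, and NeedMoreRows is an assumed ConsumerStatus value name):

    var rc RowChannel
    rc.Init(types)

    go func() {
        for _, row := range rowsToSend {
            if rc.Push(row, ProducerMetadata{}) != NeedMoreRows {
                break // simplified; a real producer would drain metadata
            }
        }
        rc.ProducerDone()
    }()

    for {
        row, meta := rc.Next()
        if row == nil && meta.Empty() {
            break // channel exhausted
        }
        // handle row or metadata
    }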
type RowDisposer ¶
type RowDisposer struct{}
RowDisposer is a RowReceiver that discards any rows Push()ed.
func (*RowDisposer) ProducerDone ¶
func (r *RowDisposer) ProducerDone()
ProducerDone is part of the RowReceiver interface.
func (*RowDisposer) Push ¶
func (r *RowDisposer) Push(row sqlbase.EncDatumRow, meta ProducerMetadata) ConsumerStatus
Push is part of the RowReceiver interface.
type RowReceiver ¶
type RowReceiver interface {
    // Push sends a record to the consumer of this RowReceiver. Exactly one of
    // the row/meta must be specified (i.e. either row needs to be non-nil or
    // meta needs to be non-Empty()). May block.
    //
    // The return value indicates the current status of the consumer. Depending
    // on it, producers are expected to drain or shut down. In all cases,
    // ProducerDone() needs to be called (after draining is done, if draining
    // was requested).
    //
    // The sender must not modify the row after calling this function.
    //
    // After DrainRequested is returned, it is expected that all future calls
    // only carry metadata (however that is not enforced and implementations
    // should be prepared to discard non-metadata rows). If ConsumerClosed is
    // returned, implementations have to ignore further calls to Push() (such
    // calls are allowed because there might be multiple producers for a single
    // RowReceiver and they might not all be aware of the last status returned).
    //
    // Implementations of Push() must be thread-safe.
    Push(row sqlbase.EncDatumRow, meta ProducerMetadata) ConsumerStatus

    // ProducerDone is called when the producer has pushed all the rows and
    // metadata; it causes the RowReceiver to process all rows and clean up.
    //
    // ProducerDone() cannot be called concurrently with Push(), and after it
    // is called, no other method can be called.
    ProducerDone()
}
RowReceiver is any component of a flow that receives rows from another component. It can be an input synchronizer, a router, or a mailbox.
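A sketch of a producer honoring the contract above (dst is a RowReceiver; the NeedMoreRows value name is an assumption, while DrainRequested and ConsumerClosed are named in the interface comments):

    switch dst.Push(row, ProducerMetadata{}) {
    case NeedMoreRows:
        // Keep pushing data rows.
    case DrainRequested:
        // Stop pushing data rows; forward only metadata from here on.
    case ConsumerClosed:
        // Stop pushing entirely; further Pushes would be ignored.
    }
    // In all cases, finish with:
    dst.ProducerDone()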
type RowSource ¶
type RowSource interface {
    // Types returns the schema for the rows in this source.
    Types() []sqlbase.ColumnType

    // Next returns the next record that a producer has pushed into this
    // RowSource. At most one of the return values will be non-empty. Both of
    // them can be empty when the RowSource has been exhausted - no more records
    // are coming and any further method calls will be no-ops.
    //
    // A ProducerMetadata record may contain an error. In that case, this
    // interface is oblivious about the semantics: implementers may continue
    // returning different rows on future calls, or may return an empty record
    // (thus asking the consumer to stop asking for rows). In particular,
    // implementers are not required to only return metadata records from this
    // point on (which means, for example, that they're not required to
    // automatically ask every producer to drain, in case there's multiple
    // producers). Therefore, consumers need to be aware that some rows might
    // have been skipped in case they continue to consume rows. Usually a
    // consumer should react to an error by calling ConsumerDone(), thus asking
    // the RowSource to drain, and separately discard any future data rows.
    Next() (sqlbase.EncDatumRow, ProducerMetadata)

    // ConsumerDone lets the producer know that we will not need any more data
    // rows. The producer is expected to start draining and only send metadata
    // rows.
    //
    // May block. If the consumer of the source stops consuming rows before
    // Next indicates that there are no more rows, ConsumerDone() and/or
    // ConsumerClosed() must be called; it is a no-op to call these methods
    // after all the rows were consumed (i.e. after Next() returned an empty
    // row).
    ConsumerDone()

    // ConsumerClosed informs the producer that the consumer will not be reading
    // any more rows. The producer is expected to shut down without sending
    // anything else.
    //
    // Like ConsumerDone(), if the consumer of the source stops consuming rows
    // before Next indicates that there are no more rows, ConsumerDone() and/or
    // ConsumerClosed() must be called; it is a no-op to call these methods
    // after all the rows were consumed (i.e. after Next() returned an empty
    // row).
    ConsumerClosed()
}
RowSource is any component of a flow that produces rows that can be consumed by another component.
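A consumer loop that follows these rules might look like the following sketch; processRow is a hypothetical per-row callback, and the error is read from ProducerMetadata.Err (see StreamDecoder.GetRow below).

// Hypothetical consumer: drains the source on the first error and
// discards any data rows that arrive while draining.
func consumeAll(src RowSource, processRow func(sqlbase.EncDatumRow) error) error {
	var firstErr error
	for {
		row, meta := src.Next()
		if !meta.Empty() {
			if meta.Err != nil && firstErr == nil {
				firstErr = meta.Err
				// Ask the source to drain; only metadata is wanted now.
				src.ConsumerDone()
			}
			continue
		}
		if row == nil {
			// Both return values are empty: the source is exhausted.
			return firstErr
		}
		if firstErr != nil {
			continue // draining: skip data rows
		}
		if err := processRow(row); err != nil {
			// Nothing further is needed; tell the producer to shut down.
			src.ConsumerClosed()
			return err
		}
	}
}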
type SSTWriterSpec ¶
type SSTWriterSpec struct {
	// destination as a storageccl.ExportStorage URI pointing to an export
	// store location (directory).
	Destination string `protobuf:"bytes,1,opt,name=destination" json:"destination"`
	// name is the file name that will be written by the export store.
	Name string `protobuf:"bytes,2,opt,name=name" json:"name"`
	// walltimeNanos is the MVCC time at which the created KVs will be written.
	WalltimeNanos int64 `protobuf:"varint,3,opt,name=walltimeNanos" json:"walltimeNanos"`
}
SSTWriterSpec is the specification for a processor that consumes rows, uses tempStorage to sort them, then writes them all to a single SST file at uri/name. walltime is used as the MVCC timestamp. It outputs a single row containing the file name, size, checksum, and observed start and end keys. See ccl/sqlccl/csv.go for the implementation.
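A spec might be populated as in this hypothetical sketch; the destination URI and file name are placeholders.

spec := SSTWriterSpec{
	// Placeholder export store URI (see storageccl.ExportStorage).
	Destination:   "nodelocal:///import",
	Name:          "0001.sst",
	WalltimeNanos: time.Now().UnixNano(), // MVCC timestamp for the written KVs
}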
func (*SSTWriterSpec) Descriptor ¶
func (*SSTWriterSpec) Descriptor() ([]byte, []int)
func (*SSTWriterSpec) Marshal ¶
func (m *SSTWriterSpec) Marshal() (dAtA []byte, err error)
func (*SSTWriterSpec) ProtoMessage ¶
func (*SSTWriterSpec) ProtoMessage()
func (*SSTWriterSpec) Reset ¶
func (m *SSTWriterSpec) Reset()
func (*SSTWriterSpec) Size ¶
func (m *SSTWriterSpec) Size() (n int)
func (*SSTWriterSpec) String ¶
func (m *SSTWriterSpec) String() string
func (*SSTWriterSpec) Unmarshal ¶
func (m *SSTWriterSpec) Unmarshal(dAtA []byte) error
type ServerConfig ¶
type ServerConfig struct {
	log.AmbientContext

	Settings *cluster.Settings

	// DB is a handle to the cluster.
	DB *client.DB
	// FlowDB is the DB that flows should use for interacting with the
	// database. This DB has to be set such that it bypasses the local
	// TxnCoordSender. We want only the TxnCoordSender on the gateway to be
	// involved with requests performed by DistSQL.
	FlowDB     *client.DB
	RPCContext *rpc.Context
	Stopper    *stop.Stopper

	TestingKnobs TestingKnobs

	ParentMemoryMonitor *mon.BytesMonitor

	// TempStorage is used by some DistSQL processors to store rows when the
	// working set is larger than can be stored in memory.
	TempStorage             engine.Engine
	TempStorageMaxSizeBytes int64

	Metrics *DistSQLMetrics

	// NodeID is the id of the node on which this Server is running.
	NodeID    *base.NodeIDContainer
	ClusterID uuid.UUID

	// JobRegistry manages jobs being used by this Server.
	JobRegistry *jobs.Registry

	// A handle to gossip used to broadcast the node's DistSQL version.
	Gossip *gossip.Gossip

	// Extra1_0Compatibility enables compatibility with flows planned on 1.0.x
	// gateways. See server.Config.Extra1_0Compatibility.
	Extra1_0Compatibility bool
}
ServerConfig encompasses the configuration required to create a DistSQLServer.
type ServerImpl ¶
type ServerImpl struct {
	ServerConfig
	// contains filtered or unexported fields
}
ServerImpl implements the server for the distributed SQL APIs.
func NewServer ¶
func NewServer(ctx context.Context, cfg ServerConfig) *ServerImpl
NewServer instantiates a DistSQLServer.
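Wiring a server up amounts to filling in ServerConfig and calling NewServer; in this hypothetical sketch, every handle (stopper, db, and so on) is assumed to come from the surrounding node initialization.

cfg := ServerConfig{
	AmbientContext:          ambientCtx,
	Settings:                settings,
	DB:                      db,
	FlowDB:                  flowDB, // must bypass the local TxnCoordSender
	RPCContext:              rpcCtx,
	Stopper:                 stopper,
	ParentMemoryMonitor:     parentMon,
	TempStorage:             tempEngine,
	TempStorageMaxSizeBytes: 32 << 20, // arbitrary example cap
	Metrics:                 &metrics,
	NodeID:                  nodeIDContainer,
	ClusterID:               clusterID,
	JobRegistry:             jobRegistry,
	Gossip:                  g,
}
distSQLServer := NewServer(ctx, cfg)
// The server is then registered with gRPC via RegisterDistSQLServer.
RegisterDistSQLServer(grpcServer, distSQLServer)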
func (*ServerImpl) FlowStream ¶
func (ds *ServerImpl) FlowStream(stream DistSQL_FlowStreamServer) error
FlowStream is part of the DistSQLServer interface.
func (*ServerImpl) RunSyncFlow ¶
func (ds *ServerImpl) RunSyncFlow(stream DistSQL_RunSyncFlowServer) error
RunSyncFlow is part of the DistSQLServer interface.
func (*ServerImpl) SetupFlow ¶
func (ds *ServerImpl) SetupFlow(ctx context.Context, req *SetupFlowRequest) (*SimpleResponse, error)
SetupFlow is part of the DistSQLServer interface.
func (*ServerImpl) SetupSyncFlow ¶
func (ds *ServerImpl) SetupSyncFlow(ctx context.Context, req *SetupFlowRequest, output RowReceiver) (context.Context, *Flow, error)
SetupSyncFlow sets up a synchronous flow, connecting the sync response output stream to the given RowReceiver. The flow is not started. The flow will be associated with the given context. Note: the returned context contains a span that must be finished through Flow.Cleanup.
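A hypothetical caller might use it as below; the Flow methods Start, Wait, and Cleanup are assumed from the Flow type documented earlier in this package.

flowCtx, flow, err := ds.SetupSyncFlow(ctx, req, output)
if err != nil {
	return err
}
// The returned context carries a span that Cleanup finishes.
defer flow.Cleanup(flowCtx)
flow.Start(flowCtx, func() {} /* doneFn */)
flow.Wait() // all rows have been pushed to output when this returns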
type SetupFlowRequest ¶
type SetupFlowRequest struct {
	Txn cockroach_roachpb1.Transaction `protobuf:"bytes,1,opt,name=txn" json:"txn"`
	// Version of distsqlrun protocol; a server accepts a certain range of
	// versions, up to its own version. See server.go for more details.
	Version     DistSQLVersion `protobuf:"varint,5,opt,name=version,casttype=DistSQLVersion" json:"version"`
	Flow        FlowSpec       `protobuf:"bytes,3,opt,name=flow" json:"flow"`
	EvalContext EvalContext    `protobuf:"bytes,6,opt,name=evalContext" json:"evalContext"`
}
func (*SetupFlowRequest) Descriptor ¶
func (*SetupFlowRequest) Descriptor() ([]byte, []int)
func (*SetupFlowRequest) Marshal ¶
func (m *SetupFlowRequest) Marshal() (dAtA []byte, err error)
func (*SetupFlowRequest) ProtoMessage ¶
func (*SetupFlowRequest) ProtoMessage()
func (*SetupFlowRequest) Reset ¶
func (m *SetupFlowRequest) Reset()
func (*SetupFlowRequest) Size ¶
func (m *SetupFlowRequest) Size() (n int)
func (*SetupFlowRequest) String ¶
func (m *SetupFlowRequest) String() string
func (*SetupFlowRequest) Unmarshal ¶
func (m *SetupFlowRequest) Unmarshal(dAtA []byte) error
type SimpleResponse ¶
type SimpleResponse struct {
Error *Error `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
}
func (*SimpleResponse) Descriptor ¶
func (*SimpleResponse) Descriptor() ([]byte, []int)
func (*SimpleResponse) Marshal ¶
func (m *SimpleResponse) Marshal() (dAtA []byte, err error)
func (*SimpleResponse) ProtoMessage ¶
func (*SimpleResponse) ProtoMessage()
func (*SimpleResponse) Reset ¶
func (m *SimpleResponse) Reset()
func (*SimpleResponse) Size ¶
func (m *SimpleResponse) Size() (n int)
func (*SimpleResponse) String ¶
func (m *SimpleResponse) String() string
func (*SimpleResponse) Unmarshal ¶
func (m *SimpleResponse) Unmarshal(dAtA []byte) error
type SorterSpec ¶
type SorterSpec struct {
	OutputOrdering Ordering `protobuf:"bytes,1,opt,name=output_ordering,json=outputOrdering" json:"output_ordering"`
	// Ordering match length, specifying that the input is already sorted by
	// the first 'n' output ordering columns, can be optionally specified for
	// possible speed-ups taking advantage of the partial orderings.
	OrderingMatchLen uint32 `protobuf:"varint,2,opt,name=ordering_match_len,json=orderingMatchLen" json:"ordering_match_len"`
}
SorterSpec is the specification for a "sorting aggregator". A sorting processor sorts the elements of the input stream, providing a guaranteed output ordering regardless of the input ordering. The output ordering is determined by a configurable set of columns.
The "internal columns" of a Sorter (see ProcessorSpec) are the same as the input columns.
func (*SorterSpec) Descriptor ¶
func (*SorterSpec) Descriptor() ([]byte, []int)
func (*SorterSpec) Marshal ¶
func (m *SorterSpec) Marshal() (dAtA []byte, err error)
func (*SorterSpec) ProtoMessage ¶
func (*SorterSpec) ProtoMessage()
func (*SorterSpec) Reset ¶
func (m *SorterSpec) Reset()
func (*SorterSpec) Size ¶
func (m *SorterSpec) Size() (n int)
func (*SorterSpec) String ¶
func (m *SorterSpec) String() string
func (*SorterSpec) Unmarshal ¶
func (m *SorterSpec) Unmarshal(dAtA []byte) error
type StreamDecoder ¶
type StreamDecoder struct {
// contains filtered or unexported fields
}
StreamDecoder converts a sequence of ProducerMessage to rows and metadata records.
Sample usage:
sd := StreamDecoder{}
var row sqlbase.EncDatumRow
for each message in stream {
	err := sd.AddMessage(msg)
	if err != nil { ... }
	for {
		row, meta, err := sd.GetRow(row)
		if err != nil { ... }
		if row == nil && meta.Empty() {
			// No more rows in this message.
			break
		}
		// Use <row>
		...
	}
}
AddMessage can be called multiple times before getting the rows, but this will cause data to accumulate internally.
func (*StreamDecoder) AddMessage ¶
func (sd *StreamDecoder) AddMessage(msg *ProducerMessage) error
AddMessage adds the data in a ProducerMessage to the decoder.
The StreamDecoder may keep a reference to msg.Data.RawBytes and msg.Data.Metadata until all the rows in the message are retrieved with GetRow.
If an error is returned, no records have been buffered in the StreamDecoder.
func (*StreamDecoder) GetRow ¶
func (sd *StreamDecoder) GetRow(rowBuf sqlbase.EncDatumRow) (sqlbase.EncDatumRow, ProducerMetadata, error)
GetRow returns a row received in the stream. A row buffer can be provided optionally.
Returns an empty row if there are no more rows received so far.
A decoding error may be returned. Note that these are separate from errors coming from upstream (through ProducerMetadata.Err).
type StreamEncoder ¶
type StreamEncoder struct {
// contains filtered or unexported fields
}
StreamEncoder converts EncDatum rows into a sequence of ProducerMessage.
Sample usage:
se := StreamEncoder{}

for /* each batch of rows */ {
	for ... {
		err := se.AddRow(...)
		...
	}
	msg := se.FormMessage(ctx)
	// Send out message.
	...
}
func (*StreamEncoder) AddMetadata ¶
func (se *StreamEncoder) AddMetadata(meta ProducerMetadata)
AddMetadata encodes a metadata message. Unlike AddRow(), it cannot fail. This is important for the caller because a failure to encode a piece of metadata (particularly one that contains an error) would not be recoverable.
Metadata records lose their ordering with respect to the data rows. The convention is that the StreamDecoder will return them first, before the data rows, thus ensuring that rows produced _after_ an error are not received _before_ the error.
func (*StreamEncoder) AddRow ¶
func (se *StreamEncoder) AddRow(row sqlbase.EncDatumRow) error
AddRow encodes a data row into the message being formed.
func (*StreamEncoder) FormMessage ¶
func (se *StreamEncoder) FormMessage(ctx context.Context) *ProducerMessage
FormMessage populates a message containing the rows added since the last call to FormMessage. The returned ProducerMessage should be treated as immutable.
type StreamEndpointSpec ¶
type StreamEndpointSpec struct {
	Type StreamEndpointSpec_Type `protobuf:"varint,1,opt,name=type,enum=cockroach.sql.distsqlrun.StreamEndpointSpec_Type" json:"type"`
	// The ID of this stream.
	//
	// For LOCAL streams, both ends of the stream are part of the flow on this
	// machine (and there must be a corresponding endpoint with the same ID).
	//
	// For REMOTE streams, this ID is used in the ProducerHeader when
	// connecting to the other host.
	//
	// For SYNC_RESPONSE streams, the ID is unused.
	StreamID StreamID `protobuf:"varint,2,opt,name=stream_id,json=streamId,casttype=StreamID" json:"stream_id"`
	// Serving address for the target host, only used for outgoing REMOTE
	// streams.
	TargetAddr string `protobuf:"bytes,3,opt,name=target_addr,json=targetAddr" json:"target_addr"`
}
StreamEndpointSpec describes one of the endpoints (input or output) of a physical stream.
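Hypothetical endpoint specs for the two common stream types (the Type constants are listed below):

local := StreamEndpointSpec{
	Type:     StreamEndpointSpec_LOCAL,
	StreamID: 1, // must match the corresponding endpoint in this flow
}
remote := StreamEndpointSpec{
	Type:       StreamEndpointSpec_REMOTE,
	StreamID:   2, // used in the ProducerHeader when dialing the target
	TargetAddr: "node2:26257", // placeholder serving address
}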
func (*StreamEndpointSpec) Descriptor ¶
func (*StreamEndpointSpec) Descriptor() ([]byte, []int)
func (*StreamEndpointSpec) Marshal ¶
func (m *StreamEndpointSpec) Marshal() (dAtA []byte, err error)
func (*StreamEndpointSpec) MarshalTo ¶
func (m *StreamEndpointSpec) MarshalTo(dAtA []byte) (int, error)
func (*StreamEndpointSpec) ProtoMessage ¶
func (*StreamEndpointSpec) ProtoMessage()
func (*StreamEndpointSpec) Reset ¶
func (m *StreamEndpointSpec) Reset()
func (*StreamEndpointSpec) Size ¶
func (m *StreamEndpointSpec) Size() (n int)
func (*StreamEndpointSpec) String ¶
func (m *StreamEndpointSpec) String() string
func (*StreamEndpointSpec) Unmarshal ¶
func (m *StreamEndpointSpec) Unmarshal(dAtA []byte) error
type StreamEndpointSpec_Type ¶
type StreamEndpointSpec_Type int32
const (
	// Stream that is part of the local flow.
	StreamEndpointSpec_LOCAL StreamEndpointSpec_Type = 0
	// Stream that has the other endpoint on a different node.
	StreamEndpointSpec_REMOTE StreamEndpointSpec_Type = 1
	// Special stream used when in "sync flow" mode. In this mode, we return
	// results directly as part of the RPC call that set up the flow. This
	// saves overhead (extra RPCs) compared to the normal mode where the RPC
	// just sets up the flow. This type can only be used with outbound
	// endpoints.
	StreamEndpointSpec_SYNC_RESPONSE StreamEndpointSpec_Type = 2
)
func (StreamEndpointSpec_Type) Enum ¶
func (x StreamEndpointSpec_Type) Enum() *StreamEndpointSpec_Type
func (StreamEndpointSpec_Type) EnumDescriptor ¶
func (StreamEndpointSpec_Type) EnumDescriptor() ([]byte, []int)
func (StreamEndpointSpec_Type) String ¶
func (x StreamEndpointSpec_Type) String() string
func (*StreamEndpointSpec_Type) UnmarshalJSON ¶
func (x *StreamEndpointSpec_Type) UnmarshalJSON(data []byte) error
type StreamID ¶
type StreamID int
StreamID identifies a stream; it may be local to a flow or it may cross machine boundaries. The identifier can only be used in the context of a specific flow.
type TableReaderSpan ¶
type TableReaderSpan struct {
	// TODO(radu): the dist_sql APIs should be agnostic to how we map tables to
	// KVs. The span should be described as starting and ending lists of values
	// for a prefix of the index columns, along with inclusive/exclusive flags.
	Span cockroach_roachpb1.Span `protobuf:"bytes,1,opt,name=span" json:"span"`
}
func (*TableReaderSpan) Descriptor ¶
func (*TableReaderSpan) Descriptor() ([]byte, []int)
func (*TableReaderSpan) Marshal ¶
func (m *TableReaderSpan) Marshal() (dAtA []byte, err error)
func (*TableReaderSpan) ProtoMessage ¶
func (*TableReaderSpan) ProtoMessage()
func (*TableReaderSpan) Reset ¶
func (m *TableReaderSpan) Reset()
func (*TableReaderSpan) Size ¶
func (m *TableReaderSpan) Size() (n int)
func (*TableReaderSpan) String ¶
func (m *TableReaderSpan) String() string
func (*TableReaderSpan) Unmarshal ¶
func (m *TableReaderSpan) Unmarshal(dAtA []byte) error
type TableReaderSpec ¶
type TableReaderSpec struct {
	Table cockroach_sql_sqlbase1.TableDescriptor `protobuf:"bytes,1,opt,name=table" json:"table"`
	// If 0, we use the primary index. If non-zero, we use the index_idx-th
	// index, i.e. table.indexes[index_idx-1].
	IndexIdx uint32            `protobuf:"varint,2,opt,name=index_idx,json=indexIdx" json:"index_idx"`
	Reverse  bool              `protobuf:"varint,3,opt,name=reverse" json:"reverse"`
	Spans    []TableReaderSpan `protobuf:"bytes,4,rep,name=spans" json:"spans"`
	// A hint for how many rows the consumer of the table reader output might
	// need. This is used to size the initial KV batches to try to avoid
	// reading many more rows than needed by the processor receiving the
	// output.
	//
	// Not used if there is a limit set in the PostProcessSpec of this
	// processor (that value will be used for sizing batches instead).
	LimitHint int64 `protobuf:"varint,5,opt,name=limit_hint,json=limitHint" json:"limit_hint"`
}
TableReaderSpec is the specification for a "table reader". A table reader performs KV operations to retrieve rows for a table and outputs the desired columns of the rows that pass a filter expression.
The "internal columns" of a TableReader (see ProcessorSpec) are all the columns of the table. Internally, only the values for the columns needed by the post-processing stage are be populated.
func (*TableReaderSpec) Descriptor ¶
func (*TableReaderSpec) Descriptor() ([]byte, []int)
func (*TableReaderSpec) Marshal ¶
func (m *TableReaderSpec) Marshal() (dAtA []byte, err error)
func (*TableReaderSpec) ProtoMessage ¶
func (*TableReaderSpec) ProtoMessage()
func (*TableReaderSpec) Reset ¶
func (m *TableReaderSpec) Reset()
func (*TableReaderSpec) Size ¶
func (m *TableReaderSpec) Size() (n int)
func (*TableReaderSpec) String ¶
func (m *TableReaderSpec) String() string
func (*TableReaderSpec) Unmarshal ¶
func (m *TableReaderSpec) Unmarshal(dAtA []byte) error
type TestingKnobs ¶
type TestingKnobs struct {
	// RunBeforeBackfillChunk is called before executing each chunk of a
	// backfill during a schema change operation. It is called with the
	// current span and returns an error which eventually is returned to the
	// caller of SchemaChanger.exec(). It is called at the start of the
	// backfill function passed into the transaction executing the chunk.
	RunBeforeBackfillChunk func(sp roachpb.Span) error

	// RunAfterBackfillChunk is called after executing each chunk of a
	// backfill during a schema change operation. It is called just before
	// returning from the backfill function passed into the transaction
	// executing the chunk. It is always called even when the backfill
	// function returns an error, or if the table has already been dropped.
	RunAfterBackfillChunk func()

	// MemoryLimitBytes specifies a maximum amount of working memory that a
	// processor that supports falling back to disk can use. Must be >= 1 to
	// enable. Once this limit is hit, processors employ their on-disk
	// implementation regardless of applicable cluster settings.
	MemoryLimitBytes int64
}
TestingKnobs are the testing knobs.
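For example, a hypothetical test might cap working memory at one byte so that any processor with a disk-based fallback spills immediately, while observing each backfill chunk:

knobs := TestingKnobs{
	MemoryLimitBytes: 1,
	RunBeforeBackfillChunk: func(sp roachpb.Span) error {
		fmt.Printf("about to backfill %v\n", sp) // placeholder side effect
		return nil
	},
}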
func (*TestingKnobs) ModuleTestingKnobs ¶
func (*TestingKnobs) ModuleTestingKnobs()
ModuleTestingKnobs is part of the base.ModuleTestingKnobs interface.
type ValuesCoreSpec ¶
type ValuesCoreSpec struct {
	// There is one DatumInfo for each element in a row.
	Columns []DatumInfo `protobuf:"bytes,1,rep,name=columns" json:"columns"`
	// Each raw block encodes one or more data rows; each datum is encoded
	// according to the corresponding DatumInfo.
	RawBytes [][]byte `protobuf:"bytes,2,rep,name=raw_bytes,json=rawBytes" json:"raw_bytes,omitempty"`
}
ValuesCoreSpec is the core of a processor that has no inputs and generates "pre-canned" rows. This is not intended to be used for very large datasets.
func (*ValuesCoreSpec) Descriptor ¶
func (*ValuesCoreSpec) Descriptor() ([]byte, []int)
func (*ValuesCoreSpec) Marshal ¶
func (m *ValuesCoreSpec) Marshal() (dAtA []byte, err error)
func (*ValuesCoreSpec) ProtoMessage ¶
func (*ValuesCoreSpec) ProtoMessage()
func (*ValuesCoreSpec) Reset ¶
func (m *ValuesCoreSpec) Reset()
func (*ValuesCoreSpec) Size ¶
func (m *ValuesCoreSpec) Size() (n int)
func (*ValuesCoreSpec) String ¶
func (m *ValuesCoreSpec) String() string
func (*ValuesCoreSpec) Unmarshal ¶
func (m *ValuesCoreSpec) Unmarshal(dAtA []byte) error
Source Files ¶
- aggregator.go
- algebraic_set_op.go
- api.go
- api.pb.go
- backfiller.go
- base.go
- columnbackfiller.go
- data.go
- data.pb.go
- disk_row_container.go
- distinct.go
- expr.go
- flow.go
- flow_diagram.go
- flow_registry.go
- flow_scheduler.go
- hash_row_container.go
- hashjoiner.go
- inbound.go
- indexbackfiller.go
- input_sync.go
- joinerbase.go
- joinreader.go
- mergejoiner.go
- metrics.go
- outbox.go
- processors.go
- processors.pb.go
- routers.go
- row_container.go
- server.go
- sorter.go
- sorterstrategy.go
- stream_decoder.go
- stream_encoder.go
- stream_group_accumulator.go
- stream_merger.go
- tablereader.go
- testutils.go
- values.go