Documentation ¶
Overview ¶
Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Index ¶
- Constants
- Variables
- func GetMetaLen(option *obs.ObsOptions, path string) (int64, error)
- func PutMetaData(queryID string, meta []*MetaData)
- func UpdateMetaData(old, new cache.Entry) bool
- type BlockLogReader
- type ContentReader
- type DataReader
- type IndexReader
- type MatchAllOperator
- type MetaControl
- type MetaData
- type MetaDataInfo
- type MetaDatas
- type MetaQueue
- type MetaReader
- type MetaStack
- type MetaStorageReader
- type RegexpMatchAll
- type SegmentMetaDataEntry
- func (e *SegmentMetaDataEntry) GetKey() string
- func (e *SegmentMetaDataEntry) GetTime() time.Time
- func (e *SegmentMetaDataEntry) GetValue() interface{}
- func (e *SegmentMetaDataEntry) SetTime(time time.Time)
- func (e *SegmentMetaDataEntry) SetValue(value interface{})
- func (e *SegmentMetaDataEntry) Size() int64
- type SegmentReader
- type UnnestMatch
- type UnnestMatchAll
- type UnnestOperator
Constants ¶
const ( RecordPoolNum = 3 BLOCK_LOG_NAME = "segment.blg" BLOCK_LOG_CHECK_N_BYTES = 4 + 4 TAGS_RECORD_PREFIX_N_BYTES = 4 FLAG_TAGS = 0 FLAG_CONTENT = 1 FLAG_LENGTH_MASK = 0xffffff FLAG_HEAD_SHIFT = 30 RECORD_DATA_MAX_N_BYTES = 1024 * 1024 CONTENT_RECORD_PREFIX_N_BYTES = 4 + 4 + 8 + 8 CONTENT_RECORD_MAX_N_BYTES = CONTENT_RECORD_PREFIX_N_BYTES + RECORD_DATA_MAX_N_BYTES RECORD_MAX_N_BYTES = CONTENT_RECORD_MAX_N_BYTES BLOCK_FLUSH_TRIGGER_N_BYTES = 1024 * 1024 FLAG_BLOCK_CHECK = 2 BLOCK_PREFIX_N_BYTES = 8 + 4 )
const ( CONTENT_NAME = "segment.cnt" MAX_BLOCK_LOG_STORE_N_BYTES = BLOCK_FLUSH_TRIGGER_N_BYTES + (RECORD_MAX_N_BYTES * 2) )
const ( TAG_FIELD = "tag" CONTENT_FIELD = "content" CircularRecordNum = 6 // reader cache 4, hash agg and index scan each cache 1 ContentFilterDurationSpan = "content_filter_duration" )
const ( META_DATA_N_BYTES int32 = 8 + 8 + 8 + 8 + 8 + 4 + 4 META_STORE_N_BYTES int32 = META_DATA_N_BYTES + 4 META_DATA_SIZE int32 = META_STORE_N_BYTES + 4 )
const ( META_NAME = "segment.meta" MAX_SCAN_META_COUNT = 16 * 1024 MAX_SCAN_META_Length = MAX_SCAN_META_COUNT * META_STORE_N_BYTES )
const ( QueryMetaCacheTTL = 10 * time.Minute QueryMetaDataCacheSize int64 = 50 * 1024 * 1024 * int64(META_DATA_SIZE) )
const ( ReaderContentSpan = "reader_content_num_span" ReaderReadContentSizeSpan = "reader_content_read_size_span" ReaderReadContentDuration = "reader_content_read_duration" MetaReadNumSpan = "meta_read_num_span" MetaReadDurationSpan = "meta_read_duration" )
Variables ¶
var (
FileCursorPool = record.NewRecordPool(record.LogStoreReaderPool)
)
var QueryMetaDataCache = cache.NewCache(QueryMetaDataCacheSize, QueryMetaCacheTTL)
Functions ¶
func GetMetaLen ¶
func GetMetaLen(option *obs.ObsOptions, path string) (int64, error)
func PutMetaData ¶
func UpdateMetaData ¶
Types ¶
type BlockLogReader ¶
type BlockLogReader struct {
// contains filtered or unexported fields
}
func NewBlockLogReader ¶
func NewBlockLogReader(object *obs.ObsOptions, isStat bool, path string) (*BlockLogReader, error)
func (*BlockLogReader) Close ¶
func (c *BlockLogReader) Close()
type ContentReader ¶
type ContentReader struct {
// contains filtered or unexported fields
}
func NewContentReader ¶
func (*ContentReader) Close ¶
func (s *ContentReader) Close()
func (*ContentReader) StartSpan ¶
func (s *ContentReader) StartSpan(span *tracing.Span)
type DataReader ¶
type DataReader struct {
// contains filtered or unexported fields
}
func NewDataReader ¶
func (*DataReader) Close ¶
func (s *DataReader) Close()
func (*DataReader) StartSpan ¶
func (s *DataReader) StartSpan(span *tracing.Span)
func (*DataReader) UnnestNext ¶
func (s *DataReader) UnnestNext() (*record.Record, error)
type IndexReader ¶
type IndexReader struct {
// contains filtered or unexported fields
}
func NewIndexReader ¶
func NewIndexReader(option *obs.ObsOptions, version uint32, path string, tr util.TimeRange, opt hybridqp.Options) (*IndexReader, error)
func (*IndexReader) Close ¶
func (s *IndexReader) Close()
func (*IndexReader) Get ¶
func (s *IndexReader) Get() ([]*MetaData, error)
func (*IndexReader) GetMaxBlockId ¶
func (s *IndexReader) GetMaxBlockId() int64
func (*IndexReader) GetMinMaxTime ¶
func (s *IndexReader) GetMinMaxTime() (int64, int64)
func (*IndexReader) StartSpan ¶
func (s *IndexReader) StartSpan(span *tracing.Span)
type MatchAllOperator ¶
type MatchAllOperator struct {
// contains filtered or unexported fields
}
func (*MatchAllOperator) Compute ¶
func (r *MatchAllOperator) Compute(rec *record.Record)
type MetaControl ¶
type MetaControl interface { Push(MetaDataInfo) Pop() (MetaDataInfo, bool) IsEmpty() bool }
func NewMetaControl ¶
func NewMetaControl(isQueue bool, count int) MetaControl
type MetaData ¶
type MetaData struct {
// contains filtered or unexported fields
}
func GetMetaData ¶
func (*MetaData) GetBlockIndex ¶
func (*MetaData) GetContentBlockLength ¶
func (*MetaData) GetContentBlockOffset ¶
func (*MetaData) GetMaxTime ¶
func (*MetaData) GetMinTime ¶
type MetaDataInfo ¶
type MetaQueue ¶
type MetaQueue []MetaDataInfo
func (*MetaQueue) Pop ¶
func (q *MetaQueue) Pop() (MetaDataInfo, bool)
func (*MetaQueue) Push ¶
func (q *MetaQueue) Push(v MetaDataInfo)
type MetaReader ¶
type MetaReader interface { Close() // contains filtered or unexported methods }
func NewMetaReader ¶
func NewMetaReader(option *obs.ObsOptions, path string, offset int64, length int64, tr util.TimeRange, isCache bool, isStat bool) (MetaReader, error)
TODO: add a cache-backed MetaReader implementation.
type MetaStack ¶
type MetaStack []MetaDataInfo
func (*MetaStack) Pop ¶
func (s *MetaStack) Pop() (MetaDataInfo, bool)
func (*MetaStack) Push ¶
func (s *MetaStack) Push(value MetaDataInfo)
type MetaStorageReader ¶
type MetaStorageReader struct {
// contains filtered or unexported fields
}
func NewMetaStorageReader ¶
func NewMetaStorageReader(option *obs.ObsOptions, path string, offset int64, length int64, tr util.TimeRange, isStat bool) (*MetaStorageReader, error)
func (*MetaStorageReader) Close ¶
func (m *MetaStorageReader) Close()
type RegexpMatchAll ¶
type RegexpMatchAll struct {
// contains filtered or unexported fields
}
func (*RegexpMatchAll) Get ¶
func (r *RegexpMatchAll) Get(data [][]byte) [][]byte
type SegmentMetaDataEntry ¶
type SegmentMetaDataEntry struct {
// contains filtered or unexported fields
}
func NewSegmentMetaDataEntry ¶
func NewSegmentMetaDataEntry(segmentID string) *SegmentMetaDataEntry
func (*SegmentMetaDataEntry) GetKey ¶
func (e *SegmentMetaDataEntry) GetKey() string
func (*SegmentMetaDataEntry) GetTime ¶
func (e *SegmentMetaDataEntry) GetTime() time.Time
func (*SegmentMetaDataEntry) GetValue ¶
func (e *SegmentMetaDataEntry) GetValue() interface{}
func (*SegmentMetaDataEntry) SetTime ¶
func (e *SegmentMetaDataEntry) SetTime(time time.Time)
func (*SegmentMetaDataEntry) SetValue ¶
func (e *SegmentMetaDataEntry) SetValue(value interface{})
func (*SegmentMetaDataEntry) Size ¶
func (e *SegmentMetaDataEntry) Size() int64
type SegmentReader ¶
type SegmentReader struct {
// contains filtered or unexported fields
}
func NewSegmentReader ¶
func (*SegmentReader) Close ¶
func (s *SegmentReader) Close()
func (*SegmentReader) GetRowCount ¶
func (s *SegmentReader) GetRowCount() int64
func (*SegmentReader) SetTr ¶
func (s *SegmentReader) SetTr(tr util.TimeRange)
func (*SegmentReader) StartSpan ¶
func (s *SegmentReader) StartSpan(span *tracing.Span)
func (*SegmentReader) UpdateTr ¶
func (s *SegmentReader) UpdateTr(time int64)
type UnnestMatch ¶
func GetUnnestFunc ¶
func NewRegexpMatchAll ¶
func NewRegexpMatchAll(unnest *influxql.Unnest, schema record.Schemas) UnnestMatch
type UnnestMatchAll ¶
type UnnestMatchAll struct {
// contains filtered or unexported fields
}
func NewUnnestMatchAll ¶
func NewUnnestMatchAll(unnest *influxql.Unnest) (*UnnestMatchAll, error)
type UnnestOperator ¶
func GetUnnestFuncOperator ¶
func NewMatchAllOperator ¶
func NewMatchAllOperator(unnest *influxql.Unnest, schemas record.Schemas) UnnestOperator