Documentation ¶
Index ¶
- func PutLogRows(lr *LogRows)
- func PutStreamTags(st *StreamTags)
- type BlockColumn
- type DatadbStats
- type Field
- type IndexdbStats
- type LogRows
- func (lr *LogRows) GetRowString(idx int) string
- func (lr *LogRows) Len() int
- func (lr *LogRows) Less(i, j int) bool
- func (lr *LogRows) MustAdd(tenantID TenantID, timestamp int64, fields []Field)
- func (lr *LogRows) NeedFlush() bool
- func (lr *LogRows) Reset()
- func (lr *LogRows) ResetKeepSettings()
- func (lr *LogRows) Swap(i, j int)
- type PartitionStats
- type Query
- type RowFormatter
- type Storage
- type StorageConfig
- type StorageStats
- type StreamFilter
- type StreamTags
- func (st *StreamTags) Add(name, value string)
- func (st *StreamTags) Len() int
- func (st *StreamTags) Less(i, j int) bool
- func (st *StreamTags) MarshalCanonical(dst []byte) []byte
- func (st *StreamTags) Reset()
- func (st *StreamTags) String() string
- func (st *StreamTags) Swap(i, j int)
- func (st *StreamTags) UnmarshalCanonical(src []byte) ([]byte, error)
- type TenantID
- type TimeFormatter
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
Types ¶
type BlockColumn ¶
type BlockColumn struct { // Name is the column name Name string // Values is column values Values []string }
BlockColumn is a single column of a block of data.
type DatadbStats ¶
type DatadbStats struct { // InmemoryMergesTotal is the number of inmemory merges performed in the given datadb. InmemoryMergesTotal uint64 // InmemoryActiveMerges is the number of currently active inmemory merges performed by the given datadb. InmemoryActiveMerges uint64 // FileMergesTotal is the number of file merges performed in the given datadb. FileMergesTotal uint64 // FileActiveMerges is the number of currently active file merges performed by the given datadb. FileActiveMerges uint64 // InmemoryRowsCount is the number of rows, which weren't flushed to disk yet. InmemoryRowsCount uint64 // FileRowsCount is the number of rows stored on disk. FileRowsCount uint64 // InmemoryParts is the number of in-memory parts, which weren't flushed to disk yet. InmemoryParts uint64 // FileParts is the number of file-based parts stored on disk. FileParts uint64 // InmemoryBlocks is the number of in-memory blocks, which weren't flushed to disk yet. InmemoryBlocks uint64 // FileBlocks is the number of file-based blocks stored on disk. FileBlocks uint64 // CompressedInmemorySize is the size of compressed data stored in memory. CompressedInmemorySize uint64 // CompressedFileSize is the size of compressed data stored on disk. CompressedFileSize uint64 // UncompressedInmemorySize is the size of uncompressed data stored in memory. UncompressedInmemorySize uint64 // UncompressedFileSize is the size of uncompressed data stored on disk. UncompressedFileSize uint64 }
DatadbStats contains various stats for datadb.
func (*DatadbStats) RowsCount ¶
func (s *DatadbStats) RowsCount() uint64
RowsCount returns the number of rows stored in datadb.
type Field ¶
type Field struct { // Name is the name of the field Name string // Value is the value of the field Value string }
Field is a single field for the log entry.
type IndexdbStats ¶
type IndexdbStats struct { // StreamsCreatedTotal is the number of log streams created since the indexdb initialization. StreamsCreatedTotal uint64 // IndexdbSizeBytes is the size of data in indexdb. IndexdbSizeBytes uint64 // IndexdbItemsCount is the number of items in indexdb. IndexdbItemsCount uint64 // IndexdbBlocksCount is the number of blocks in indexdb. IndexdbBlocksCount uint64 // IndexdbPartsCount is the number of parts in indexdb. IndexdbPartsCount uint64 }
IndexdbStats contains indexdb stats
type LogRows ¶
type LogRows struct {
// contains filtered or unexported fields
}
LogRows holds a set of rows needed for Storage.MustAddRows
LogRows must be obtained via GetLogRows()
func GetLogRows ¶
GetLogRows returns LogRows from the pool for the given streamFields.
streamFields is a set of field names, which must be associated with the stream. ignoreFields is a set of field names, which must be ignored during data ingestion.
Return back it to the pool with PutLogRows() when it is no longer needed.
func (*LogRows) GetRowString ¶
GetRowString returns string representation of the row with the given idx.
func (*LogRows) Less ¶
Less returns true if (streamID, timestamp) for row i is smaller than the (streamID, timestamp) for row j
func (*LogRows) MustAdd ¶
MustAdd adds a log entry with the given args to lr.
It is OK to modify the args after returning from the function, since lr copies all the args to internal data.
func (*LogRows) NeedFlush ¶
NeedFlush returns true if lr contains too much data, so it must be flushed to the storage.
func (*LogRows) Reset ¶
func (lr *LogRows) Reset()
Reset resets lr with all its settings.
Call ResetKeepSettings() for resetting lr without resetting its settings.
func (*LogRows) ResetKeepSettings ¶
func (lr *LogRows) ResetKeepSettings()
ResetKeepSettings resets rows stored in lr, while keeping its settings passed to GetLogRows().
type PartitionStats ¶
type PartitionStats struct { DatadbStats IndexdbStats }
PartitionStats contains stats for the partition.
type Query ¶
type Query struct {
// contains filtered or unexported fields
}
Query represents LogsQL query.
type RowFormatter ¶
type RowFormatter []Field
RowFormatter implements fmt.Stringer for []Field, i.e. a single log row.
func (*RowFormatter) String ¶
func (rf *RowFormatter) String() string
String returns user-readable representation for rf
type Storage ¶
type Storage struct {
// contains filtered or unexported fields
}
Storage is the storage for log entries.
func MustOpenStorage ¶
func MustOpenStorage(path string, cfg *StorageConfig) *Storage
MustOpenStorage opens Storage at the given path.
MustClose must be called on the returned Storage when it is no longer needed.
func (*Storage) MustClose ¶
func (s *Storage) MustClose()
MustClose closes s.
It is expected that nobody uses the storage at the close time.
func (*Storage) RunQuery ¶
func (s *Storage) RunQuery(tenantIDs []TenantID, q *Query, stopCh <-chan struct{}, processBlock func(columns []BlockColumn))
RunQuery runs the given q and calls processBlock for results.
func (*Storage) UpdateStats ¶
func (s *Storage) UpdateStats(ss *StorageStats)
UpdateStats updates ss for the given s.
type StorageConfig ¶
type StorageConfig struct { // Retention is the retention for the ingested data. // // Older data is automatically deleted. Retention time.Duration // FlushInterval is the interval for flushing the in-memory data to disk at the Storage FlushInterval time.Duration // FutureRetention is the allowed retention from the current time to future for the ingested data. // // Log entries with timestamps bigger than now+FutureRetention are ignored. FutureRetention time.Duration // LogNewStreams indicates whether to log newly created log streams. // // This can be useful for debugging of high cardinality issues. // https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#high-cardinality LogNewStreams bool // LogIngestedRows indicates whether to log the ingested log entries. // // This can be useful for debugging of data ingestion. LogIngestedRows bool }
StorageConfig is the config for the Storage.
type StorageStats ¶
type StorageStats struct { // RowsDroppedTooBigTimestamp is the number of rows dropped during data ingestion because their timestamp is bigger than the maximum allowed RowsDroppedTooBigTimestamp uint64 // RowsDroppedTooSmallTimestamp is the number of rows dropped during data ingestion because their timestamp is smaller than the minimum allowed RowsDroppedTooSmallTimestamp uint64 // PartitionsCount is the number of partitions in the storage PartitionsCount uint64 PartitionStats }
StorageStats represents stats for the storage. It may be obtained by calling Storage.UpdateStats().
type StreamFilter ¶
type StreamFilter struct {
// contains filtered or unexported fields
}
StreamFilter is a filter for streams, e.g. `_stream:{...}`
func (*StreamFilter) String ¶
func (sf *StreamFilter) String() string
type StreamTags ¶
type StreamTags struct {
// contains filtered or unexported fields
}
StreamTags contains stream tags.
func (*StreamTags) Add ¶
func (st *StreamTags) Add(name, value string)
Add adds (name:value) tag to st.
func (*StreamTags) Less ¶
func (st *StreamTags) Less(i, j int) bool
Less returns true if tag i is smaller than the tag j.
func (*StreamTags) MarshalCanonical ¶
func (st *StreamTags) MarshalCanonical(dst []byte) []byte
MarshalCanonical marshals st in a canonical way.
func (*StreamTags) String ¶
func (st *StreamTags) String() string
String returns string representation of st.
func (*StreamTags) UnmarshalCanonical ¶
func (st *StreamTags) UnmarshalCanonical(src []byte) ([]byte, error)
UnmarshalCanonical unmarshals st from src marshaled with MarshalCanonical.
type TenantID ¶
type TenantID struct { // AccountID is the id of the account for the log stream. AccountID uint32 // ProjectID is the id of the project for the log stream. ProjectID uint32 }
TenantID is an id of a tenant for log streams.
Each log stream is associated with a single TenantID.
func GetTenantIDFromRequest ¶
GetTenantIDFromRequest returns tenantID from r.
func GetTenantIDFromString ¶
GetTenantIDFromString returns tenantID from s. The string s is expected in the form accountID:projectID.
type TimeFormatter ¶
type TimeFormatter int64
TimeFormatter implements fmt.Stringer for a timestamp in nanoseconds.
func (*TimeFormatter) String ¶
func (tf *TimeFormatter) String() string
String returns human-readable representation for tf.
Source Files ¶
- arena.go
- block.go
- block_data.go
- block_header.go
- block_search.go
- block_stream_merger.go
- block_stream_reader.go
- block_stream_writer.go
- bloomfilter.go
- consts.go
- datadb.go
- encoding.go
- filenames.go
- filters.go
- hash128.go
- index_block_header.go
- indexdb.go
- inmemory_part.go
- log_rows.go
- parser.go
- part.go
- part_header.go
- partition.go
- rows.go
- storage.go
- storage_search.go
- stream_filter.go
- stream_id.go
- stream_tags.go
- tenant_id.go
- tokenizer.go
- u128.go
- values_encoder.go