Documentation ¶
Constants ¶
const (
	APIPathUserQuery      = "/query"
	APIPathInternalQuery  = "/_query"
	APIPathUserStream     = "/stream"
	APIPathInternalStream = "/_stream"
	APIPathReplicate      = "/replicate"
	APIPathClusterState   = "/_clusterstate"
)
These are the store API URL paths.
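For example, a client can address a store node's user-facing query endpoint by joining the node's base URL with APIPathUserQuery. A minimal sketch; the host, port, and bare mount path are assumptions, not part of this package:

func queryURL(host string) string {
	u := url.URL{
		Scheme: "http",
		Host:   host, // e.g. "store-0.example.com:7650" (hypothetical address)
		Path:   APIPathUserQuery,
	}
	return u.String()
}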
Variables ¶
var ErrNoSegmentsAvailable = errors.New("no segments available")
ErrNoSegmentsAvailable is returned by various methods to indicate no qualifying segments are currently available.
Functions ¶
This section is empty.
Types ¶
type API ¶
type API struct {
// contains filtered or unexported fields
}
API serves the store API.
func NewAPI ¶
func NewAPI(
	peer *cluster.Peer,
	log Log,
	client *http.Client,
	replicatedSegments, replicatedBytes prometheus.Counter,
	duration *prometheus.HistogramVec,
	logger log.Logger,
) *API
NewAPI returns a usable API.
type Compacter ¶
type Compacter struct {
// contains filtered or unexported fields
}
Compacter is responsible for all post-flush segment mutation. That includes compacting highly-overlapping segments, compacting small and sequential segments, and enforcing the retention window.
func NewCompacter ¶
func NewCompacter(log Log, segmentTargetSize int64, retain time.Duration, purge time.Duration, compactDuration *prometheus.HistogramVec, trashSegments, purgeSegments *prometheus.CounterVec, logger log.Logger) *Compacter
NewCompacter creates a Compacter. Don't forget to Run it.
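A hedged wiring sketch follows. The metric names and label sets are illustrative assumptions, as is starting it with a Run method (only the constructor is documented here; "Don't forget to Run it" implies such a method exists):

// buildCompacter is a hypothetical helper showing how the parameters line up.
func buildCompacter(store Log, logger log.Logger) *Compacter {
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "store_compact_duration_seconds", // hypothetical metric name
		Help: "Duration of compaction cycles.",
	}, []string{"kind"}) // label names must match what Compacter expects
	trashed := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "store_trash_segments_total", Help: "Segments moved to trash.",
	}, []string{"success"})
	purged := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "store_purge_segments_total", Help: "Trash segments purged.",
	}, []string{"success"})
	return NewCompacter(
		store,
		128*1024*1024,  // segmentTargetSize: compact toward ~128 MB segments
		7*24*time.Hour, // retain: records older than a week become trashable
		24*time.Hour,   // purge: trash older than a day becomes purgeable
		duration, trashed, purged,
		logger,
	)
}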
type Consumer ¶
type Consumer struct {
// contains filtered or unexported fields
}
Consumer reads segments from the ingesters, and replicates merged segments to the rest of the cluster. It's implemented as a state machine: gather segments, replicate, commit, and repeat. All failures invalidate the entire batch.
func NewConsumer ¶
func NewConsumer(
	peer *cluster.Peer,
	client *http.Client,
	segmentTargetSize int64,
	segmentTargetAge time.Duration,
	replicationFactor int,
	consumedSegments, consumedBytes prometheus.Counter,
	replicatedSegments, replicatedBytes prometheus.Counter,
	logger log.Logger,
) *Consumer
NewConsumer creates a consumer. Don't forget to Run it.
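As a hedged construction sketch (the metric names, the example sizes, and the trailing Run call are assumptions for illustration):

// buildConsumer is a hypothetical helper; peer is a *cluster.Peer obtained
// from the cluster package, already joined to the cluster.
func buildConsumer(peer *cluster.Peer, logger log.Logger) *Consumer {
	counter := func(name string) prometheus.Counter {
		return prometheus.NewCounter(prometheus.CounterOpts{
			Name: name, Help: "Example metric.", // hypothetical metric names
		})
	}
	return NewConsumer(
		peer,
		http.DefaultClient,
		128*1024*1024,  // segmentTargetSize: merge gathered segments up to ~128 MB
		30*time.Second, // segmentTargetAge: don't hold a partial batch longer than this
		2,              // replicationFactor: each merged segment lands on two nodes
		counter("store_consumed_segments_total"), counter("store_consumed_bytes_total"),
		counter("store_replicated_segments_total"), counter("store_replicated_bytes_total"),
		logger,
	)
}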
type Log ¶
type Log interface {
	// Create a new segment for writes.
	Create() (WriteSegment, error)

	// Query written and closed segments.
	// TODO(pb): convert parameters to QueryParams
	Query(qp QueryParams, statsOnly bool) (QueryResult, error)

	// Register a streaming query, giving results on the result chan.
	// Canceling the context closes the returned channel.
	Stream(context.Context, QueryParams) <-chan []byte

	// Overlapping returns segments that have a high degree of time overlap and
	// can be compacted.
	Overlapping() ([]ReadSegment, error)

	// Sequential returns segments that are small and sequential and can be
	// compacted.
	Sequential() ([]ReadSegment, error)

	// Trashable segments are read segments whose newest record is older than
	// the given time. They may be trashed, i.e. made unavailable for querying.
	Trashable(oldestRecord time.Time) ([]ReadSegment, error)

	// Purgeable segments are trash segments whose modification time (i.e. the
	// time they were trashed) is older than the given time. They may be purged,
	// i.e. hard deleted.
	Purgeable(oldestModTime time.Time) ([]TrashSegment, error)

	// Stats of the current state of the store log.
	Stats() (LogStats, error)

	// Close the log, releasing any claimed lock.
	Close() error
}
Log is an abstraction for segments on a storage node.
func NewFileLog ¶
func NewFileLog(filesys fs.Filesystem, root string, segmentTargetSize, segmentBufferSize int64) (Log, error)
NewFileLog returns a Log backed by the filesystem at path root. Note that we don't own segment files! They may disappear.
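A minimal open/stat sketch, assuming fs.NewRealFilesystem is the pkg/fs constructor for a real-disk Filesystem (verify against that package):

func openStore() (Log, error) {
	filesys := fs.NewRealFilesystem() // assumed constructor from pkg/fs
	store, err := NewFileLog(filesys, "/var/oklog/store", 128*1024*1024, 1024*1024)
	if err != nil {
		return nil, err
	}
	stats, err := store.Stats()
	if err != nil {
		store.Close()
		return nil, err
	}
	fmt.Printf("active: %d segments, %d bytes\n", stats.ActiveSegments, stats.ActiveBytes)
	return store, nil // callers must Close to release the claimed lock
}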
type LogStats ¶
type LogStats struct {
	ActiveSegments  int64
	ActiveBytes     int64
	FlushedSegments int64
	FlushedBytes    int64
	ReadingSegments int64
	ReadingBytes    int64
	TrashedSegments int64
	TrashedBytes    int64
}
LogStats describes the current state of the store log.
type QueryParams ¶ added in v0.1.3
type QueryParams struct {
	From  time.Time `json:"from"`
	To    time.Time `json:"to"`
	Q     string    `json:"q"`
	Regex bool      `json:"regex"`
}
QueryParams defines all dimensions of a query.
func (*QueryParams) DecodeFrom ¶ added in v0.1.3
func (qp *QueryParams) DecodeFrom(u *url.URL) error
DecodeFrom populates a QueryParams from a URL.
func (*QueryParams) EncodeTo ¶ added in v0.1.3
func (qp *QueryParams) EncodeTo(u *url.URL)
EncodeTo encodes the QueryParams into the query values of the URL.
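A round-trip sketch; the host is hypothetical, and the exact encoded query string is up to the implementation:

func buildAndParse() error {
	qp := QueryParams{
		From:  time.Now().Add(-time.Hour),
		To:    time.Now(),
		Q:     "level=error",
		Regex: false,
	}
	u, err := url.Parse("http://store-0.example.com:7650" + APIPathUserQuery)
	if err != nil {
		return err
	}
	qp.EncodeTo(u) // attaches from/to/q/regex as query parameters

	var decoded QueryParams
	return decoded.DecodeFrom(u) // validates and repopulates the same fields
}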
func (*QueryParams) MarshalJSON ¶ added in v0.1.3
func (qp *QueryParams) MarshalJSON() ([]byte, error)
MarshalJSON implements json.Marshaler. It marshals the times as RFC3339Nano timestamps.
func (*QueryParams) UnmarshalJSON ¶ added in v0.1.3
func (qp *QueryParams) UnmarshalJSON(data []byte) error
UnmarshalJSON implements json.Unmarshaler. It parses the times as RFC3339Nano timestamps.
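Because these methods implement the standard json.Marshaler and json.Unmarshaler interfaces, plain encoding/json calls produce and accept the RFC3339Nano form:

func roundTrip() error {
	qp := QueryParams{From: time.Now().Add(-time.Hour), To: time.Now(), Q: "level=error"}
	buf, err := json.Marshal(&qp) // emits e.g. "from":"2017-01-02T15:04:05.999999999Z"
	if err != nil {
		return err
	}
	var round QueryParams
	return json.Unmarshal(buf, &round) // parses the timestamps back into time.Time
}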
type QueryResult ¶
type QueryResult struct {
	Params QueryParams `json:"query"`

	NodesQueried    int    `json:"nodes_queried"`
	SegmentsQueried int    `json:"segments_queried"`
	MaxDataSetSize  int64  `json:"max_data_set_size"`
	ErrorCount      int    `json:"error_count,omitempty"`
	Duration        string `json:"duration"`

	Records io.ReadCloser // TODO(pb): audit to ensure closing is valid throughout
}
QueryResult contains statistics about, and matching records for, a query.
func (*QueryResult) DecodeFrom ¶
func (qr *QueryResult) DecodeFrom(resp *http.Response) error
DecodeFrom decodes the QueryResult from the HTTP response.
func (*QueryResult) EncodeTo ¶
func (qr *QueryResult) EncodeTo(w http.ResponseWriter)
EncodeTo encodes the QueryResult to the HTTP response writer. It also closes the records ReadCloser.
func (*QueryResult) Merge ¶
func (qr *QueryResult) Merge(other QueryResult) error
Merge the other QueryResult into this one.
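A hypothetical fan-in sketch: decode one QueryResult per queried node and merge them into a single result. Counting a failed node via ErrorCount is an assumption for illustration, not documented behavior:

func gather(responses []*http.Response) (QueryResult, error) {
	var merged QueryResult
	for _, resp := range responses {
		var qr QueryResult
		if err := qr.DecodeFrom(resp); err != nil {
			merged.ErrorCount++ // assumption: record the failed node and continue
			continue
		}
		if err := merged.Merge(qr); err != nil {
			return merged, err
		}
	}
	return merged, nil // merged.Records must eventually be closed by the caller
}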
type ReadSegment ¶
type ReadSegment interface {
	io.Reader
	Reset() error
	Trash() error
	Purge() error
}
ReadSegment can be read from, reset (back to flushed state), trashed (made unavailable for queries), or purged (hard deleted).
type TrashSegment ¶
type TrashSegment interface {
Purge() error
}
TrashSegment may only be purged (hard deleted).
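Together with Trashable and Purgeable on Log, the segment lifecycle can be pictured as below. This is a hypothetical helper; the one-week retention and one-day purge delay are example values, and treating ErrNoSegmentsAvailable as "nothing qualifies" is an assumption based on that variable's doc:

func enforceRetention(store Log) error {
	readSegs, err := store.Trashable(time.Now().Add(-7 * 24 * time.Hour))
	if err == ErrNoSegmentsAvailable {
		readSegs = nil // nothing old enough to trash
	} else if err != nil {
		return err
	}
	for _, seg := range readSegs {
		if err := seg.Trash(); err != nil { // make it unavailable for queries
			return err
		}
	}
	trashSegs, err := store.Purgeable(time.Now().Add(-24 * time.Hour))
	if err == ErrNoSegmentsAvailable {
		trashSegs = nil // nothing has lingered in the trash long enough
	} else if err != nil {
		return err
	}
	for _, seg := range trashSegs {
		if err := seg.Purge(); err != nil { // hard delete
			return err
		}
	}
	return nil
}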