Documentation ¶
Index ¶
- Constants
- Variables
- func CheckTime(t time.Time) error
- func EscapeStringField(in string) string
- func GetPrecisionMultiplier(precision string) int64
- func MakeKey(name []byte, tags Tags) []byte
- func SafeCalcTime(timestamp int64, precision string) (time.Time, error)
- type ConsistencyLevel
- type FieldIterator
- type FieldType
- type Fields
- type InlineFNV64a
- type Point
- func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point
- func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error)
- func NewPointFromBytes(b []byte) (Point, error)
- func ParsePoints(buf []byte) ([]Point, error)
- func ParsePointsString(buf string) ([]Point, error)
- func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error)
- type Points
- type Row
- type Rows
- type Statistic
- type StatisticTags
- type Tag
- type Tags
- func (a *Tags) Delete(key []byte)
- func (a Tags) Get(key []byte) []byte
- func (a Tags) GetString(key string) string
- func (a Tags) HashKey() []byte
- func (a Tags) Len() int
- func (a Tags) Less(i, j int) bool
- func (a Tags) Map() map[string]string
- func (a Tags) Merge(other map[string]string) Tags
- func (a *Tags) Set(key, value []byte)
- func (a *Tags) SetString(key, value string)
- func (a Tags) Swap(i, j int)
Constants ¶
const (
	// MinNanoTime is the minimum time that can be represented.
	//
	// 1677-09-21 00:12:43.145224194 +0000 UTC
	//
	// The two lowest minimum integers are used as sentinel values. The
	// minimum value needs to be used as a value lower than any other value for
	// comparisons and another separate value is needed to act as a sentinel
	// default value that is unusable by the user, but usable internally.
	// Because these two values need to be used for a special purpose, we do
	// not allow users to write points at these two times.
	MinNanoTime = int64(math.MinInt64) + 2

	// MaxNanoTime is the maximum time that can be represented.
	//
	// 2262-04-11 23:47:16.854775806 +0000 UTC
	//
	// The highest time represented by a nanosecond needs to be used for an
	// exclusive range in the shard group, so the maximum time needs to be one
	// less than the possible maximum number of nanoseconds representable by an
	// int64 so that we don't lose a point at that one time.
	MaxNanoTime = int64(math.MaxInt64) - 1
)
const (
MaxKeyLength = 65535
)
Variables ¶
var (
	ErrPointMustHaveAField  = errors.New("point without fields is unsupported")
	ErrInvalidNumber        = errors.New("invalid number")
	ErrInvalidPoint         = errors.New("point is invalid")
	ErrMaxKeyLengthExceeded = errors.New("max key length exceeded")
)
var (
	// ErrInvalidConsistencyLevel is returned when parsing the string version
	// of a consistency level.
	ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
)
var (
	// ErrTimeOutOfRange is returned when time is out of the representable
	// range using int64 nanoseconds since the epoch.
	ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime)
)
Functions ¶
func EscapeStringField ¶ added in v1.0.0
func EscapeStringField(in string) string
EscapeStringField returns a copy of in with any double quotes or backslashes escaped.
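For illustration, a minimal sketch of escaping a raw field value, assuming this is the InfluxDB models package imported from github.com/influxdata/influxdb/models:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	raw := `say "hello" and include a \ backslash`
	// Double quotes and backslashes in raw come back escaped with a backslash.
	fmt.Println(models.EscapeStringField(raw))
}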
func GetPrecisionMultiplier ¶ added in v0.10.0
func GetPrecisionMultiplier(precision string) int64
GetPrecisionMultiplier returns a multiplier for the specified precision.
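A sketch of inspecting the multipliers; the precision strings "s", "ms", and "u" are assumptions based on common InfluxDB usage, and the import path is assumed as above:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	for _, p := range []string{"s", "ms", "u"} {
		// Print whatever multiplier the package reports for each precision.
		fmt.Printf("%s -> %d\n", p, models.GetPrecisionMultiplier(p))
	}
}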
Types ¶
type ConsistencyLevel ¶ added in v0.12.0
type ConsistencyLevel int
ConsistencyLevel represents the replication criteria required before a write can be reported as successful.
const (
	// ConsistencyLevelAny allows for hinted hand off, potentially no write happened yet.
	ConsistencyLevelAny ConsistencyLevel = iota

	// ConsistencyLevelOne requires at least one data node acknowledged a write.
	ConsistencyLevelOne

	// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write.
	ConsistencyLevelQuorum

	// ConsistencyLevelAll requires all data nodes to acknowledge a write.
	ConsistencyLevelAll
)
func ParseConsistencyLevel ¶ added in v0.12.0
func ParseConsistencyLevel(level string) (ConsistencyLevel, error)
ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const
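A minimal sketch of parsing a level from user input; the accepted spelling "quorum" is an assumption, and an unrecognized string is expected to yield ErrInvalidConsistencyLevel:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	level, err := models.ParseConsistencyLevel("quorum")
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println(level == models.ConsistencyLevelQuorum)
}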
type FieldIterator ¶ added in v1.1.0
type Fields ¶
type Fields map[string]interface{}
Fields represents a mapping between a Point's field names and their values.
func (Fields) MarshalBinary ¶
MarshalBinary encodes all the fields to their proper type and returns the binary representation. NOTE: uint64 is specifically not supported due to potential overflow when decoding back to an int64 later. NOTE 2: uint is accepted even though it may also be 64 bits wide.
type InlineFNV64a ¶ added in v1.1.0
type InlineFNV64a uint64
InlineFNV64a is an alloc-free port of the standard library's fnv64a.
func NewInlineFNV64a ¶ added in v1.1.0
func NewInlineFNV64a() InlineFNV64a
func (*InlineFNV64a) Sum64 ¶ added in v1.1.0
func (s *InlineFNV64a) Sum64() uint64
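A sketch of hashing a series key. Only Sum64 is documented here; the Write call below is an assumption that the type mirrors the standard library's fnv64a writer:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	h := models.NewInlineFNV64a()
	// Write is assumed to exist (mirroring hash/fnv); it is not listed above.
	h.Write([]byte("cpu,host=server01"))
	fmt.Println(h.Sum64())
}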
type Point ¶
type Point interface {
	Name() string
	SetName(string)

	Tags() Tags
	AddTag(key, value string)
	SetTags(tags Tags)

	Fields() Fields

	Time() time.Time
	SetTime(t time.Time)
	UnixNano() int64

	HashID() uint64
	Key() []byte

	Data() []byte
	SetData(buf []byte)

	// String returns a string representation of the point. If there is a
	// timestamp associated with the point then it will be specified with the
	// default precision of nanoseconds.
	String() string

	// MarshalBinary returns a []byte representation of the point similar to String().
	MarshalBinary() ([]byte, error)

	// PrecisionString returns a string representation of the point. If there
	// is a timestamp associated with the point then it will be specified in
	// the given unit.
	PrecisionString(precision string) string

	// RoundedString returns a string representation of the point. If there
	// is a timestamp associated with the point, then it will be rounded to
	// the given duration.
	RoundedString(d time.Duration) string

	// Split will attempt to return multiple points with the same timestamp whose
	// string representations are no longer than size. Points with a single field or
	// a point without a timestamp may exceed the requested size.
	Split(size int) []Point

	// Round will round the timestamp of the point to the given duration.
	Round(d time.Duration)

	// StringSize returns the length of the string that would be returned by String().
	StringSize() int

	// AppendString appends the result of String() to the provided buffer and returns
	// the result, potentially reducing string allocations.
	AppendString(buf []byte) []byte

	// FieldIterator returns a FieldIterator that can be used to traverse the
	// fields of a point without constructing the in-memory map.
	FieldIterator() FieldIterator
}
Point defines the values that will be written to the database
func MustNewPoint ¶
func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point
MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If an unsupported field value (NaN) is passed, this function panics.
func NewPoint ¶
func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error)
NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If an unsupported field value (NaN) or out of range time is passed, this function returns an error.
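A minimal sketch of constructing and printing a point; the measurement, tag, and field values are illustrative, and the import path github.com/influxdata/influxdb/models is assumed:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/models"
)

func main() {
	// Build the tag set with the documented Tags.SetString method.
	var tags models.Tags
	tags.SetString("host", "server01")
	tags.SetString("region", "us-west")

	fields := models.Fields{"value": 0.64, "status": "ok"}

	pt, err := models.NewPoint("cpu", tags, fields, time.Unix(0, 1434055562000000000))
	if err != nil {
		fmt.Println("invalid point:", err) // e.g. a NaN field or out-of-range time
		return
	}
	fmt.Println(pt.String())
}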
func NewPointFromBytes ¶ added in v0.9.6
func NewPointFromBytes(b []byte) (Point, error)
NewPointFromBytes returns a new Point from a marshalled Point.
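A sketch of a binary round trip, pairing the Point interface's MarshalBinary with NewPointFromBytes; names and values are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/influxdata/influxdb/models"
)

func main() {
	pt, err := models.NewPoint("cpu", nil, models.Fields{"value": 1.0}, time.Now())
	if err != nil {
		panic(err)
	}
	buf, err := pt.MarshalBinary()
	if err != nil {
		panic(err)
	}
	decoded, err := models.NewPointFromBytes(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Name(), decoded.UnixNano())
}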
func ParsePoints ¶
func ParsePoints(buf []byte) ([]Point, error)
ParsePoints parses a text representation of points, one point per line, and returns the resulting slice of Points. If any points fail to parse, a non-nil error is returned in addition to the points that parsed successfully.
func ParsePointsString ¶
func ParsePointsString(buf string) ([]Point, error)
ParsePointsString is identical to ParsePoints but accepts a string buffer.
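A sketch of parsing line-protocol text with ParsePointsString; the input lines follow the usual measurement,tag=value field=value timestamp layout and are illustrative only:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	input := "cpu,host=server01 value=0.64 1434055562000000000\n" +
		"mem,host=server01 used=2048i 1434055562000000000"

	points, err := models.ParsePointsString(input)
	if err != nil {
		// Some points may still have parsed successfully.
		fmt.Println("parse error:", err)
	}
	for _, p := range points {
		fmt.Println(p.Name(), p.Tags().GetString("host"))
	}
}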
type Row ¶
type Row struct {
	Name    string            `json:"name,omitempty"`
	Tags    map[string]string `json:"tags,omitempty"`
	Columns []string          `json:"columns,omitempty"`
	Values  [][]interface{}   `json:"values,omitempty"`
}
Row represents a single row returned from the execution of a statement.
func (*Row) SameSeries ¶
SameSeries returns true if r contains values for the same series as o.
type Statistic ¶ added in v1.0.0
type Statistic struct {
	Name   string                 `json:"name"`
	Tags   map[string]string      `json:"tags"`
	Values map[string]interface{} `json:"values"`
}
func NewStatistic ¶ added in v1.1.0
type StatisticTags ¶ added in v1.1.0
type StatisticTags map[string]string
StatisticTags is a map that can be merged with others without causing mutations to either map.
func (StatisticTags) Merge ¶ added in v1.1.0
func (t StatisticTags) Merge(tags map[string]string) map[string]string
Merge creates a new map containing the merged contents of tags and t. If both tags and the receiver map contain the same key, the value in tags is used in the resulting map.
Merge always returns a usable map.
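A sketch of merging per-call tags into a set of defaults; the composite literal assumes StatisticTags is defined as map[string]string, as described above:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	defaults := models.StatisticTags{"engine": "tsm1", "database": "db0"}
	// Values from the argument win on key collisions; neither map is mutated.
	merged := defaults.Merge(map[string]string{"database": "db1"})
	fmt.Println(merged["engine"], merged["database"]) // tsm1 db1
}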
type Tags ¶
type Tags []Tag
Tags represents a sorted list of tags.
func (Tags) Merge ¶ added in v1.0.0
func (a Tags) Merge(other map[string]string) Tags
Merge merges the tags, combining the two. If both define a tag with the same key, the value from other overwrites the old value. A new Tags value is returned.
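A sketch, again assuming the import path above; the tag keys and values are illustrative:

package main

import (
	"fmt"

	"github.com/influxdata/influxdb/models"
)

func main() {
	var tags models.Tags
	tags.SetString("host", "server01")
	tags.SetString("region", "us-east")

	// The value from the argument map wins for the duplicate "region" key.
	merged := tags.Merge(map[string]string{"region": "us-west", "rack": "r1"})
	fmt.Println(merged.GetString("region"), merged.GetString("rack"))
}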