Documentation ¶
Index ¶
- Constants
- Variables
- func CompactedMetaFileName(blockID uuid.UUID, tenantID string) string
- func MetaFileName(blockID uuid.UUID, tenantID string) string
- func ObjectFileName(keypath KeyPath, name string) string
- func RootPath(blockID uuid.UUID, tenantID string) string
- func SupportedEncodingString() string
- type AppendTracker
- type BlockMeta
- type CompactedBlockMeta
- type Compactor
- type Encoding
- type KeyPath
- type MockCompactor
- type MockRawReader
- func (m *MockRawReader) List(ctx context.Context, keypath KeyPath) ([]string, error)
- func (m *MockRawReader) Read(ctx context.Context, name string, keypath KeyPath, shouldCache bool) (io.ReadCloser, int64, error)
- func (m *MockRawReader) ReadRange(ctx context.Context, name string, keypath KeyPath, offset uint64, ...) error
- func (m *MockRawReader) Shutdown()
- type MockRawWriter
- func (m *MockRawWriter) Append(ctx context.Context, name string, keypath KeyPath, tracker AppendTracker, ...) (AppendTracker, error)
- func (m *MockRawWriter) CloseAppend(ctx context.Context, tracker AppendTracker) error
- func (m *MockRawWriter) Write(ctx context.Context, name string, keypath KeyPath, data io.Reader, size int64, ...) error
- type MockReader
- func (m *MockReader) BlockMeta(ctx context.Context, blockID uuid.UUID, tenantID string) (*BlockMeta, error)
- func (m *MockReader) Blocks(ctx context.Context, tenantID string) ([]uuid.UUID, error)
- func (m *MockReader) Read(ctx context.Context, name string, blockID uuid.UUID, tenantID string, ...) ([]byte, error)
- func (m *MockReader) ReadRange(ctx context.Context, name string, blockID uuid.UUID, tenantID string, ...) error
- func (m *MockReader) ReadTracepointBlock(ctx context.Context, name string) (io.ReadCloser, int64, error)
- func (m *MockReader) Shutdown()
- func (m *MockReader) StreamReader(ctx context.Context, name string, blockID uuid.UUID, tenantID string) (io.ReadCloser, int64, error)
- func (m *MockReader) TenantIndex(ctx context.Context, tenantID string) (*TenantIndex, error)
- func (m *MockReader) Tenants(ctx context.Context) ([]string, error)
- type MockWriter
- func (m *MockWriter) Append(ctx context.Context, name string, blockID uuid.UUID, tenantID string, ...) (AppendTracker, error)
- func (m *MockWriter) CloseAppend(ctx context.Context, tracker AppendTracker) error
- func (m *MockWriter) StreamWriter(ctx context.Context, name string, blockID uuid.UUID, tenantID string, ...) error
- func (m *MockWriter) Write(ctx context.Context, name string, blockID uuid.UUID, tenantID string, ...) error
- func (m *MockWriter) WriteBlockMeta(ctx context.Context, meta *BlockMeta) error
- func (m *MockWriter) WriteTenantIndex(ctx context.Context, tenantID string, meta []*BlockMeta, ...) error
- func (m *MockWriter) WriteTracepointBlock(ctx context.Context, name string, data *bytes.Reader, size int64) error
- type RawReader
- type RawWriter
- type Reader
- type TenantIndex
- type Writer
Constants ¶
const ( MetaName = "meta.json" CompactedMetaName = "meta.compacted.json" TenantIndexName = "index.json.gz" // ClusterSeedFileName File name for the cluster seed file. ClusterSeedFileName = "deep_cluster_seed.json" )
Variables ¶
var ( ErrDoesNotExist = fmt.Errorf("does not exist") ErrEmptyTenantID = fmt.Errorf("empty tenant id") ErrEmptyBlockID = fmt.Errorf("empty block id") ErrBadSeedFile = fmt.Errorf("bad seed file") )
var SupportedEncoding = []Encoding{ EncNone, EncGZIP, EncLZ4_64k, EncLZ4_256k, EncLZ4_1M, EncLZ4_4M, EncSnappy, EncZstd, EncS2, }
SupportedEncoding is a slice of all supported encodings.
Functions ¶
func CompactedMetaFileName ¶
CompactedMetaFileName returns the object name for the compacted block meta given a block ID and tenant ID.
func MetaFileName ¶
MetaFileName returns the object name for the block meta given a block ID and tenant ID.
func ObjectFileName ¶
ObjectFileName returns a unique identifier for an object in object storage given its name and keypath
func SupportedEncodingString ¶
func SupportedEncodingString() string
SupportedEncodingString returns the list of supported encodings.
Types ¶
type AppendTracker ¶
type AppendTracker interface{}
AppendTracker is an empty interface usable by the backend to track a long-running append operation
type BlockMeta ¶
type BlockMeta struct { Version string `json:"format"` // Version indicates the block format version. This includes specifics of how the indexes and data are stored BlockID uuid.UUID `json:"blockID"` // Unique block id MinID []byte `json:"minID"` // Minimum object id stored in this block MaxID []byte `json:"maxID"` // Maximum object id stored in this block TenantID string `json:"tenantID"` // ID of tenant to which this block belongs StartTime time.Time `json:"startTime"` // Roughly matches when the first obj was written to this block. Used to determine block age for different purposes (caching, etc) EndTime time.Time `json:"endTime"` // Currently mostly meaningless but roughly matches to the time the last obj was written to this block TotalObjects int `json:"totalObjects"` // Total objects in this block Size uint64 `json:"size"` // Total size in bytes of the data object CompactionLevel uint8 `json:"compactionLevel"` // Roughly the number of times this block has been compacted Encoding Encoding `json:"encoding"` // Encoding/compression format IndexPageSize uint32 `json:"indexPageSize"` // Size of each index page in bytes TotalRecords uint32 `json:"totalRecords"` // Total Records stored in the index file DataEncoding string `json:"dataEncoding"` // DataEncoding is a string provided externally, but tracked by deepdb that indicates the way the bytes are encoded BloomShardCount uint16 `json:"bloomShards"` // Number of bloom filter shards }
func NewBlockMeta ¶
func (*BlockMeta) ObjectAdded ¶
ObjectAdded updates the block meta appropriately based on information about an added record. start/end are unix epoch seconds.
type CompactedBlockMeta ¶
type Compactor ¶
type Compactor interface { // MarkBlockCompacted marks a block as compacted. Call this after a block has been successfully compacted to a new block MarkBlockCompacted(blockID uuid.UUID, tenantID string) error // ClearBlock removes a block from the backend ClearBlock(blockID uuid.UUID, tenantID string) error // CompactedBlockMeta returns the compacted block meta given a block and tenant id CompactedBlockMeta(blockID uuid.UUID, tenantID string) (*CompactedBlockMeta, error) }
Compactor is a collection of methods to interact with compacted elements of a deepdb block
type Encoding ¶
type Encoding byte
Encoding is the identifier for a chunk encoding.
const ( EncNone Encoding = iota EncGZIP EncLZ4_64k EncLZ4_256k EncLZ4_1M EncLZ4_4M EncSnappy EncZstd EncS2 )
The different available encodings. Make sure to preserve the order, as these numeric values are written to the chunks!
func ParseEncoding ¶
ParseEncoding parses a chunk encoding (compression algorithm) by its name.
func (Encoding) MarshalJSON ¶
MarshalJSON implements the marshaler interface of the json pkg.
func (Encoding) MarshalYAML ¶
MarshalYAML implements the Marshaler interface of the yaml pkg
func (*Encoding) UnmarshalJSON ¶
UnmarshalJSON implements the Unmarshaler interface of the json pkg.
func (*Encoding) UnmarshalYAML ¶
UnmarshalYAML implements the Unmarshaler interface of the yaml pkg.
type KeyPath ¶
type KeyPath []string
KeyPath is an ordered set of strings that govern where data is read/written from the backend
type MockCompactor ¶
type MockCompactor struct {
BlockMetaFn func(blockID uuid.UUID, tenantID string) (*CompactedBlockMeta, error)
}
MockCompactor
func (*MockCompactor) ClearBlock ¶
func (c *MockCompactor) ClearBlock(blockID uuid.UUID, tenantID string) error
func (*MockCompactor) CompactedBlockMeta ¶
func (c *MockCompactor) CompactedBlockMeta(blockID uuid.UUID, tenantID string) (*CompactedBlockMeta, error)
func (*MockCompactor) MarkBlockCompacted ¶
func (c *MockCompactor) MarkBlockCompacted(blockID uuid.UUID, tenantID string) error
type MockRawReader ¶
type MockRawReader struct { L []string ListFn func(ctx context.Context, keypath KeyPath) ([]string, error) R []byte // read Range []byte // ReadRange ReadFn func(ctx context.Context, name string, keypath KeyPath, shouldCache bool) (io.ReadCloser, int64, error) }
MockRawReader
func (*MockRawReader) Read ¶
func (m *MockRawReader) Read(ctx context.Context, name string, keypath KeyPath, shouldCache bool) (io.ReadCloser, int64, error)
func (*MockRawReader) Shutdown ¶
func (m *MockRawReader) Shutdown()
type MockRawWriter ¶
type MockRawWriter struct {
// contains filtered or unexported fields
}
MockRawWriter
func (*MockRawWriter) Append ¶
func (m *MockRawWriter) Append(ctx context.Context, name string, keypath KeyPath, tracker AppendTracker, buffer []byte) (AppendTracker, error)
func (*MockRawWriter) CloseAppend ¶
func (m *MockRawWriter) CloseAppend(ctx context.Context, tracker AppendTracker) error
type MockReader ¶
type MockReader struct { T []string B []uuid.UUID // blocks BlockFn func(ctx context.Context, tenantID string) ([]uuid.UUID, error) M *BlockMeta // meta BlockMetaFn func(ctx context.Context, blockID uuid.UUID, tenantID string) (*BlockMeta, error) TenantIndexFn func(ctx context.Context, tenantID string) (*TenantIndex, error) R []byte // read Range []byte // ReadRange ReadFn func(name string, blockID uuid.UUID, tenantID string) ([]byte, error) }
MockReader
func (*MockReader) ReadTracepointBlock ¶
func (m *MockReader) ReadTracepointBlock(ctx context.Context, name string) (io.ReadCloser, int64, error)
func (*MockReader) Shutdown ¶
func (m *MockReader) Shutdown()
func (*MockReader) StreamReader ¶
func (*MockReader) TenantIndex ¶
func (m *MockReader) TenantIndex(ctx context.Context, tenantID string) (*TenantIndex, error)
type MockWriter ¶
type MockWriter struct { IndexMeta map[string][]*BlockMeta IndexCompactedMeta map[string][]*CompactedBlockMeta }
MockWriter
func (*MockWriter) Append ¶
func (m *MockWriter) Append(ctx context.Context, name string, blockID uuid.UUID, tenantID string, tracker AppendTracker, buffer []byte) (AppendTracker, error)
func (*MockWriter) CloseAppend ¶
func (m *MockWriter) CloseAppend(ctx context.Context, tracker AppendTracker) error
func (*MockWriter) StreamWriter ¶
func (*MockWriter) WriteBlockMeta ¶
func (m *MockWriter) WriteBlockMeta(ctx context.Context, meta *BlockMeta) error
func (*MockWriter) WriteTenantIndex ¶
func (m *MockWriter) WriteTenantIndex(ctx context.Context, tenantID string, meta []*BlockMeta, compactedMeta []*CompactedBlockMeta) error
func (*MockWriter) WriteTracepointBlock ¶
type RawReader ¶
type RawReader interface { // List returns all objects one level beneath the provided keypath List(ctx context.Context, keypath KeyPath) ([]string, error) // Read is for streaming entire objects from the backend. There will be an attempt to retrieve this from cache if shouldCache is true. Read(ctx context.Context, name string, keyPath KeyPath, shouldCache bool) (io.ReadCloser, int64, error) // ReadRange is for reading parts of large objects from the backend. // There will be an attempt to retrieve this from cache if shouldCache is true. Cache key will be tenantID:blockID:offset:bufferLength ReadRange(ctx context.Context, name string, keypath KeyPath, offset uint64, buffer []byte, shouldCache bool) error // Shutdown must be called when the Reader is finished and cleans up any associated resources. Shutdown() }
RawReader is a collection of methods to read data from deepdb backends
type RawWriter ¶
type RawWriter interface { // Write is for in memory data. shouldCache specifies whether caching should be attempted. Write(ctx context.Context, name string, keypath KeyPath, data io.Reader, size int64, shouldCache bool) error // Append starts or continues an Append job. Pass nil to AppendTracker to start a job. Append(ctx context.Context, name string, keypath KeyPath, tracker AppendTracker, buffer []byte) (AppendTracker, error) // CloseAppend closes any resources associated with the AppendTracker. CloseAppend(ctx context.Context, tracker AppendTracker) error }
RawWriter is a collection of methods to write data to deepdb backends
type Reader ¶
type Reader interface { // Read is for reading entire objects from the backend. There will be an attempt to retrieve this // from cache if shouldCache is true. Read(ctx context.Context, name string, blockID uuid.UUID, tenantID string, shouldCache bool) ([]byte, error) // StreamReader is for streaming entire objects from the backend. It is expected this will _not_ be cached. StreamReader(ctx context.Context, name string, blockID uuid.UUID, tenantID string) (io.ReadCloser, int64, error) // ReadRange is for reading parts of large objects from the backend. // There will be an attempt to retrieve this from cache if shouldCache is true. Cache key will be tenantID:blockID:offset:bufferLength ReadRange(ctx context.Context, name string, blockID uuid.UUID, tenantID string, offset uint64, buffer []byte, shouldCache bool) error // Tenants returns a list of all tenants in a backend Tenants(ctx context.Context) ([]string, error) // Blocks returns a list of block UUIDs given a tenant Blocks(ctx context.Context, tenantID string) ([]uuid.UUID, error) // BlockMeta returns the block meta given a block and tenant id BlockMeta(ctx context.Context, blockID uuid.UUID, tenantID string) (*BlockMeta, error) // TenantIndex returns lists of all metas given a tenant TenantIndex(ctx context.Context, tenantID string) (*TenantIndex, error) // Shutdown must be called when the Reader is finished and cleans up any associated resources. Shutdown() // ReadTracepointBlock reads the tracepoint block for the given tenantID ReadTracepointBlock(ctx context.Context, tenantID string) (io.ReadCloser, int64, error) }
Reader is a collection of methods to read data from deepdb backends
type TenantIndex ¶
type TenantIndex struct { CreatedAt time.Time `json:"created_at"` Meta []*BlockMeta `json:"meta"` CompactedMeta []*CompactedBlockMeta `json:"compacted"` }
TenantIndex holds a list of all metas and compacted metas for a given tenant. It is stored in /&lt;tenantID&gt;/index.json.gz (see TenantIndexName) as a gzipped JSON file.
type Writer ¶
type Writer interface { // Write is for in memory data. shouldCache specifies whether caching should be attempted. Write(ctx context.Context, name string, blockID uuid.UUID, tenantID string, buffer []byte, shouldCache bool) error // StreamWriter is for larger data payloads streamed through an io.Reader. It is expected this will _not_ be cached. StreamWriter(ctx context.Context, name string, blockID uuid.UUID, tenantID string, data io.Reader, size int64) error // WriteBlockMeta writes a block meta to its blocks WriteBlockMeta(ctx context.Context, meta *BlockMeta) error // Append starts or continues an Append job. Pass nil to AppendTracker to start a job. Append(ctx context.Context, name string, blockID uuid.UUID, tenantID string, tracker AppendTracker, buffer []byte) (AppendTracker, error) // CloseAppend closes any resources associated with the AppendTracker CloseAppend(ctx context.Context, tracker AppendTracker) error // WriteTenantIndex writes the two meta slices as a tenant index WriteTenantIndex(ctx context.Context, tenantID string, meta []*BlockMeta, compactedMeta []*CompactedBlockMeta) error // WriteTracepointBlock writes the tracepoint block for the given tenantID WriteTracepointBlock(ctx context.Context, tenantID string, data *bytes.Reader, size int64) error }
Writer is a collection of methods to write data to deepdb backends