Documentation ¶
Index ¶
- Constants
- Variables
- func GetTableIndexPrefixes(rd io.ReadSeeker) (prefixes []uint64, err error)
- func GuessPrefixOrdinal(prefix uint64, n uint32) int
- func IterChunks(rd io.ReadSeeker, cb func(chunk chunks.Chunk) (stop bool, err error)) error
- func MaybeMigrateFileManifest(ctx context.Context, dir string) (bool, error)
- func NewIndexTransformer(src io.Reader, chunkCount int) io.Reader
- func OverwriteStoreManifest(ctx context.Context, store *NomsBlockStore, root hash.Hash, ...) (err error)
- func ReadTableFooter(rd io.ReadSeeker) (chunkCount uint32, totalUncompressedData uint64, err error)
- func ReadTableIndexByCopy(rd io.ReadSeeker, q MemoryQuotaProvider) (onHeapTableIndex, error)
- func ValidateAddr(s string) bool
- func WriteChunks(chunks []chunks.Chunk) (string, []byte, error)
- type BlockBufferByteSink
- type BufferedFileByteSink
- type ByteSink
- type CmpChunkTableWriter
- func (tw *CmpChunkTableWriter) AddCmpChunk(c CompressedChunk) error
- func (tw *CmpChunkTableWriter) ChunkCount() uint32
- func (tw *CmpChunkTableWriter) ContentLength() uint64
- func (tw *CmpChunkTableWriter) Finish() (string, error)
- func (tw *CmpChunkTableWriter) Flush(wr io.Writer) error
- func (tw *CmpChunkTableWriter) FlushToFile(path string) error
- func (tw *CmpChunkTableWriter) GetMD5() []byte
- func (tw *CmpChunkTableWriter) Size() int
- type CompressedChunk
- type FixedBufferByteSink
- type GenerationalNBS
- func (gcs *GenerationalNBS) AddTableFilesToManifest(ctx context.Context, fileIdToNumChunks map[string]int) error
- func (gcs *GenerationalNBS) Close() error
- func (gcs *GenerationalNBS) Commit(ctx context.Context, current, last hash.Hash) (bool, error)
- func (gcs *GenerationalNBS) Get(ctx context.Context, h hash.Hash) (chunks.Chunk, error)
- func (gcs *GenerationalNBS) GetMany(ctx context.Context, hashes hash.HashSet, ...) error
- func (gcs *GenerationalNBS) GetManyCompressed(ctx context.Context, hashes hash.HashSet, ...) error
- func (gcs *GenerationalNBS) Has(ctx context.Context, h hash.Hash) (bool, error)
- func (gcs *GenerationalNBS) HasMany(ctx context.Context, hashes hash.HashSet) (absent hash.HashSet, err error)
- func (gcs *GenerationalNBS) NewGen() chunks.ChunkStoreGarbageCollector
- func (gcs *GenerationalNBS) OldGen() chunks.ChunkStoreGarbageCollector
- func (gcs *GenerationalNBS) PruneTableFiles(ctx context.Context) error
- func (gcs *GenerationalNBS) Put(ctx context.Context, c chunks.Chunk) error
- func (gcs *GenerationalNBS) Rebase(ctx context.Context) error
- func (gcs *GenerationalNBS) Root(ctx context.Context) (hash.Hash, error)
- func (gcs *GenerationalNBS) SetRootChunk(ctx context.Context, root, previous hash.Hash) error
- func (gcs *GenerationalNBS) Size(ctx context.Context) (uint64, error)
- func (gcs *GenerationalNBS) Sources(ctx context.Context) (hash.Hash, []TableFile, []TableFile, error)
- func (gcs *GenerationalNBS) Stats() interface{}
- func (gcs *GenerationalNBS) StatsSummary() string
- func (gcs *GenerationalNBS) SupportedOperations() TableFileStoreOps
- func (gcs *GenerationalNBS) Version() string
- func (gcs *GenerationalNBS) WriteTableFile(ctx context.Context, fileId string, numChunks int, contentHash []byte, ...) error
- type HashingByteSink
- type ManifestAppendixOption
- type ManifestInfo
- type MemoryQuotaProvider
- type NBSCompressedChunkStore
- type NBSMetricWrapper
- func (nbsMW *NBSMetricWrapper) AddTableFilesToManifest(ctx context.Context, fileIdToNumChunks map[string]int) error
- func (nbsMW *NBSMetricWrapper) GetManyCompressed(ctx context.Context, hashes hash.HashSet, ...) error
- func (nbsMW *NBSMetricWrapper) MarkAndSweepChunks(ctx context.Context, last hash.Hash, keepChunks <-chan []hash.Hash, ...) error
- func (nbsMW *NBSMetricWrapper) PruneTableFiles(ctx context.Context) error
- func (nbsMW *NBSMetricWrapper) SetRootChunk(ctx context.Context, root, previous hash.Hash) error
- func (nbsMW *NBSMetricWrapper) Size(ctx context.Context) (uint64, error)
- func (nbsMW *NBSMetricWrapper) Sources(ctx context.Context) (hash.Hash, []TableFile, []TableFile, error)
- func (nbsMW *NBSMetricWrapper) SupportedOperations() TableFileStoreOps
- func (nbsMW *NBSMetricWrapper) WriteTableFile(ctx context.Context, fileId string, numChunks int, contentHash []byte, ...) error
- type NomsBlockStore
- func NewAWSStore(ctx context.Context, nbfVerStr string, table, ns, bucket string, s3 s3svc, ...) (*NomsBlockStore, error)
- func NewAWSStoreWithMMapIndex(ctx context.Context, nbfVerStr string, table, ns, bucket string, s3 s3svc, ...) (*NomsBlockStore, error)
- func NewBSStore(ctx context.Context, nbfVerStr string, bs blobstore.Blobstore, ...) (*NomsBlockStore, error)
- func NewGCSStore(ctx context.Context, nbfVerStr string, bucketName, path string, ...) (*NomsBlockStore, error)
- func NewLocalStore(ctx context.Context, nbfVerStr string, dir string, memTableSize uint64, ...) (*NomsBlockStore, error)
- func (nbs *NomsBlockStore) AddTableFilesToManifest(ctx context.Context, fileIdToNumChunks map[string]int) error
- func (nbs *NomsBlockStore) CalcReads(hashes hash.HashSet, blockSize uint64) (reads int, split bool, err error)
- func (nbs *NomsBlockStore) Close() error
- func (nbs *NomsBlockStore) Commit(ctx context.Context, current, last hash.Hash) (success bool, err error)
- func (nbs *NomsBlockStore) Count() (uint32, error)
- func (nbs *NomsBlockStore) Get(ctx context.Context, h hash.Hash) (chunks.Chunk, error)
- func (nbs *NomsBlockStore) GetChunkLocations(hashes hash.HashSet) (map[hash.Hash]map[hash.Hash]Range, error)
- func (nbs *NomsBlockStore) GetMany(ctx context.Context, hashes hash.HashSet, ...) error
- func (nbs *NomsBlockStore) GetManyCompressed(ctx context.Context, hashes hash.HashSet, ...) error
- func (nbs *NomsBlockStore) Has(ctx context.Context, h hash.Hash) (bool, error)
- func (nbs *NomsBlockStore) HasMany(ctx context.Context, hashes hash.HashSet) (hash.HashSet, error)
- func (nbs *NomsBlockStore) MarkAndSweepChunks(ctx context.Context, last hash.Hash, keepChunks <-chan []hash.Hash, ...) error
- func (nbs *NomsBlockStore) PruneTableFiles(ctx context.Context) (err error)
- func (nbs *NomsBlockStore) Put(ctx context.Context, c chunks.Chunk) error
- func (nbs *NomsBlockStore) Rebase(ctx context.Context) error
- func (nbs *NomsBlockStore) Root(ctx context.Context) (hash.Hash, error)
- func (nbs *NomsBlockStore) SetRootChunk(ctx context.Context, root, previous hash.Hash) error
- func (nbs *NomsBlockStore) Size(ctx context.Context) (uint64, error)
- func (nbs *NomsBlockStore) Sources(ctx context.Context) (hash.Hash, []TableFile, []TableFile, error)
- func (nbs *NomsBlockStore) Stats() interface{}
- func (nbs *NomsBlockStore) StatsSummary() string
- func (nbs *NomsBlockStore) SupportedOperations() TableFileStoreOps
- func (nbs *NomsBlockStore) UpdateManifest(ctx context.Context, updates map[hash.Hash]uint32) (mi ManifestInfo, err error)
- func (nbs *NomsBlockStore) UpdateManifestWithAppendix(ctx context.Context, updates map[hash.Hash]uint32, ...) (mi ManifestInfo, err error)
- func (nbs *NomsBlockStore) Version() string
- func (nbs *NomsBlockStore) WithoutConjoiner() *NomsBlockStore
- func (nbs *NomsBlockStore) WriteTableFile(ctx context.Context, fileId string, numChunks int, contentHash []byte, ...) error
- type OffsetsReader
- type Range
- type Stats
- type TableFile
- type TableFileStore
- type TableFileStoreOps
- type TableSpecInfo
- type UnlimitedQuotaProvider
Constants ¶
const (
// DynamoManifest does not yet include GC Generation
AWSStorageVersion = "4"
)
const (
// StorageVersion is the version of the on-disk Noms Chunks Store data format.
StorageVersion = "5"
)
Variables ¶
var (
    ErrFetchFailure                           = errors.New("fetch failed")
    ErrSpecWithoutChunkSource                 = errors.New("manifest referenced table file for which there is no chunkSource.")
    ErrConcurrentManifestWriteDuringOverwrite = errors.New("concurrent manifest write during manifest overwrite")
)
var (
    ErrWrongBufferSize = errors.New("buffer length and/or capacity incorrect for chunkCount specified in footer")
    ErrWrongCopySize   = errors.New("could not copy enough bytes")
)
var ErrAlreadyFinished = errors.New("already Finished")
ErrAlreadyFinished is an error returned if Finish is called more than once on a CmpChunkTableWriter
var ErrBufferFull = errors.New("buffer full")
ErrBufferFull is returned by the FixedBufferByteSink when the data written is larger than the buffer allocated.
var ErrChunkAlreadyWritten = errors.New("chunk already written")
var ErrCorruptManifest = errors.New("corrupt manifest")
var ErrInvalidTableFile = errors.New("invalid or corrupt table file")
ErrInvalidTableFile is an error returned when a table file is corrupt or invalid.
var ErrNoChunkSource = errors.New("no chunk source")
var ErrNoReader = errors.New("could not get reader")
var (
ErrNotEnoughBytes = errors.New("reader did not return enough bytes")
)
var ErrNotFinished = errors.New("not finished")
ErrNotFinished is an error returned by a CmpChunkTableWriter when Flush or FlushToFile is called before Finish.
var ErrUnreadableManifest = errors.New("could not read file manifest")
var ErrUnsupportedManifestAppendixOption = errors.New("unsupported manifest appendix option")
var GlobalMmapStats = &mmapStats{
    sync.Mutex{},
    0,
    noOpNotify,
    noOpNotify,
    noOpNotify,
}
Functions ¶
func GetTableIndexPrefixes ¶
func GetTableIndexPrefixes(rd io.ReadSeeker) (prefixes []uint64, err error)
func GuessPrefixOrdinal ¶
func GuessPrefixOrdinal(prefix uint64, n uint32) int
func IterChunks ¶
func IterChunks(rd io.ReadSeeker, cb func(chunk chunks.Chunk) (stop bool, err error)) error
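The following sketch iterates over every chunk in a table file on disk; it assumes the dolt import paths github.com/dolthub/dolt/go/store/nbs and github.com/dolthub/dolt/go/store/chunks, and a placeholder file path:

    package main

    import (
        "fmt"
        "os"

        "github.com/dolthub/dolt/go/store/chunks"
        "github.com/dolthub/dolt/go/store/nbs"
    )

    func main() {
        f, err := os.Open("/path/to/tableFile")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // The callback runs once per chunk; returning stop=true ends the
        // iteration early.
        err = nbs.IterChunks(f, func(c chunks.Chunk) (stop bool, err error) {
            fmt.Println(c.Hash().String(), len(c.Data()))
            return false, nil
        })
        if err != nil {
            panic(err)
        }
    }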
func MaybeMigrateFileManifest ¶
func MaybeMigrateFileManifest(ctx context.Context, dir string) (bool, error)
func NewIndexTransformer ¶
func NewIndexTransformer(src io.Reader, chunkCount int) io.Reader
func OverwriteStoreManifest ¶
func OverwriteStoreManifest(ctx context.Context, store *NomsBlockStore, root hash.Hash, tableFiles map[hash.Hash]uint32, appendixTableFiles map[hash.Hash]uint32) (err error)
OverwriteStoreManifest is a low level interface to completely replace the manifest contents of |store| with the supplied |root|, |tableFiles| and |appendixTableFiles|. It performs concurrency control on the existing |store| manifest, and can fail with |ErrConcurrentManifestWriteDuringOverwrite| if the |store|'s view is stale. If contents should be unconditionally replaced without regard for the existing contents, run this in a loop, rebasing |store| after each failure.
Regardless of success or failure, |OverwriteStoreManifest| does *not* Rebase the |store|. The persisted manifest contents will have been updated, but nothing about the in-memory view of the |store| will reflect those updates. If |store| is Rebase'd, then the new upstream contents will be picked up.
Extreme care should be taken when updating manifest contents through this interface. Logic typically assumes that stores grow monotonically unless the |gcGen| of a manifest changes. Since this interface cannot set |gcGen|, callers must ensure that calls to this function grow the store monotonically.
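A minimal sketch of the unconditional-replace loop described above, assuming the store and its new contents were obtained elsewhere (imports: context, errors, and dolt's hash and nbs packages):

    // replaceManifest retries OverwriteStoreManifest until it succeeds,
    // rebasing |store| after each concurrent-write failure as suggested above.
    func replaceManifest(
        ctx context.Context,
        store *nbs.NomsBlockStore,
        root hash.Hash,
        tableFiles, appendixTableFiles map[hash.Hash]uint32,
    ) error {
        for {
            err := nbs.OverwriteStoreManifest(ctx, store, root, tableFiles, appendixTableFiles)
            if err == nil {
                return nil
            }
            if !errors.Is(err, nbs.ErrConcurrentManifestWriteDuringOverwrite) {
                return err
            }
            // our view was stale; pick up the new upstream contents and retry
            if err = store.Rebase(ctx); err != nil {
                return err
            }
        }
    }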
func ReadTableFooter ¶
func ReadTableFooter(rd io.ReadSeeker) (chunkCount uint32, totalUncompressedData uint64, err error)
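For example, the footer of a table file on disk can be read directly (a sketch; the path is a placeholder, and the fmt, os and nbs imports are assumed):

    f, err := os.Open("/path/to/tableFile")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    chunkCount, uncompressedBytes, err := nbs.ReadTableFooter(f)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%d chunks, %d uncompressed bytes\n", chunkCount, uncompressedBytes)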
func ReadTableIndexByCopy ¶
func ReadTableIndexByCopy(rd io.ReadSeeker, q MemoryQuotaProvider) (onHeapTableIndex, error)
ReadTableIndexByCopy loads an index into memory from an io.ReadSeeker. Caution: this allocates new memory for the entire index.
func ValidateAddr ¶
func ValidateAddr(s string) bool
func WriteChunks ¶
func WriteChunks(chunks []chunks.Chunk) (string, []byte, error)
Types ¶
type BlockBufferByteSink ¶
type BlockBufferByteSink struct {
// contains filtered or unexported fields
}
BlockBufferByteSink allocates blocks of data with a given block size to store the bytes written to the sink. New blocks are allocated as needed in order to handle all the data of the Write calls.
func NewBlockBufferTableSink ¶
func NewBlockBufferTableSink(blockSize int) *BlockBufferByteSink
NewBlockBufferTableSink creates a BlockBufferByteSink with the provided block size.
func (*BlockBufferByteSink) Flush ¶
func (sink *BlockBufferByteSink) Flush(wr io.Writer) (err error)
Flush writes all the data that was written to the ByteSink to the supplied writer
func (*BlockBufferByteSink) FlushToFile ¶
func (sink *BlockBufferByteSink) FlushToFile(path string) (err error)
FlushToFile writes all the data that was written to the ByteSink to a file at the given path
type BufferedFileByteSink ¶
type BufferedFileByteSink struct {
// contains filtered or unexported fields
}
BufferedFileByteSink is a ByteSink implementation that buffers some amount of data before it passes it to a background writing thread to be flushed to a file.
func NewBufferedFileByteSink ¶
func NewBufferedFileByteSink(tempDir string, blockSize, chBufferSize int) (*BufferedFileByteSink, error)
NewBufferedFileByteSink creates a BufferedFileByteSink
func (*BufferedFileByteSink) Flush ¶
func (sink *BufferedFileByteSink) Flush(wr io.Writer) (err error)
Flush writes all the data that was written to the ByteSink to the supplied writer
func (*BufferedFileByteSink) FlushToFile ¶
func (sink *BufferedFileByteSink) FlushToFile(path string) (err error)
FlushToFile writes all the data that was written to the ByteSink to a file at the given path
type ByteSink ¶
type ByteSink interface {
    io.Writer

    // Flush writes all the data that was written to the ByteSink to the supplied writer
    Flush(wr io.Writer) error

    // FlushToFile writes all the data that was written to the ByteSink to a file at the given path
    FlushToFile(path string) error
}
A ByteSink is an interface for writing bytes which can later be flushed to a writer
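A short sketch of the write-then-flush pattern shared by all ByteSink implementations, here using a BlockBufferByteSink (assumes the bytes and nbs imports):

    var sink nbs.ByteSink = nbs.NewBlockBufferTableSink(4096) // 4KiB blocks
    if _, err := sink.Write([]byte("table file bytes")); err != nil {
        panic(err)
    }

    var out bytes.Buffer
    if err := sink.Flush(&out); err != nil { // or sink.FlushToFile(path)
        panic(err)
    }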
type CmpChunkTableWriter ¶
type CmpChunkTableWriter struct {
// contains filtered or unexported fields
}
CmpChunkTableWriter writes CompressedChunks to a table file
func NewCmpChunkTableWriter ¶
func NewCmpChunkTableWriter(tempDir string) (*CmpChunkTableWriter, error)
NewCmpChunkTableWriter creates a new CmpChunkTableWriter instance with a default ByteSink
func (*CmpChunkTableWriter) AddCmpChunk ¶
func (tw *CmpChunkTableWriter) AddCmpChunk(c CompressedChunk) error
AddCmpChunk adds a compressed chunk
func (*CmpChunkTableWriter) ChunkCount ¶
func (tw *CmpChunkTableWriter) ChunkCount() uint32
func (*CmpChunkTableWriter) ContentLength ¶
func (tw *CmpChunkTableWriter) ContentLength() uint64
ContentLength gets the size of the entire table file in bytes.
func (*CmpChunkTableWriter) Finish ¶
func (tw *CmpChunkTableWriter) Finish() (string, error)
Finish will write the index and footer of the table file and return the id of the file.
func (*CmpChunkTableWriter) Flush ¶
func (tw *CmpChunkTableWriter) Flush(wr io.Writer) error
Flush can be called after Finish in order to write the data out to the writer provided.
func (*CmpChunkTableWriter) FlushToFile ¶
func (tw *CmpChunkTableWriter) FlushToFile(path string) error
FlushToFile can be called after Finish in order to write the data out to the path provided.
func (*CmpChunkTableWriter) GetMD5 ¶
func (tw *CmpChunkTableWriter) GetMD5() []byte
GetMD5 gets the MD5 of the entire table file.
func (*CmpChunkTableWriter) Size ¶
func (tw *CmpChunkTableWriter) Size() int
Size returns the number of compressed chunks that have been added
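Putting the above together, a sketch of the writer lifecycle: add chunks, call Finish to write the index and footer and obtain the file id, then flush. The destination directory is a placeholder, chunk construction uses dolt's chunks package, and the os, path/filepath, chunks and nbs imports are assumed:

    tw, err := nbs.NewCmpChunkTableWriter(os.TempDir())
    if err != nil {
        panic(err)
    }

    c := chunks.NewChunk([]byte("example data"))
    if err = tw.AddCmpChunk(nbs.ChunkToCompressedChunk(c)); err != nil {
        panic(err)
    }

    id, err := tw.Finish() // must precede Flush/FlushToFile, per ErrNotFinished
    if err != nil {
        panic(err)
    }
    if err = tw.FlushToFile(filepath.Join("/path/to/store", id)); err != nil {
        panic(err)
    }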
type CompressedChunk ¶
type CompressedChunk struct {
    // H is the hash of the chunk
    H hash.Hash

    // FullCompressedChunk is the entirety of the compressed chunk data including the crc
    FullCompressedChunk []byte

    // CompressedData is just the snappy encoded byte buffer that stores the chunk data
    CompressedData []byte
}
CompressedChunk represents a chunk of data in a table file which is still compressed via snappy.
var EmptyCompressedChunk CompressedChunk
func ChunkToCompressedChunk ¶
func ChunkToCompressedChunk(chunk chunks.Chunk) CompressedChunk
func NewCompressedChunk ¶
func NewCompressedChunk(h hash.Hash, buff []byte) (CompressedChunk, error)
NewCompressedChunk creates a CompressedChunk
func (CompressedChunk) Hash ¶
func (cmp CompressedChunk) Hash() hash.Hash
Hash returns the hash of the data
func (CompressedChunk) IsEmpty ¶
func (cmp CompressedChunk) IsEmpty() bool
IsEmpty returns true if the chunk contains no data.
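A sketch of a compression round trip: FullCompressedChunk carries everything NewCompressedChunk needs to rebuild the chunk (assumes the fmt, chunks and nbs imports):

    c := chunks.NewChunk([]byte("hello"))
    cmp := nbs.ChunkToCompressedChunk(c)

    rt, err := nbs.NewCompressedChunk(cmp.Hash(), cmp.FullCompressedChunk)
    if err != nil {
        panic(err)
    }
    fmt.Println(rt.Hash() == c.Hash()) // true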
type FixedBufferByteSink ¶
type FixedBufferByteSink struct {
// contains filtered or unexported fields
}
FixedBufferByteSink is a ByteSink implementation with a buffer whose size will not change. Writing more data than the fixed buffer can hold will result in an error
func NewFixedBufferTableSink ¶
func NewFixedBufferTableSink(buff []byte) *FixedBufferByteSink
NewFixedBufferTableSink creates a FixedBufferByteSink which will use the supplied buffer
func (*FixedBufferByteSink) Flush ¶
func (sink *FixedBufferByteSink) Flush(wr io.Writer) error
Flush writes all the data that was written to the ByteSink to the supplied writer
func (*FixedBufferByteSink) FlushToFile ¶
func (sink *FixedBufferByteSink) FlushToFile(path string) (err error)
FlushToFile writes all the data that was written to the ByteSink to a file at the given path
type GenerationalNBS ¶
type GenerationalNBS struct {
// contains filtered or unexported fields
}
func NewGenerationalCS ¶
func NewGenerationalCS(oldGen, newGen *NomsBlockStore) *GenerationalNBS
func (*GenerationalNBS) AddTableFilesToManifest ¶
func (gcs *GenerationalNBS) AddTableFilesToManifest(ctx context.Context, fileIdToNumChunks map[string]int) error
AddTableFilesToManifest adds table files to the manifest of the new gen chunk store.
func (*GenerationalNBS) Close ¶
func (gcs *GenerationalNBS) Close() error
Close tears down any resources in use by the implementation. After Close(), the ChunkStore may not be used again. It is NOT SAFE to call Close() concurrently with any other ChunkStore method; behavior is undefined and probably crashy.
func (*GenerationalNBS) Commit ¶
Commit atomically attempts to persist all novel Chunks and update the persisted root hash from last to current (or keeps it the same). If last doesn't match the root in persistent storage, returns false.
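A sketch of the optimistic retry loop this contract implies; writing the novel chunks and computing |newRoot| are elided, and the context, hash and nbs imports are assumed:

    func commitRoot(ctx context.Context, cs *nbs.GenerationalNBS, newRoot hash.Hash) error {
        for {
            last, err := cs.Root(ctx)
            if err != nil {
                return err
            }
            ok, err := cs.Commit(ctx, newRoot, last)
            if err != nil {
                return err
            }
            if ok {
                return nil // newRoot is now the persisted root
            }
            // another writer won the race; pick up its root and retry
            if err = cs.Rebase(ctx); err != nil {
                return err
            }
        }
    }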
func (*GenerationalNBS) Get ¶
Get the Chunk for the value of the hash in the store. If the hash is absent from the store EmptyChunk is returned.
func (*GenerationalNBS) GetMany ¶
func (gcs *GenerationalNBS) GetMany(ctx context.Context, hashes hash.HashSet, found func(context.Context, *chunks.Chunk)) error
GetMany gets the Chunks with |hashes| from the store. On return, |found| will have been called for each chunk that was found. Any non-present chunks are silently ignored.
func (*GenerationalNBS) GetManyCompressed ¶
func (gcs *GenerationalNBS) GetManyCompressed(ctx context.Context, hashes hash.HashSet, found func(context.Context, CompressedChunk)) error
func (*GenerationalNBS) Has ¶
Returns true iff the value at the address |h| is contained in the store
func (*GenerationalNBS) HasMany ¶
func (gcs *GenerationalNBS) HasMany(ctx context.Context, hashes hash.HashSet) (absent hash.HashSet, err error)
Returns a new HashSet containing any members of |hashes| that are absent from the store.
func (*GenerationalNBS) NewGen ¶
func (gcs *GenerationalNBS) NewGen() chunks.ChunkStoreGarbageCollector
func (*GenerationalNBS) OldGen ¶
func (gcs *GenerationalNBS) OldGen() chunks.ChunkStoreGarbageCollector
func (*GenerationalNBS) PruneTableFiles ¶
func (gcs *GenerationalNBS) PruneTableFiles(ctx context.Context) error
PruneTableFiles deletes old table files that are no longer referenced in the manifest of the new or old gen chunkstores
func (*GenerationalNBS) Put ¶
Put caches c in the ChunkStore. Upon return, c must be visible to subsequent Get and Has calls, but must not be persistent until a call to Flush(). Put may be called concurrently with other calls to Put(), Get(), GetMany(), Has() and HasMany().
func (*GenerationalNBS) Rebase ¶
func (gcs *GenerationalNBS) Rebase(ctx context.Context) error
Rebase brings this ChunkStore into sync with the persistent storage's current root.
func (*GenerationalNBS) Root ¶
Root returns the root of the database as of the time the ChunkStore was opened or the most recent call to Rebase.
func (*GenerationalNBS) SetRootChunk ¶
SetRootChunk changes the root chunk hash from the previous value to the new root for the new gen chunk store.
func (*GenerationalNBS) Size ¶
func (gcs *GenerationalNBS) Size(ctx context.Context) (uint64, error)
Size returns the total size, in bytes, of the table files in the new and old gen stores combined
func (*GenerationalNBS) Sources ¶
func (gcs *GenerationalNBS) Sources(ctx context.Context) (hash.Hash, []TableFile, []TableFile, error)
Sources retrieves the current root hash, a list of all the table files (which may include appendix table files), and a second list containing only appendix table files for both the old gen and new gen stores.
func (*GenerationalNBS) Stats ¶
func (gcs *GenerationalNBS) Stats() interface{}
Stats may return some kind of struct that reports statistics about the ChunkStore instance. The type is implementation-dependent, and impls may return nil
func (*GenerationalNBS) StatsSummary ¶
func (gcs *GenerationalNBS) StatsSummary() string
StatsSummary may return a string containing summarized statistics for this ChunkStore. It must return "Unsupported" if this operation is not supported.
func (*GenerationalNBS) SupportedOperations ¶
func (gcs *GenerationalNBS) SupportedOperations() TableFileStoreOps
SupportedOperations returns a description of the supported TableFile operations. Some stores only support reading table files, not writing.
func (*GenerationalNBS) Version ¶
func (gcs *GenerationalNBS) Version() string
Returns the NomsVersion with which this ChunkStore is compatible.
func (*GenerationalNBS) WriteTableFile ¶
func (gcs *GenerationalNBS) WriteTableFile(ctx context.Context, fileId string, numChunks int, contentHash []byte, getRd func() (io.ReadCloser, uint64, error)) error
WriteTableFile will read a table file from the provided reader and write it to the new gen TableFileStore
type HashingByteSink ¶
type HashingByteSink struct {
// contains filtered or unexported fields
}
HashingByteSink is a ByteSink that keeps an md5 hash of all the data written to it.
func NewHashingByteSink ¶
func NewHashingByteSink(backingSink ByteSink) *HashingByteSink
func (*HashingByteSink) Flush ¶
func (sink *HashingByteSink) Flush(wr io.Writer) error
Flush writes all the data that was written to the ByteSink to the supplied writer
func (*HashingByteSink) FlushToFile ¶
func (sink *HashingByteSink) FlushToFile(path string) error
FlushToFile writes all the data that was written to the ByteSink to a file at the given path
func (*HashingByteSink) GetMD5 ¶
func (sink *HashingByteSink) GetMD5() []byte
GetMD5 gets the MD5 hash of all the bytes written to the sink
func (*HashingByteSink) Size ¶
func (sink *HashingByteSink) Size() uint64
Size gets the number of bytes written to the sink
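For example, a block-buffer sink can be wrapped to fingerprint a table file as it is written (a sketch; assumes the fmt and nbs imports):

    sink := nbs.NewHashingByteSink(nbs.NewBlockBufferTableSink(4096))
    if _, err := sink.Write([]byte("table file bytes")); err != nil {
        panic(err)
    }
    fmt.Printf("md5=%x size=%d\n", sink.GetMD5(), sink.Size())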
type ManifestAppendixOption ¶
type ManifestAppendixOption int
const (
    ManifestAppendixOption_Unspecified ManifestAppendixOption = iota
    ManifestAppendixOption_Set
    ManifestAppendixOption_Append
)
type ManifestInfo ¶
type ManifestInfo interface {
    GetVersion() string
    GetLock() string
    GetGCGen() string
    GetRoot() hash.Hash
    NumTableSpecs() int
    NumAppendixSpecs() int
    GetTableSpecInfo(i int) TableSpecInfo
    GetAppendixTableSpecInfo(i int) TableSpecInfo
}
ManifestInfo is an interface for retrieving data from a manifest outside of this package
func ParseManifest ¶
func ParseManifest(r io.Reader) (ManifestInfo, error)
ParseManifest parses a manifest file from the supplied reader
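A sketch of inspecting a manifest on disk; the file name "manifest" is an assumption about the on-disk layout, and the fmt, os and nbs imports are assumed:

    f, err := os.Open("/path/to/db/manifest")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    mi, err := nbs.ParseManifest(f)
    if err != nil {
        panic(err)
    }
    fmt.Println(mi.GetVersion(), mi.GetRoot(), mi.NumTableSpecs())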
type MemoryQuotaProvider ¶
type MemoryQuotaProvider interface {
    AcquireQuota(ctx context.Context, memory uint64) error
    ReleaseQuota(memory uint64) error
    Usage() uint64
}
type NBSCompressedChunkStore ¶
type NBSCompressedChunkStore interface {
    chunks.ChunkStore
    GetManyCompressed(context.Context, hash.HashSet, func(context.Context, CompressedChunk)) error
}
type NBSMetricWrapper ¶
type NBSMetricWrapper struct {
    *chunks.CSMetricWrapper
    // contains filtered or unexported fields
}
NBSMetricWrapper is a ChunkStore implementation that wraps a ChunkStore, and collects metrics on the calls.
func NewNBSMetricWrapper ¶
func NewNBSMetricWrapper(nbs *NomsBlockStore) *NBSMetricWrapper
NewNBSMetricWrapper returns a new NBSMetricWrapper
func (*NBSMetricWrapper) AddTableFilesToManifest ¶
func (nbsMW *NBSMetricWrapper) AddTableFilesToManifest(ctx context.Context, fileIdToNumChunks map[string]int) error
AddTableFilesToManifest adds table files to the manifest
func (*NBSMetricWrapper) GetManyCompressed ¶
func (nbsMW *NBSMetricWrapper) GetManyCompressed(ctx context.Context, hashes hash.HashSet, found func(context.Context, CompressedChunk)) error
GetManyCompressed gets the compressed Chunks with |hashes| from the store. On return, |found| will have been called for each chunk that was found. Any non-present chunks are silently ignored.
func (*NBSMetricWrapper) MarkAndSweepChunks ¶
func (nbsMW *NBSMetricWrapper) MarkAndSweepChunks(ctx context.Context, last hash.Hash, keepChunks <-chan []hash.Hash, dest chunks.ChunkStore) error
func (*NBSMetricWrapper) PruneTableFiles ¶
func (nbsMW *NBSMetricWrapper) PruneTableFiles(ctx context.Context) error
PruneTableFiles deletes old table files that are no longer referenced in the manifest.
func (*NBSMetricWrapper) SetRootChunk ¶
SetRootChunk changes the root chunk hash from the previous value to the new root.
func (*NBSMetricWrapper) Size ¶
func (nbsMW *NBSMetricWrapper) Size(ctx context.Context) (uint64, error)
func (*NBSMetricWrapper) Sources ¶
func (nbsMW *NBSMetricWrapper) Sources(ctx context.Context) (hash.Hash, []TableFile, []TableFile, error)
Sources retrieves the current root hash, a list of all the table files, and a list of the appendix table files.
func (*NBSMetricWrapper) SupportedOperations ¶
func (nbsMW *NBSMetricWrapper) SupportedOperations() TableFileStoreOps
Forwards SupportedOperations to wrapped block store.
func (*NBSMetricWrapper) WriteTableFile ¶
func (nbsMW *NBSMetricWrapper) WriteTableFile(ctx context.Context, fileId string, numChunks int, contentHash []byte, getRd func() (io.ReadCloser, uint64, error)) error
WriteTableFile will read a table file from the provided reader and write it to the TableFileStore
type NomsBlockStore ¶
type NomsBlockStore struct {
// contains filtered or unexported fields
}
func NewAWSStore ¶
func NewAWSStore(ctx context.Context, nbfVerStr string, table, ns, bucket string, s3 s3svc, ddb ddbsvc, memTableSize uint64, q MemoryQuotaProvider) (*NomsBlockStore, error)
func NewAWSStoreWithMMapIndex ¶
func NewAWSStoreWithMMapIndex(ctx context.Context, nbfVerStr string, table, ns, bucket string, s3 s3svc, ddb ddbsvc, memTableSize uint64, q MemoryQuotaProvider) (*NomsBlockStore, error)
func NewBSStore ¶
func NewBSStore(ctx context.Context, nbfVerStr string, bs blobstore.Blobstore, memTableSize uint64, q MemoryQuotaProvider) (*NomsBlockStore, error)
NewBSStore returns an nbs implementation backed by a Blobstore
func NewGCSStore ¶
func NewGCSStore(ctx context.Context, nbfVerStr string, bucketName, path string, gcs *storage.Client, memTableSize uint64, q MemoryQuotaProvider) (*NomsBlockStore, error)
NewGCSStore returns an nbs implementation backed by a GCSBlobstore
func NewLocalStore ¶
func NewLocalStore(ctx context.Context, nbfVerStr string, dir string, memTableSize uint64, q MemoryQuotaProvider) (*NomsBlockStore, error)
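A sketch of opening a local store and doing a Put/Get round trip. The format version string "__DOLT__" and the 64MB memtable size are assumptions; use whatever matches the database being opened. The context, fmt, chunks and nbs imports are assumed:

    ctx := context.Background()
    store, err := nbs.NewLocalStore(ctx, "__DOLT__", "/path/to/dir", 1<<26, nbs.NewUnlimitedMemQuotaProvider())
    if err != nil {
        panic(err)
    }
    defer store.Close()

    c := chunks.NewChunk([]byte("hello"))
    if err = store.Put(ctx, c); err != nil {
        panic(err)
    }
    got, err := store.Get(ctx, c.Hash())
    if err != nil {
        panic(err)
    }
    fmt.Println(got.Hash() == c.Hash()) // true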
func (*NomsBlockStore) AddTableFilesToManifest ¶
func (nbs *NomsBlockStore) AddTableFilesToManifest(ctx context.Context, fileIdToNumChunks map[string]int) error
AddTableFilesToManifest adds table files to the manifest
func (*NomsBlockStore) Close ¶
func (nbs *NomsBlockStore) Close() error
func (*NomsBlockStore) Count ¶
func (nbs *NomsBlockStore) Count() (uint32, error)
func (*NomsBlockStore) GetChunkLocations ¶
func (*NomsBlockStore) GetManyCompressed ¶
func (nbs *NomsBlockStore) GetManyCompressed(ctx context.Context, hashes hash.HashSet, found func(context.Context, CompressedChunk)) error
func (*NomsBlockStore) MarkAndSweepChunks ¶
func (nbs *NomsBlockStore) MarkAndSweepChunks(ctx context.Context, last hash.Hash, keepChunks <-chan []hash.Hash, dest chunks.ChunkStore) error
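A rough sketch of driving the mark-and-sweep channel protocol: a producer goroutine sends batches of reachable hashes on |keepChunks| and closes the channel when the walk is complete. Passing the current root as |last| and a nil |dest| are assumptions, not documented behavior:

    func gcSketch(ctx context.Context, store *nbs.NomsBlockStore, root hash.Hash) error {
        keep := make(chan []hash.Hash)
        go func() {
            defer close(keep)
            // a real caller walks the reference graph from |root| in batches
            keep <- []hash.Hash{root}
        }()
        return store.MarkAndSweepChunks(ctx, root, keep, nil)
    }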
func (*NomsBlockStore) PruneTableFiles ¶
func (nbs *NomsBlockStore) PruneTableFiles(ctx context.Context) (err error)
PruneTableFiles deletes old table files that are no longer referenced in the manifest.
func (*NomsBlockStore) SetRootChunk ¶
SetRootChunk changes the root chunk hash from the previous value to the new root.
func (*NomsBlockStore) Sources ¶
func (nbs *NomsBlockStore) Sources(ctx context.Context) (hash.Hash, []TableFile, []TableFile, error)
Sources retrieves the current root hash, a list of all table files (which may include appendix table files), and a second list of only the appendix table files.
func (*NomsBlockStore) Stats ¶
func (nbs *NomsBlockStore) Stats() interface{}
func (*NomsBlockStore) StatsSummary ¶
func (nbs *NomsBlockStore) StatsSummary() string
func (*NomsBlockStore) SupportedOperations ¶
func (nbs *NomsBlockStore) SupportedOperations() TableFileStoreOps
func (*NomsBlockStore) UpdateManifest ¶
func (nbs *NomsBlockStore) UpdateManifest(ctx context.Context, updates map[hash.Hash]uint32) (mi ManifestInfo, err error)
func (*NomsBlockStore) UpdateManifestWithAppendix ¶
func (nbs *NomsBlockStore) UpdateManifestWithAppendix(ctx context.Context, updates map[hash.Hash]uint32, option ManifestAppendixOption) (mi ManifestInfo, err error)
func (*NomsBlockStore) Version ¶
func (nbs *NomsBlockStore) Version() string
func (*NomsBlockStore) WithoutConjoiner ¶
func (nbs *NomsBlockStore) WithoutConjoiner() *NomsBlockStore
WithoutConjoiner returns a new *NomsBlockStore instance that will not conjoin table files during manifest updates. Used in some server-side contexts where things like table file maintenance are done out-of-process. Not safe for use outside of NomsBlockStore construction.
func (*NomsBlockStore) WriteTableFile ¶
func (nbs *NomsBlockStore) WriteTableFile(ctx context.Context, fileId string, numChunks int, contentHash []byte, getRd func() (io.ReadCloser, uint64, error)) error
WriteTableFile will read a table file from the provided reader and write it to the TableFileStore
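A sketch of uploading a finished table file (for example, one produced by CmpChunkTableWriter) and registering it in the manifest; |fileId|, |numChunks| and |contentHash| would come from the writer's Finish, ChunkCount and GetMD5, and the context, io, os and nbs imports are assumed:

    func uploadTableFile(ctx context.Context, store nbs.TableFileStore, path, fileId string, numChunks int, contentHash []byte) error {
        getRd := func() (io.ReadCloser, uint64, error) {
            f, err := os.Open(path)
            if err != nil {
                return nil, 0, err
            }
            info, err := f.Stat()
            if err != nil {
                f.Close()
                return nil, 0, err
            }
            return f, uint64(info.Size()), nil
        }
        if err := store.WriteTableFile(ctx, fileId, numChunks, contentHash, getRd); err != nil {
            return err
        }
        return store.AddTableFilesToManifest(ctx, map[string]int{fileId: numChunks})
    }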
type OffsetsReader ¶
type OffsetsReader struct {
// contains filtered or unexported fields
}
OffsetsReader transforms a byte stream of table file lengths into a byte stream of table file offsets
func NewOffsetsReader ¶
func NewOffsetsReader(lengthsReader io.Reader) *OffsetsReader
type Stats ¶
type Stats struct {
    OpenLatency   metrics.Histogram
    CommitLatency metrics.Histogram

    IndexReadLatency  metrics.Histogram
    IndexBytesPerRead metrics.Histogram

    GetLatency   metrics.Histogram
    ChunksPerGet metrics.Histogram

    FileReadLatency  metrics.Histogram
    FileBytesPerRead metrics.Histogram

    S3ReadLatency  metrics.Histogram
    S3BytesPerRead metrics.Histogram

    MemReadLatency  metrics.Histogram
    MemBytesPerRead metrics.Histogram

    DynamoReadLatency  metrics.Histogram
    DynamoBytesPerRead metrics.Histogram

    HasLatency      metrics.Histogram
    AddressesPerHas metrics.Histogram

    PutLatency metrics.Histogram

    PersistLatency   metrics.Histogram
    BytesPerPersist  metrics.Histogram
    ChunksPerPersist metrics.Histogram

    CompressedChunkBytesPerPersist   metrics.Histogram
    UncompressedChunkBytesPerPersist metrics.Histogram

    ConjoinLatency   metrics.Histogram
    BytesPerConjoin  metrics.Histogram
    ChunksPerConjoin metrics.Histogram
    TablesPerConjoin metrics.Histogram

    ReadManifestLatency  metrics.Histogram
    WriteManifestLatency metrics.Histogram
}
type TableFile ¶
type TableFile interface {
    // FileID gets the id of the file
    FileID() string

    // NumChunks returns the number of chunks in a table file
    NumChunks() int

    // Open returns an io.ReadCloser which can be used to read the bytes of a
    // table file. It also returns the content length of the table file.
    Open(ctx context.Context) (io.ReadCloser, uint64, error)
}
TableFile is an interface for working with an existing table file
type TableFileStore ¶
type TableFileStore interface {
    // Sources retrieves the current root hash, a list of all the table files (which may include appendix table files),
    // and a second list containing only appendix table files.
    Sources(ctx context.Context) (hash.Hash, []TableFile, []TableFile, error)

    // Size returns the total size, in bytes, of the table files in this Store.
    Size(ctx context.Context) (uint64, error)

    // WriteTableFile will read a table file from the provided reader and write it to the TableFileStore.
    WriteTableFile(ctx context.Context, fileId string, numChunks int, contentHash []byte, getRd func() (io.ReadCloser, uint64, error)) error

    // AddTableFilesToManifest adds table files to the manifest
    AddTableFilesToManifest(ctx context.Context, fileIdToNumChunks map[string]int) error

    // PruneTableFiles deletes old table files that are no longer referenced in the manifest.
    PruneTableFiles(ctx context.Context) error

    // SetRootChunk changes the root chunk hash from the previous value to the new root.
    SetRootChunk(ctx context.Context, root, previous hash.Hash) error

    // SupportedOperations returns a description of the supported TableFile operations. Some stores only support
    // reading table files, not writing.
    SupportedOperations() TableFileStoreOps
}
TableFileStore is an interface for interacting with table files directly
type TableFileStoreOps ¶
type TableFileStoreOps struct {
    // True if the TableFileStore supports reading table files.
    CanRead bool
    // True if the TableFileStore supports writing table files.
    CanWrite bool
    // True if the TableFileStore supports pruning unused table files.
    CanPrune bool
    // True if the TableFileStore supports garbage collecting chunks.
    CanGC bool
}
Describes what is possible to do with TableFiles in a TableFileStore.
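Callers can use these flags to gate optional operations, as in this sketch (assumes the context and nbs imports):

    func pruneIfSupported(ctx context.Context, store nbs.TableFileStore) error {
        if !store.SupportedOperations().CanPrune {
            return nil // this store cannot prune table files
        }
        return store.PruneTableFiles(ctx)
    }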
type TableSpecInfo ¶
TableSpecInfo is an interface for retrieving data from a tableSpec outside of this package
type UnlimitedQuotaProvider ¶
type UnlimitedQuotaProvider struct {
// contains filtered or unexported fields
}
func NewUnlimitedMemQuotaProvider ¶
func NewUnlimitedMemQuotaProvider() *UnlimitedQuotaProvider
func (*UnlimitedQuotaProvider) AcquireQuota ¶
func (q *UnlimitedQuotaProvider) AcquireQuota(ctx context.Context, memory uint64) error
func (*UnlimitedQuotaProvider) ReleaseQuota ¶
func (q *UnlimitedQuotaProvider) ReleaseQuota(memory uint64) error
func (*UnlimitedQuotaProvider) Usage ¶
func (q *UnlimitedQuotaProvider) Usage() uint64
Source Files ¶
- aws_chunk_source.go
- aws_table_persister.go
- bs_manifest.go
- bs_persister.go
- byte_sink.go
- chunk_source_adapter.go
- cmp_chunk_table_writer.go
- conjoiner.go
- dynamo_manifest.go
- dynamo_table_reader.go
- fd_cache.go
- file_manifest.go
- file_table_persister.go
- file_table_reader.go
- fs_table_cache.go
- gc_copier.go
- generational_chunk_store.go
- index_transformer.go
- manifest.go
- manifest_cache.go
- mem_table.go
- mmap_table_reader_unix.go
- nbs_metrics_wrapper.go
- persisting_chunk_source.go
- quota.go
- s3_table_reader.go
- stats.go
- store.go
- table.go
- table_index.go
- table_persister.go
- table_reader.go
- table_set.go
- table_writer.go
- util.go