Documentation ¶
Index ¶
- Constants
- Variables
- func ReadyBlock(ctx context.Context, bcache BlockCache, rp ReadyProvider, ...) (info BlockInfo, plainSize int, readyBlockData ReadyBlockData, err error)
- type BadDataError
- type BadSplitError
- type Block
- type BlockCache
- type BlockCacheHashBehavior
- type BlockCacheLifetime
- type BlockCacheSimple
- type BlockCacheStandard
- func (b *BlockCacheStandard) CheckForKnownPtr(tlf tlf.ID, block *FileBlock) (BlockPointer, error)
- func (b *BlockCacheStandard) DeleteKnownPtr(tlf tlf.ID, block *FileBlock) error
- func (b *BlockCacheStandard) DeletePermanent(id kbfsblock.ID) error
- func (b *BlockCacheStandard) DeleteTransient(id kbfsblock.ID, tlf tlf.ID) error
- func (b *BlockCacheStandard) Get(ptr BlockPointer) (Block, error)
- func (b *BlockCacheStandard) GetCleanBytesCapacity() (capacity uint64)
- func (b *BlockCacheStandard) GetWithLifetime(ptr BlockPointer) (Block, BlockCacheLifetime, error)
- func (b *BlockCacheStandard) NumCleanTransientBlocks() int
- func (b *BlockCacheStandard) Put(ptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime, ...) error
- func (b *BlockCacheStandard) SetCleanBytesCapacity(capacity uint64)
- type BlockDirectType
- type BlockInfo
- type BlockPointer
- type BlockPutState
- type BlockRef
- type BlockReqType
- type BlockSplitter
- type BlockSplitterSimple
- func (b *BlockSplitterSimple) CheckSplit(block *FileBlock) int64
- func (b *BlockSplitterSimple) CopyUntilSplit(block *FileBlock, lastBlock bool, data []byte, off int64) int64
- func (b *BlockSplitterSimple) MaxPtrsPerBlock() int
- func (b *BlockSplitterSimple) MaxSize() int64
- func (b *BlockSplitterSimple) SetBlockChangeEmbedMaxSizeForTesting(newSize uint64)
- func (b *BlockSplitterSimple) SetMaxDirEntriesByBlockSize(codec kbfscodec.Codec) error
- func (b *BlockSplitterSimple) SetMaxDirEntriesPerBlockForTesting(newMax int)
- func (b *BlockSplitterSimple) ShouldEmbedData(size uint64) bool
- func (b *BlockSplitterSimple) SplitDirIfNeeded(block *DirBlock) ([]*DirBlock, *StringOffset)
- type BlockWithPtrs
- type BranchName
- type CachePutCacheFullError
- type CommonBlock
- func (cb *CommonBlock) BytesCanBeDirtied() int64
- func (cb *CommonBlock) DataVersion() Ver
- func (cb *CommonBlock) DeepCopy() CommonBlock
- func (cb *CommonBlock) GetEncodedSize() uint32
- func (cb *CommonBlock) IsIndirect() bool
- func (cb *CommonBlock) IsTail() bool
- func (cb *CommonBlock) NewEmptier() func() Block
- func (cb *CommonBlock) NewEmpty() Block
- func (cb *CommonBlock) OffsetExceedsData(_, _ Offset) bool
- func (cb *CommonBlock) Set(other Block)
- func (cb *CommonBlock) SetEncodedSize(size uint32)
- func (cb *CommonBlock) ToCommonBlock() *CommonBlock
- type DirBlock
- func (db *DirBlock) AppendNewIndirectPtr(ptr BlockPointer, off Offset)
- func (db *DirBlock) BytesCanBeDirtied() int64
- func (db *DirBlock) ClearIndirectPtrSize(i int)
- func (db *DirBlock) DataVersion() Ver
- func (db *DirBlock) DeepCopy() *DirBlock
- func (db *DirBlock) FirstOffset() Offset
- func (db *DirBlock) IndirectPtr(i int) (BlockInfo, Offset)
- func (db *DirBlock) IsTail() bool
- func (db *DirBlock) NewEmptier() func() Block
- func (db *DirBlock) NewEmpty() Block
- func (db *DirBlock) NumIndirectPtrs() int
- func (db *DirBlock) OffsetExceedsData(startOff, off Offset) bool
- func (db *DirBlock) Set(other Block)
- func (db *DirBlock) SetIndirectPtrInfo(i int, info BlockInfo)
- func (db *DirBlock) SetIndirectPtrOff(i int, off Offset)
- func (db *DirBlock) SetIndirectPtrType(i int, dt BlockDirectType)
- func (db *DirBlock) SwapIndirectPtrs(i int, other BlockWithPtrs, otherI int)
- func (db *DirBlock) ToCommonBlock() *CommonBlock
- func (db *DirBlock) TotalPlainSizeEstimate(plainSize int, bsplit BlockSplitter) int
- type DirData
- func (dd *DirData) AddEntry(ctx context.Context, newName string, newDe DirEntry) (unrefs []BlockInfo, err error)
- func (dd *DirData) GetChildren(ctx context.Context) (children map[string]EntryInfo, err error)
- func (dd *DirData) GetDirtyChildPtrs(ctx context.Context, dirtyBcache IsDirtyProvider) (ptrs map[BlockPointer]bool, err error)
- func (dd *DirData) GetEntries(ctx context.Context) (children map[string]DirEntry, err error)
- func (dd *DirData) GetIndirectDirBlockInfos(ctx context.Context) ([]BlockInfo, error)
- func (dd *DirData) GetTopBlock(ctx context.Context, rtype BlockReqType) (*DirBlock, error)
- func (dd *DirData) Lookup(ctx context.Context, name string) (DirEntry, error)
- func (dd *DirData) Ready(ctx context.Context, id tlf.ID, bcache BlockCache, dirtyBcache IsDirtyProvider, ...) (map[BlockInfo]BlockPointer, error)
- func (dd *DirData) RemoveEntry(ctx context.Context, name string) (unrefs []BlockInfo, err error)
- func (dd *DirData) SetEntry(ctx context.Context, name string, newDe DirEntry) (unrefs []BlockInfo, err error)
- func (dd *DirData) UpdateEntry(ctx context.Context, name string, newDe DirEntry) (unrefs []BlockInfo, err error)
- type DirEntries
- type DirEntriesBySizeAsc
- type DirEntriesBySizeDesc
- type DirEntry
- type DirEntryWithName
- type DirtyBlockCache
- type DirtyBlockCacheSimple
- type DirtyBlockCacheStandard
- func (d *DirtyBlockCacheStandard) BlockSyncFinished(_ tlf.ID, size int64)
- func (d *DirtyBlockCacheStandard) Delete(_ tlf.ID, ptr BlockPointer, branch BranchName) error
- func (d *DirtyBlockCacheStandard) Get(_ context.Context, _ tlf.ID, ptr BlockPointer, branch BranchName) (Block, error)
- func (d *DirtyBlockCacheStandard) IsAnyDirty(_ tlf.ID) bool
- func (d *DirtyBlockCacheStandard) IsDirty(_ tlf.ID, ptr BlockPointer, branch BranchName) (isDirty bool)
- func (d *DirtyBlockCacheStandard) Put(_ context.Context, _ tlf.ID, ptr BlockPointer, branch BranchName, block Block) error
- func (d *DirtyBlockCacheStandard) RequestPermissionToDirty(ctx context.Context, _ tlf.ID, estimatedDirtyBytes int64) (DirtyPermChan, error)
- func (d *DirtyBlockCacheStandard) ShouldForceSync(_ tlf.ID) bool
- func (d *DirtyBlockCacheStandard) Shutdown() error
- func (d *DirtyBlockCacheStandard) Size() int
- func (d *DirtyBlockCacheStandard) SyncFinished(_ tlf.ID, size int64)
- func (d *DirtyBlockCacheStandard) UpdateSyncingBytes(_ tlf.ID, size int64)
- func (d *DirtyBlockCacheStandard) UpdateUnsyncedBytes(_ tlf.ID, newUnsyncedBytes int64, wasSyncing bool)
- type DirtyFile
- func (df *DirtyFile) AddDeferredNewBytes(bytes int64)
- func (df *DirtyFile) AddErrListener(listener chan<- error)
- func (df *DirtyFile) AssimilateDeferredNewBytes()
- func (df *DirtyFile) BlockNeedsCopy(ptr BlockPointer) bool
- func (df *DirtyFile) FinishSync() error
- func (df *DirtyFile) IsBlockOrphaned(ptr BlockPointer) bool
- func (df *DirtyFile) NotifyErrListeners(err error)
- func (df *DirtyFile) NumErrListeners() int
- func (df *DirtyFile) ResetSyncingBlocksToDirty()
- func (df *DirtyFile) SetBlockDirty(ptr BlockPointer) (needsCaching bool, isSyncing bool)
- func (df *DirtyFile) SetBlockOrphaned(ptr BlockPointer, orphaned bool)
- func (df *DirtyFile) SetBlockSyncing(ctx context.Context, ptr BlockPointer) error
- func (df *DirtyFile) UpdateNotYetSyncingBytes(newBytes int64)
- type DirtyPermChan
- type EntryInfo
- type EntryType
- type FileBlock
- func (fb *FileBlock) AppendNewIndirectPtr(ptr BlockPointer, off Offset)
- func (fb *FileBlock) BytesCanBeDirtied() int64
- func (fb *FileBlock) ClearIndirectPtrSize(i int)
- func (fb *FileBlock) DataVersion() Ver
- func (fb *FileBlock) DeepCopy() *FileBlock
- func (fb *FileBlock) FirstOffset() Offset
- func (fb *FileBlock) GetHash() kbfshash.RawDefaultHash
- func (fb *FileBlock) IndirectPtr(i int) (BlockInfo, Offset)
- func (fb *FileBlock) IsTail() bool
- func (fb *FileBlock) NewEmptier() func() Block
- func (fb *FileBlock) NewEmpty() Block
- func (fb *FileBlock) NumIndirectPtrs() int
- func (fb *FileBlock) OffsetExceedsData(startOff, off Offset) bool
- func (fb *FileBlock) Set(other Block)
- func (fb *FileBlock) SetIndirectPtrInfo(i int, info BlockInfo)
- func (fb *FileBlock) SetIndirectPtrOff(i int, off Offset)
- func (fb *FileBlock) SetIndirectPtrType(i int, dt BlockDirectType)
- func (fb *FileBlock) SwapIndirectPtrs(i int, other BlockWithPtrs, otherI int)
- func (fb *FileBlock) ToCommonBlock() *CommonBlock
- type FileBlockGetter
- type FileData
- func (fd *FileData) DeepCopy(ctx context.Context, dataVer Ver) (newTopPtr BlockPointer, allChildPtrs []BlockPointer, err error)
- func (fd *FileData) FindIPtrsAndClearSize(ctx context.Context, topBlock *FileBlock, ptrs map[BlockPointer]bool) (found map[BlockPointer]bool, err error)
- func (fd *FileData) GetBytes(ctx context.Context, startOff, endOff Int64Offset) (data []byte, err error)
- func (fd *FileData) GetFileBlockAtOffset(ctx context.Context, topBlock *FileBlock, off Int64Offset, rtype BlockReqType) (ptr BlockPointer, parentBlocks []ParentBlockAndChildIndex, block *FileBlock, ...)
- func (fd *FileData) GetIndirectFileBlockInfos(ctx context.Context) ([]BlockInfo, error)
- func (fd *FileData) GetIndirectFileBlockInfosWithTopBlock(ctx context.Context, topBlock *FileBlock) ([]BlockInfo, error)
- func (fd *FileData) Read(ctx context.Context, dest []byte, startOff Int64Offset) (int64, error)
- func (fd *FileData) Ready(ctx context.Context, id tlf.ID, bcache BlockCache, dirtyBcache IsDirtyProvider, ...) (map[BlockInfo]BlockPointer, error)
- func (fd *FileData) ReadyNonLeafBlocksInCopy(ctx context.Context, bcache BlockCache, rp ReadyProvider, bps BlockPutState, ...) ([]BlockInfo, error)
- func (fd *FileData) Split(ctx context.Context, id tlf.ID, dirtyBcache DirtyBlockCache, ...) (unrefs []BlockInfo, err error)
- func (fd *FileData) TruncateExtend(ctx context.Context, size uint64, topBlock *FileBlock, ...) (newDe DirEntry, dirtyPtrs []BlockPointer, err error)
- func (fd *FileData) TruncateShrink(ctx context.Context, size uint64, topBlock *FileBlock, oldDe DirEntry) (newDe DirEntry, dirtyPtrs []BlockPointer, unrefs []BlockInfo, ...)
- func (fd *FileData) UndupChildrenInCopy(ctx context.Context, bcache BlockCache, rp ReadyProvider, bps BlockPutState, ...) ([]BlockInfo, error)
- func (fd *FileData) Write(ctx context.Context, data []byte, off Int64Offset, topBlock *FileBlock, ...) (newDe DirEntry, dirtyPtrs []BlockPointer, unrefs []BlockInfo, ...)
- type FolderBranch
- type IndirectDirPtr
- type IndirectFilePtr
- type Int64Offset
- type IsDirtyProvider
- type NameExistsError
- type NoSuchBlockError
- type NotDirectFileBlockError
- type Offset
- type ParentBlockAndChildIndex
- type Path
- func (p Path) CanonicalPathString() string
- func (p Path) ChildPath(name string, ptr BlockPointer) Path
- func (p Path) ChildPathNoPtr(name string) Path
- func (p Path) DebugString() string
- func (p Path) HasValidParent() bool
- func (p Path) IsValid() bool
- func (p Path) IsValidForNotification() bool
- func (p Path) ParentPath() *Path
- func (p Path) String() string
- func (p Path) TailName() string
- func (p Path) TailPointer() BlockPointer
- func (p Path) TailRef() BlockRef
- type PathNode
- type PrevRevisionAndCount
- type PrevRevisions
- type ReadyBlockData
- type ReadyProvider
- type ShutdownHappenedError
- type StringOffset
- type Ver
- type Versioner
- type WallClock
Constants ¶
const (
    // MaxBlockSizeBytesDefault is the default maximum block size for KBFS.
    // 512K blocks by default, block changes embedded max == 8K.
    // Block size was chosen somewhat arbitrarily by trying to
    // minimize the overall size of the history written by a user when
    // appending 1KB writes to a file, up to a 1GB total file.  Here
    // is the output of a simple script that approximates that
    // calculation:
    //
    // Total history size for 0065536-byte blocks: 1134341128192 bytes
    // Total history size for 0131072-byte blocks: 618945052672 bytes
    // Total history size for 0262144-byte blocks: 412786622464 bytes
    // Total history size for 0524288-byte blocks: 412786622464 bytes
    // Total history size for 1048576-byte blocks: 618945052672 bytes
    // Total history size for 2097152-byte blocks: 1134341128192 bytes
    // Total history size for 4194304-byte blocks: 2216672886784 bytes
    MaxBlockSizeBytesDefault = 512 << 10
    // MaxNameBytesDefault is the max supported size of a directory
    // entry name.
    MaxNameBytesDefault = 255
    // BackgroundTaskTimeout is the timeout for any background task.
    BackgroundTaskTimeout = 1 * time.Minute
)
Variables ¶
var BPSize = uint64(reflect.TypeOf(BlockPointer{}).Size())
BPSize is the estimated size of a block pointer in bytes.
Functions ¶
func ReadyBlock ¶
func ReadyBlock( ctx context.Context, bcache BlockCache, rp ReadyProvider, kmd libkey.KeyMetadata, block Block, chargedTo keybase1.UserOrTeamID, bType keybase1.BlockType) ( info BlockInfo, plainSize int, readyBlockData ReadyBlockData, err error)
ReadyBlock is a thin wrapper around ReadyProvider.Ready() that handles checking for duplicates.
Types ¶
type BadDataError ¶
BadDataError indicates that KBFS is storing corrupt data for a block.
func (BadDataError) Error ¶
func (e BadDataError) Error() string
Error implements the error interface for BadDataError
type BadSplitError ¶
type BadSplitError struct{}
BadSplitError indicates that the BlockSplitter has an error.
func (BadSplitError) Error ¶
func (e BadSplitError) Error() string
Error implements the error interface for BadSplitError
type Block ¶
type Block interface {
    Versioner
    // GetEncodedSize returns the encoded size of this block, but only
    // if it has been previously set; otherwise it returns 0.
    GetEncodedSize() uint32
    // SetEncodedSize sets the encoded size of this block, locally
    // caching it.  The encoded size is not serialized.
    SetEncodedSize(size uint32)
    // NewEmpty returns a new block of the same type as this block
    NewEmpty() Block
    // NewEmptier returns a function that creates a new block of the
    // same type as this block.
    NewEmptier() func() Block
    // Set sets this block to the same value as the passed-in block
    Set(other Block)
    // ToCommonBlock retrieves this block as a *CommonBlock.
    ToCommonBlock() *CommonBlock
    // IsIndirect indicates whether this block contains indirect pointers.
    IsIndirect() bool
    // IsTail returns true if this block doesn't point to any other
    // blocks, either indirectly or in child directory entries.
    IsTail() bool
    // OffsetExceedsData returns true if `off` is greater than the
    // data contained in a direct block, assuming it starts at
    // `startOff`.  Note that the offset of the next block isn't
    // relevant; this function should only indicate whether the offset
    // is greater than what currently could be stored in this block.
    OffsetExceedsData(startOff, off Offset) bool
    // BytesCanBeDirtied returns the number of bytes that should be
    // marked as dirtied if this block is dirtied.
    BytesCanBeDirtied() int64
}
Block just needs to be (de)serialized using msgpack
func NewCommonBlock ¶
func NewCommonBlock() Block
NewCommonBlock returns a generic block, unsuitable for caching.
type BlockCache ¶
type BlockCache interface {
    BlockCacheSimple
    // CheckForKnownPtr sees whether this cache has a transient
    // entry for the given file block (which must be a direct file
    // block containing data).  Returns the full BlockPointer
    // associated with that ID, including key and data versions.
    // If no ID is known, return an uninitialized BlockPointer and
    // a nil error.
    CheckForKnownPtr(tlf tlf.ID, block *FileBlock) (BlockPointer, error)
    // DeleteTransient removes the transient entry for the given
    // ID from the cache, as well as any cached IDs so the block
    // won't be reused.
    DeleteTransient(id kbfsblock.ID, tlf tlf.ID) error
    // DeletePermanent removes the permanent entry for the non-dirty
    // block associated with the given block ID from the cache.  No
    // error is returned if no block exists for the given ID.
    DeletePermanent(id kbfsblock.ID) error
    // DeleteKnownPtr removes the cached ID for the given file
    // block. It does not remove the block itself.
    DeleteKnownPtr(tlf tlf.ID, block *FileBlock) error
    // GetWithLifetime retrieves a block from the cache, along with
    // the block's lifetime.
    GetWithLifetime(ptr BlockPointer) (
        block Block, lifetime BlockCacheLifetime, err error)
    // SetCleanBytesCapacity atomically sets clean bytes capacity for block
    // cache.
    SetCleanBytesCapacity(capacity uint64)
    // GetCleanBytesCapacity atomically gets clean bytes capacity for block
    // cache.
    GetCleanBytesCapacity() (capacity uint64)
}
BlockCache specifies the interface of BlockCacheSimple, and also more advanced and internal methods.
type BlockCacheHashBehavior ¶
type BlockCacheHashBehavior int
BlockCacheHashBehavior denotes whether the cache should hash the plaintext of a new block or not.
const (
    // SkipCacheHash means that the plaintext of a block should not be hashed.
    SkipCacheHash BlockCacheHashBehavior = iota
    // DoCacheHash means that the plaintext of a block should be hashed.
    DoCacheHash
)
type BlockCacheLifetime ¶
type BlockCacheLifetime int
BlockCacheLifetime denotes the lifetime of an entry in BlockCache.
const (
    // NoCacheEntry means that the entry will not be cached.
    NoCacheEntry BlockCacheLifetime = iota
    // TransientEntry means that the cache entry may be evicted at
    // any time.
    TransientEntry
    // PermanentEntry means that the cache entry must remain until
    // explicitly removed from the cache.
    PermanentEntry
)
func (BlockCacheLifetime) String ¶
func (l BlockCacheLifetime) String() string
type BlockCacheSimple ¶
type BlockCacheSimple interface {
    // Get gets the block associated with the given block ID.
    Get(ptr BlockPointer) (Block, error)
    // Put stores the final (content-addressable) block associated
    // with the given block ID.  If lifetime is TransientEntry, then it
    // is assumed that the block exists on the server and the entry
    // may be evicted from the cache at any time.  If lifetime is
    // PermanentEntry, then it is assumed that the block doesn't exist
    // on the server and must remain in the cache until explicitly
    // removed.  As an intermediary state, as when a block is being
    // sent to the server, the block may be put into the cache both
    // with TransientEntry and PermanentEntry -- these are two
    // separate entries.  This is fine, since the block should be the
    // same.  `hashBehavior` indicates whether the plaintext contents
    // of transient, direct blocks should be hashed, in order to
    // identify blocks that can be de-duped.
    Put(ptr BlockPointer, tlf tlf.ID, block Block,
        lifetime BlockCacheLifetime, hashBehavior BlockCacheHashBehavior) error
}
BlockCacheSimple gets and puts plaintext dir blocks and file blocks into a cache. These blocks are immutable and identified by their content hash.
type BlockCacheStandard ¶
type BlockCacheStandard struct {
// contains filtered or unexported fields
}
BlockCacheStandard implements the BlockCache interface by storing blocks in an in-memory LRU cache. Clean blocks are identified internally by just their block ID (since blocks are immutable and content-addressable).
func NewBlockCacheStandard ¶
func NewBlockCacheStandard(transientCapacity int, cleanBytesCapacity uint64) *BlockCacheStandard
NewBlockCacheStandard constructs a new BlockCacheStandard instance with the given transient capacity (in number of entries) and the clean bytes capacity, which is the total number of bytes allowed between the transient and permanent clean caches. If putting a block would exceed this byte capacity, transient entries are evicted until the block fits within the capacity.
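As a rough usage sketch (not taken from the package docs): the import paths, the capacity numbers, and the caller-supplied pointer and TLF ID below are assumptions.

import (
    "fmt"

    "github.com/keybase/client/go/kbfs/data" // import path assumed
    "github.com/keybase/client/go/kbfs/tlf"  // import path assumed
)

// cacheRoundTrip puts a small direct file block into a standard cache and
// reads it back.  The BlockPointer and tlf.ID are assumed to come from
// existing KBFS metadata elsewhere.
func cacheRoundTrip(ptr data.BlockPointer, tlfID tlf.ID) error {
    // Up to 10,000 transient entries and 64 MB of clean bytes.
    cache := data.NewBlockCacheStandard(10000, 64<<20)

    block := &data.FileBlock{Contents: []byte("hello world")}
    // TransientEntry: the block is assumed to exist on the server and may
    // be evicted.  SkipCacheHash: skip plaintext hashing for de-duping.
    err := cache.Put(ptr, tlfID, block, data.TransientEntry, data.SkipCacheHash)
    if err != nil {
        return err
    }

    got, err := cache.Get(ptr)
    if err != nil {
        return err
    }
    fmt.Printf("cached block has %d bytes\n", len(got.(*data.FileBlock).Contents))
    return nil
}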
func (*BlockCacheStandard) CheckForKnownPtr ¶
func (b *BlockCacheStandard) CheckForKnownPtr(tlf tlf.ID, block *FileBlock) ( BlockPointer, error)
CheckForKnownPtr implements the BlockCache interface for BlockCacheStandard.
func (*BlockCacheStandard) DeleteKnownPtr ¶
func (b *BlockCacheStandard) DeleteKnownPtr(tlf tlf.ID, block *FileBlock) error
DeleteKnownPtr implements the BlockCache interface for BlockCacheStandard.
func (*BlockCacheStandard) DeletePermanent ¶
func (b *BlockCacheStandard) DeletePermanent(id kbfsblock.ID) error
DeletePermanent implements the BlockCache interface for BlockCacheStandard.
func (*BlockCacheStandard) DeleteTransient ¶
DeleteTransient implements the BlockCache interface for BlockCacheStandard.
func (*BlockCacheStandard) Get ¶
func (b *BlockCacheStandard) Get(ptr BlockPointer) (Block, error)
Get implements the BlockCache interface for BlockCacheStandard.
func (*BlockCacheStandard) GetCleanBytesCapacity ¶
func (b *BlockCacheStandard) GetCleanBytesCapacity() (capacity uint64)
GetCleanBytesCapacity implements the BlockCache interface for BlockCacheStandard.
func (*BlockCacheStandard) GetWithLifetime ¶
func (b *BlockCacheStandard) GetWithLifetime(ptr BlockPointer) ( Block, BlockCacheLifetime, error)
GetWithLifetime implements the BlockCache interface for BlockCacheStandard.
func (*BlockCacheStandard) NumCleanTransientBlocks ¶
func (b *BlockCacheStandard) NumCleanTransientBlocks() int
NumCleanTransientBlocks returns the number of blocks in the cache with transient lifetimes.
func (*BlockCacheStandard) Put ¶
func (b *BlockCacheStandard) Put( ptr BlockPointer, tlf tlf.ID, block Block, lifetime BlockCacheLifetime, hashBehavior BlockCacheHashBehavior) error
Put implements the BlockCache interface for BlockCacheStandard. This method is idempotent for a given ptr, but that invariant is not currently goroutine-safe, and it does not hold if a block size changes between Puts. That is, we assume that a cached block associated with a given pointer will never change its size, even when it gets Put into the cache again.
func (*BlockCacheStandard) SetCleanBytesCapacity ¶
func (b *BlockCacheStandard) SetCleanBytesCapacity(capacity uint64)
SetCleanBytesCapacity implements the BlockCache interface for BlockCacheStandard.
type BlockDirectType ¶
type BlockDirectType int
BlockDirectType indicates to what kind of block (direct or indirect) a BlockPointer points.
const (
    // UnknownDirectType indicates an old block that was written
    // before we started labeling pointers.
    UnknownDirectType BlockDirectType = 0
    // DirectBlock indicates the pointed-to block has no indirect
    // pointers.
    DirectBlock BlockDirectType = 1
    // IndirectBlock indicates the pointed-to block has indirect
    // pointers.
    IndirectBlock BlockDirectType = 2
)
func (BlockDirectType) String ¶
func (bdt BlockDirectType) String() string
type BlockInfo ¶
type BlockInfo struct {
    BlockPointer
    // When non-zero, the size of the encoded (and possibly
    // encrypted) data contained in the block. When non-zero,
    // always at least the size of the plaintext data contained in
    // the block.
    EncodedSize uint32 `codec:"e"`
}
BlockInfo contains all information about a block in KBFS and its contents.
NOTE: Don't add or modify anything in this struct without considering how old clients will handle them.
type BlockPointer ¶
type BlockPointer struct {
    ID         kbfsblock.ID    `codec:"i"`
    KeyGen     kbfsmd.KeyGen   `codec:"k"` // if valid, which generation of the TLF{Writer,Reader}KeyBundle to use
    DataVer    Ver             `codec:"d"` // if valid, which version of the KBFS data structures is pointed to
    DirectType BlockDirectType `codec:"t,omitempty"` // the type (direct, indirect, or unknown [if omitted]) of the pointed-to block

    kbfsblock.Context
}
BlockPointer contains the identifying information for a block in KBFS.
NOTE: Don't add or modify anything in this struct without considering how old clients will handle them.
var ZeroPtr BlockPointer
ZeroPtr represents an empty BlockPointer.
func (BlockPointer) IsInitialized ¶
func (p BlockPointer) IsInitialized() bool
IsInitialized returns whether or not this BlockPointer has non-nil data.
func (BlockPointer) IsValid ¶
func (p BlockPointer) IsValid() bool
IsValid returns whether the block pointer is valid. A zero block pointer is considered invalid.
func (BlockPointer) Ref ¶
func (p BlockPointer) Ref() BlockRef
Ref returns the BlockRef equivalent of this pointer.
func (BlockPointer) String ¶
func (p BlockPointer) String() string
type BlockPutState ¶
type BlockPutState interface {
    AddNewBlock(
        ctx context.Context, blockPtr BlockPointer, block Block,
        readyBlockData ReadyBlockData, syncedCb func() error) error
    SaveOldPtr(ctx context.Context, oldPtr BlockPointer) error
}
BlockPutState is an interface for keeping track of readied blocks before putting them to the bserver.
type BlockRef ¶
BlockRef is a block ID/ref nonce pair, which defines a unique reference to a block.
type BlockReqType ¶
type BlockReqType int
BlockReqType indicates whether an operation makes block modifications or not
const (
    // BlockRead indicates a block read request.
    BlockRead BlockReqType = iota
    // BlockWrite indicates a block write request.
    BlockWrite
    // BlockReadParallel indicates a block read request that is
    // happening from a different goroutine than the blockLock rlock
    // holder, using the same lState.
    BlockReadParallel
    // BlockLookup indicates a lookup for a block for the purposes of
    // creating a new node in the node cache for it; avoid any unlocks
    // as part of the lookup process.
    BlockLookup
)
type BlockSplitter ¶
type BlockSplitter interface {
    // CopyUntilSplit copies data into the block until we reach the
    // point where we should split, but only if writing to the end of
    // the last block.  If this is writing into the middle of a file,
    // just copy everything that will fit into the block, and assume
    // that block boundaries will be fixed later. Return how much was
    // copied.
    CopyUntilSplit(
        block *FileBlock, lastBlock bool, data []byte, off int64) int64
    // CheckSplit, given a block, figures out whether it ends at the
    // right place.  If so, return 0.  If not, return either the
    // offset in the block where it should be split, or -1 if more
    // bytes from the next block should be appended.
    CheckSplit(block *FileBlock) int64
    // MaxPtrsPerBlock describes the number of indirect pointers we
    // can fit into one indirect block.
    MaxPtrsPerBlock() int
    // ShouldEmbedData decides whether we should keep the data of size
    // `size` embedded in the MD or not.
    ShouldEmbedData(size uint64) bool
    // SplitDirIfNeeded splits a direct DirBlock into multiple blocks
    // if needed.  It may modify `block`.  If a split isn't needed, it
    // returns a one-element slice containing `block`.  If a split is
    // needed, it returns a non-nil offset for the new block.
    SplitDirIfNeeded(block *DirBlock) ([]*DirBlock, *StringOffset)
}
BlockSplitter decides when a file block needs to be split
type BlockSplitterSimple ¶
type BlockSplitterSimple struct {
// contains filtered or unexported fields
}
BlockSplitterSimple implements the BlockSplitter interface by using a simple max-size algorithm to determine when to split blocks.
func NewBlockSplitterSimple ¶
func NewBlockSplitterSimple(desiredBlockSize int64, blockChangeEmbedMaxSize uint64, codec kbfscodec.Codec) ( *BlockSplitterSimple, error)
NewBlockSplitterSimple creates a new BlockSplitterSimple and adjusts the max size to try to match the desired size for file blocks, given the overhead of encoding a file block and the round-up padding we do.
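For illustration, constructing a splitter with the package's default block size might look like the sketch below; the kbfscodec import path, the use of its msgpack codec, and the 8 KB block-change embed limit are assumptions.

import (
    "github.com/keybase/client/go/kbfs/data"      // import path assumed
    "github.com/keybase/client/go/kbfs/kbfscodec" // import path assumed
)

// newDefaultSplitter builds a BlockSplitterSimple using the default KBFS
// block size and an assumed 8 KB block-change embed limit.
func newDefaultSplitter() (*data.BlockSplitterSimple, error) {
    codec := kbfscodec.NewMsgpack() // assumes the kbfscodec msgpack codec
    bsplit, err := data.NewBlockSplitterSimple(
        data.MaxBlockSizeBytesDefault, 8<<10, codec)
    if err != nil {
        return nil, err
    }
    // MaxSize reports the adjusted max block size; ShouldEmbedData reports
    // whether 4 KB of block changes would stay embedded in the MD.
    _ = bsplit.MaxSize()
    _ = bsplit.ShouldEmbedData(4 << 10)
    return bsplit, nil
}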
func NewBlockSplitterSimpleExact ¶
func NewBlockSplitterSimpleExact( maxSize int64, maxPtrsPerBlock int, blockChangeEmbedMaxSize uint64) ( *BlockSplitterSimple, error)
NewBlockSplitterSimpleExact returns a BlockSplitterSimple with the max block size set to an exact value.
func (*BlockSplitterSimple) CheckSplit ¶
func (b *BlockSplitterSimple) CheckSplit(block *FileBlock) int64
CheckSplit implements the BlockSplitter interface for BlockSplitterSimple.
func (*BlockSplitterSimple) CopyUntilSplit ¶
func (b *BlockSplitterSimple) CopyUntilSplit( block *FileBlock, lastBlock bool, data []byte, off int64) int64
CopyUntilSplit implements the BlockSplitter interface for BlockSplitterSimple.
func (*BlockSplitterSimple) MaxPtrsPerBlock ¶
func (b *BlockSplitterSimple) MaxPtrsPerBlock() int
MaxPtrsPerBlock implements the BlockSplitter interface for BlockSplitterSimple.
func (*BlockSplitterSimple) MaxSize ¶
func (b *BlockSplitterSimple) MaxSize() int64
MaxSize returns the max block size.
func (*BlockSplitterSimple) SetBlockChangeEmbedMaxSizeForTesting ¶
func (b *BlockSplitterSimple) SetBlockChangeEmbedMaxSizeForTesting( newSize uint64)
SetBlockChangeEmbedMaxSizeForTesting sets the max size for block change embeds, which is useful for testing. It is not goroutine-safe.
func (*BlockSplitterSimple) SetMaxDirEntriesByBlockSize ¶
func (b *BlockSplitterSimple) SetMaxDirEntriesByBlockSize( codec kbfscodec.Codec) error
SetMaxDirEntriesByBlockSize sets the maximum number of directory entries per directory block, based on the maximum block size. If the `KEYBASE_BSPLIT_MAX_DIR_ENTRIES` environment variable is set, this function does nothing.
func (*BlockSplitterSimple) SetMaxDirEntriesPerBlockForTesting ¶
func (b *BlockSplitterSimple) SetMaxDirEntriesPerBlockForTesting(newMax int)
SetMaxDirEntriesPerBlockForTesting sets the max dir entries for a block, which is useful for testing. It is not goroutine-safe.
func (*BlockSplitterSimple) ShouldEmbedData ¶
func (b *BlockSplitterSimple) ShouldEmbedData(size uint64) bool
ShouldEmbedData implements the BlockSplitter interface for BlockSplitterSimple.
func (*BlockSplitterSimple) SplitDirIfNeeded ¶
func (b *BlockSplitterSimple) SplitDirIfNeeded(block *DirBlock) ( []*DirBlock, *StringOffset)
SplitDirIfNeeded implements the BlockSplitter interface for BlockSplitterSimple.
type BlockWithPtrs ¶
type BlockWithPtrs interface {
    Block
    // FirstOffset returns the offset of the indirect pointer that
    // points to the first (left-most) block in a block tree.
    FirstOffset() Offset
    // NumIndirectPtrs returns the number of indirect pointers in this
    // block.  The behavior is undefined when called on a non-indirect
    // block.
    NumIndirectPtrs() int
    // IndirectPtr returns the block info and offset for the indirect
    // pointer at index `i`. The behavior is undefined when called on
    // a non-indirect block.
    IndirectPtr(i int) (BlockInfo, Offset)
    // AppendNewIndirectPtr appends a new indirect pointer at the
    // given offset.
    AppendNewIndirectPtr(ptr BlockPointer, off Offset)
    // ClearIndirectPtrSize clears the encoded size of the indirect
    // pointer stored at index `i`.
    ClearIndirectPtrSize(i int)
    // SetIndirectPtrType sets the type of the indirect pointer stored
    // at index `i`.
    SetIndirectPtrType(i int, dt BlockDirectType)
    // SetIndirectPtrOff sets the offset of the indirect pointer stored
    // at index `i`.
    SetIndirectPtrOff(i int, off Offset)
    // SetIndirectPtrInfo sets the block info of the indirect pointer
    // stored at index `i`.
    SetIndirectPtrInfo(i int, info BlockInfo)
    // SwapIndirectPtrs swaps the indirect ptr at `i` in this block
    // with the one at `otherI` in `other`.
    SwapIndirectPtrs(i int, other BlockWithPtrs, otherI int)
}
BlockWithPtrs defines methods needed for interacting with indirect pointers.
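For example, a caller holding an indirect block could collect its child BlockInfos with a loop like this sketch (the helper name is hypothetical; the import path for the package is assumed to be "github.com/keybase/client/go/kbfs/data"):

// childInfos walks the indirect pointers of an indirect block and returns
// their BlockInfos.  Calling this on a non-indirect block is undefined,
// per the interface contract above.
func childInfos(b data.BlockWithPtrs) []data.BlockInfo {
    infos := make([]data.BlockInfo, 0, b.NumIndirectPtrs())
    for i := 0; i < b.NumIndirectPtrs(); i++ {
        info, _ := b.IndirectPtr(i) // offset ignored in this sketch
        infos = append(infos, info)
    }
    return infos
}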
func NewDirBlockWithPtrs ¶
func NewDirBlockWithPtrs(isInd bool) BlockWithPtrs
NewDirBlockWithPtrs creates a new, empty DirBlock.
func NewFileBlockWithPtrs ¶
func NewFileBlockWithPtrs(isInd bool) BlockWithPtrs
NewFileBlockWithPtrs creates a new, empty FileBlock.
type BranchName ¶
type BranchName string
BranchName is the name given to a KBFS branch, for a particular top-level folder. Currently, the notion of a "branch" is client-side only, and can be used to specify which root to use for a top-level folder. (For example, viewing a historical archive could use a different branch name.)
const (
    // MasterBranch represents the mainline branch for a top-level
    // folder.  Set to the empty string so that the default will be
    // the master branch.
    MasterBranch BranchName = ""
)
func MakeRevBranchName ¶
func MakeRevBranchName(rev kbfsmd.Revision) BranchName
MakeRevBranchName returns a branch name specifying an archive branch pinned to the given revision number.
func (BranchName) IsArchived ¶
func (bn BranchName) IsArchived() bool
IsArchived returns true if the branch specifies an archived revision.
func (BranchName) RevisionIfSpecified ¶
func (bn BranchName) RevisionIfSpecified() (kbfsmd.Revision, bool)
RevisionIfSpecified returns a valid revision number and true if `bn` is a revision branch.
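A small sketch of how these branch-name helpers fit together; the kbfsmd import path, the revision number, and the function name are assumptions.

import (
    "fmt"

    "github.com/keybase/client/go/kbfs/data"   // import path assumed
    "github.com/keybase/client/go/kbfs/kbfsmd" // import path assumed
)

func branchNameExample() {
    bn := data.MakeRevBranchName(kbfsmd.Revision(42))
    fmt.Println(bn.IsArchived()) // true: the branch pins a past revision
    if rev, ok := bn.RevisionIfSpecified(); ok {
        fmt.Println("pinned to revision", rev)
    }
    // The master branch is the empty string and is not archived.
    fmt.Println(data.MasterBranch.IsArchived()) // false
}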
type CachePutCacheFullError ¶
CachePutCacheFullError indicates that a cache put failed because the cache was full.
func (CachePutCacheFullError) Error ¶
func (e CachePutCacheFullError) Error() string
type CommonBlock ¶
type CommonBlock struct {
    // IsInd indicates whether this block is so big it requires indirect pointers
    IsInd bool `codec:"s"`

    codec.UnknownFieldSetHandler
    // contains filtered or unexported fields
}
CommonBlock holds block data that is common for both subdirectories and files.
func NewCommonBlockForTesting ¶
func NewCommonBlockForTesting( isInd bool, cachedEncodedSize uint32) CommonBlock
NewCommonBlockForTesting returns a common block with some of the internal state set, which is useful for testing.
func (*CommonBlock) BytesCanBeDirtied ¶
func (cb *CommonBlock) BytesCanBeDirtied() int64
BytesCanBeDirtied implements the Block interface for CommonBlock.
func (*CommonBlock) DataVersion ¶
func (cb *CommonBlock) DataVersion() Ver
DataVersion returns data version for this block.
func (*CommonBlock) DeepCopy ¶
func (cb *CommonBlock) DeepCopy() CommonBlock
DeepCopy copies a CommonBlock without the lock.
func (*CommonBlock) GetEncodedSize ¶
func (cb *CommonBlock) GetEncodedSize() uint32
GetEncodedSize implements the Block interface for CommonBlock
func (*CommonBlock) IsIndirect ¶
func (cb *CommonBlock) IsIndirect() bool
IsIndirect implements the Block interface for CommonBlock.
func (*CommonBlock) IsTail ¶
func (cb *CommonBlock) IsTail() bool
IsTail implements the Block interface for CommonBlock.
func (*CommonBlock) NewEmptier ¶
func (cb *CommonBlock) NewEmptier() func() Block
NewEmptier implements the Block interface for CommonBlock.
func (*CommonBlock) NewEmpty ¶
func (cb *CommonBlock) NewEmpty() Block
NewEmpty implements the Block interface for CommonBlock.
func (*CommonBlock) OffsetExceedsData ¶
func (cb *CommonBlock) OffsetExceedsData(_, _ Offset) bool
OffsetExceedsData implements the Block interface for CommonBlock.
func (*CommonBlock) Set ¶
func (cb *CommonBlock) Set(other Block)
Set implements the Block interface for CommonBlock.
func (*CommonBlock) SetEncodedSize ¶
func (cb *CommonBlock) SetEncodedSize(size uint32)
SetEncodedSize implements the Block interface for CommonBlock
func (*CommonBlock) ToCommonBlock ¶
func (cb *CommonBlock) ToCommonBlock() *CommonBlock
ToCommonBlock implements the Block interface for CommonBlock.
type DirBlock ¶
type DirBlock struct {
    CommonBlock
    // if not indirect, a map of path name to directory entry
    Children map[string]DirEntry `codec:"c,omitempty"`
    // if indirect, contains the indirect pointers to the next level of blocks
    IPtrs []IndirectDirPtr `codec:"i,omitempty"`
}
DirBlock is the contents of a directory
func (*DirBlock) AppendNewIndirectPtr ¶
func (db *DirBlock) AppendNewIndirectPtr(ptr BlockPointer, off Offset)
AppendNewIndirectPtr implements the BlockWithPtrs interface for DirBlock.
func (*DirBlock) BytesCanBeDirtied ¶
BytesCanBeDirtied implements the Block interface for DirBlock.
func (*DirBlock) ClearIndirectPtrSize ¶
ClearIndirectPtrSize implements the BlockWithPtrs interface for DirBlock.
func (*DirBlock) DataVersion ¶
DataVersion returns data version for this block, which is assumed to have been modified locally.
func (*DirBlock) FirstOffset ¶
FirstOffset implements the Block interface for DirBlock.
func (*DirBlock) IndirectPtr ¶
IndirectPtr implements the BlockWithPtrs interface for DirBlock.
func (*DirBlock) NewEmptier ¶
NewEmptier implements the Block interface for DirBlock.
func (*DirBlock) NumIndirectPtrs ¶
NumIndirectPtrs implements the BlockWithPtrs interface for DirBlock.
func (*DirBlock) OffsetExceedsData ¶
OffsetExceedsData implements the Block interface for DirBlock.
func (*DirBlock) SetIndirectPtrInfo ¶
SetIndirectPtrInfo implements the BlockWithPtrs interface for DirBlock.
func (*DirBlock) SetIndirectPtrOff ¶
SetIndirectPtrOff implements the BlockWithPtrs interface for DirBlock.
func (*DirBlock) SetIndirectPtrType ¶
func (db *DirBlock) SetIndirectPtrType(i int, dt BlockDirectType)
SetIndirectPtrType implements the BlockWithPtrs interface for DirBlock.
func (*DirBlock) SwapIndirectPtrs ¶
func (db *DirBlock) SwapIndirectPtrs(i int, other BlockWithPtrs, otherI int)
SwapIndirectPtrs implements the BlockWithPtrs interface for DirBlock.
func (*DirBlock) ToCommonBlock ¶
func (db *DirBlock) ToCommonBlock() *CommonBlock
ToCommonBlock implements the Block interface for DirBlock.
func (*DirBlock) TotalPlainSizeEstimate ¶
func (db *DirBlock) TotalPlainSizeEstimate( plainSize int, bsplit BlockSplitter) int
TotalPlainSizeEstimate returns an estimate of the plaintext size of this directory block.
type DirData ¶
type DirData struct {
// contains filtered or unexported fields
}
DirData is a helper struct for accessing and manipulating data within a directory. It's meant for use within a single scope, not for long-term storage. The caller must ensure goroutine-safety.
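As a usage sketch: the *DirData here is assumed to be constructed elsewhere (NewDirData, below, takes internal collaborators such as the block getter and dirty-block cacher), and the import path is assumed.

import (
    "context"
    "fmt"

    "github.com/keybase/client/go/kbfs/data" // import path assumed
)

// listAndLookup prints the number of children and the size of one entry.
func listAndLookup(ctx context.Context, dd *data.DirData, name string) error {
    children, err := dd.GetChildren(ctx)
    if err != nil {
        return err
    }
    fmt.Println("entries:", len(children))

    de, err := dd.Lookup(ctx, name)
    if err != nil {
        return err
    }
    fmt.Println(name, "has size", de.Size)
    return nil
}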
func NewDirData ¶
func NewDirData( dir Path, chargedTo keybase1.UserOrTeamID, bsplit BlockSplitter, kmd libkey.KeyMetadata, getter dirBlockGetter, cacher dirtyBlockCacher, log logger.Logger, vlog *libkb.VDebugLog) *DirData
NewDirData creates a new DirData instance.
func (*DirData) AddEntry ¶
func (dd *DirData) AddEntry( ctx context.Context, newName string, newDe DirEntry) ( unrefs []BlockInfo, err error)
AddEntry adds a new entry to this directory.
func (*DirData) GetChildren ¶
GetChildren returns a map of all the child EntryInfos in this directory.
func (*DirData) GetDirtyChildPtrs ¶
func (dd *DirData) GetDirtyChildPtrs( ctx context.Context, dirtyBcache IsDirtyProvider) ( ptrs map[BlockPointer]bool, err error)
GetDirtyChildPtrs returns a set of dirty child pointers (not the root pointer) for the directory.
func (*DirData) GetEntries ¶
GetEntries returns a map of all the child DirEntrys in this directory.
func (*DirData) GetIndirectDirBlockInfos ¶
GetIndirectDirBlockInfos returns all of the BlockInfos for blocks pointed to by indirect blocks within this directory tree.
func (*DirData) GetTopBlock ¶
GetTopBlock returns the top-most block in this directory block tree.
func (*DirData) Lookup ¶
Lookup returns the DirEntry for the given entry named by `name` in this directory.
func (*DirData) Ready ¶
func (dd *DirData) Ready(ctx context.Context, id tlf.ID, bcache BlockCache, dirtyBcache IsDirtyProvider, rp ReadyProvider, bps BlockPutState, topBlock *DirBlock) (map[BlockInfo]BlockPointer, error)
Ready readies all the dirty child blocks for a directory tree with an indirect top-block, and updates their block IDs in their parent block's list of indirect pointers. It returns a map pointing from the new block info from any readied block to its corresponding old block pointer.
func (*DirData) RemoveEntry ¶
RemoveEntry removes an entry from this directory.
type DirEntries ¶
type DirEntries []DirEntryWithName
DirEntries is a slice of `DirEntryWithName` instances.
func DirEntryMapToDirEntries ¶
func DirEntryMapToDirEntries(entryMap map[string]DirEntry) DirEntries
DirEntryMapToDirEntries returns a `DirEntries` slice of all the entries in the given map.
func (DirEntries) Len ¶
func (d DirEntries) Len() int
Len implements the sort.Interface interface for DirEntries.
func (DirEntries) Swap ¶
func (d DirEntries) Swap(i, j int)
Swap implements the sort.Interface interface for DirEntries.
type DirEntriesBySizeAsc ¶
type DirEntriesBySizeAsc struct{ DirEntries }
DirEntriesBySizeAsc sorts entries in order of ascending size.
func (DirEntriesBySizeAsc) Less ¶
func (d DirEntriesBySizeAsc) Less(i, j int) bool
Less implements the sort.Interface interface for DirEntriesBySizeAsc.
type DirEntriesBySizeDesc ¶
type DirEntriesBySizeDesc struct{ DirEntries }
DirEntriesBySizeDesc sorts entries in order of descending size.
func (DirEntriesBySizeDesc) Less ¶
func (d DirEntriesBySizeDesc) Less(i, j int) bool
Less implements the sort.Interface interface for DirEntriesBySizeDesc.
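Putting these together, a caller might sort a directory's entries by size with the standard sort package, as in this sketch (the entry map is assumed to come from DirData.GetEntries elsewhere, and the import path is assumed):

import (
    "sort"

    "github.com/keybase/client/go/kbfs/data" // import path assumed
)

// sortBySize converts an entry map to a slice and sorts it by ascending size.
func sortBySize(entryMap map[string]data.DirEntry) data.DirEntries {
    entries := data.DirEntryMapToDirEntries(entryMap)
    sort.Sort(data.DirEntriesBySizeAsc{DirEntries: entries})
    return entries
}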
type DirEntry ¶
type DirEntry struct {
    BlockInfo
    EntryInfo

    codec.UnknownFieldSetHandler
}
DirEntry is all the data a directory knows about its child.
func (*DirEntry) IsInitialized ¶
IsInitialized returns true if this DirEntry has been initialized.
type DirEntryWithName ¶
type DirEntryWithName struct {
    DirEntry
    // contains filtered or unexported fields
}
DirEntryWithName combines a DirEntry with the name pointing to that entry within a directory.
type DirtyBlockCache ¶
type DirtyBlockCache interface {
    IsDirtyProvider
    DirtyBlockCacheSimple

    // Delete removes the dirty block associated with the given block
    // pointer and branch from the cache.  No error is returned if no
    // block exists for the given ID.
    Delete(tlfID tlf.ID, ptr BlockPointer, branch BranchName) error
    // IsAnyDirty returns whether there are any dirty blocks in the
    // cache. tlfID may be ignored.
    IsAnyDirty(tlfID tlf.ID) bool
    // RequestPermissionToDirty is called whenever a user wants to
    // write data to a file.  The caller provides an estimated number
    // of bytes that will become dirty -- this is difficult to know
    // exactly without pre-fetching all the blocks involved, but in
    // practice we can just use the number of bytes sent in via the
    // Write.  It returns a channel that blocks until the cache is
    // ready to receive more dirty data, at which point the channel is
    // closed.  The user must call
    // `UpdateUnsyncedBytes(-estimatedDirtyBytes)` once it has
    // completed its write and called `UpdateUnsyncedBytes` for all
    // the exact dirty block sizes.
    RequestPermissionToDirty(ctx context.Context, tlfID tlf.ID,
        estimatedDirtyBytes int64) (DirtyPermChan, error)
    // UpdateUnsyncedBytes is called by a user, who has already been
    // granted permission to write, with the delta in block sizes that
    // were dirtied as part of the write.  So for example, if a
    // newly-dirtied block of 20 bytes was extended by 5 bytes, they
    // should send 25.  If on the next write (before any syncs), bytes
    // 10-15 of that same block were overwritten, they should send 0
    // over the channel because there were no new bytes.  If an
    // already-dirtied block is truncated, or if previously requested
    // bytes have now been updated more accurately in previous
    // requests, newUnsyncedBytes may be negative.  wasSyncing should
    // be true if `BlockSyncStarted` has already been called for this
    // block.
    UpdateUnsyncedBytes(tlfID tlf.ID, newUnsyncedBytes int64, wasSyncing bool)
    // UpdateSyncingBytes is called when a particular block has
    // started syncing, or with a negative number when a block is no
    // longer syncing due to an error (and BlockSyncFinished will
    // never be called).
    UpdateSyncingBytes(tlfID tlf.ID, size int64)
    // BlockSyncFinished is called when a particular block has
    // finished syncing, though the overall sync might not yet be
    // complete.  This lets the cache know it might be able to grant
    // more permission to writers.
    BlockSyncFinished(tlfID tlf.ID, size int64)
    // SyncFinished is called when a complete sync has completed and
    // its dirty blocks have been removed from the cache.  This lets
    // the cache know it might be able to grant more permission to
    // writers.
    SyncFinished(tlfID tlf.ID, size int64)
    // ShouldForceSync returns true if the sync buffer is full enough
    // to force all callers to sync their data immediately.
    ShouldForceSync(tlfID tlf.ID) bool
    // Shutdown frees any resources associated with this instance.  It
    // returns an error if there are any unsynced blocks.
    Shutdown() error
}
DirtyBlockCache gets and puts plaintext dir blocks and file blocks into a cache, which have been modified by the application and not yet committed on the KBFS servers. They are identified by a (potentially random) ID that may not have any relationship with their context, along with a Branch in case the same TLF is being modified via multiple branches. Dirty blocks are never evicted, they must be deleted explicitly.
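The write-permission protocol described above might be driven roughly as follows. This is a sketch against the interface, with the cache, TLF ID, pointer, and block assumed to come from elsewhere, and the write's exact dirtied size assumed to equal the original estimate; import paths are also assumptions.

import (
    "context"

    "github.com/keybase/client/go/kbfs/data" // import path assumed
    "github.com/keybase/client/go/kbfs/tlf"  // import path assumed
)

func writeDirtyBlock(
    ctx context.Context, dbc data.DirtyBlockCache, tlfID tlf.ID,
    ptr data.BlockPointer, block data.Block, nBytes int64) error {
    // Ask for permission before dirtying an estimated nBytes.
    ch, err := dbc.RequestPermissionToDirty(ctx, tlfID, nBytes)
    if err != nil {
        return err
    }
    select {
    case <-ch: // closed once the cache can accept more dirty data
    case <-ctx.Done():
        return ctx.Err()
    }

    if err := dbc.Put(ctx, tlfID, ptr, data.MasterBranch, block); err != nil {
        return err
    }
    // Report the exact dirtied bytes, then cancel the original estimate,
    // as the RequestPermissionToDirty contract requires.
    dbc.UpdateUnsyncedBytes(tlfID, nBytes, false)
    dbc.UpdateUnsyncedBytes(tlfID, -nBytes, false)
    return nil
}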
type DirtyBlockCacheSimple ¶
type DirtyBlockCacheSimple interface {
    // Get gets the block associated with the given block ID.  Returns
    // the dirty block for the given ID, if one exists.
    Get(
        ctx context.Context, tlfID tlf.ID, ptr BlockPointer,
        branch BranchName) (Block, error)
    // Put stores a dirty block currently identified by the
    // given block pointer and branch name.
    Put(
        ctx context.Context, tlfID tlf.ID, ptr BlockPointer,
        branch BranchName, block Block) error
}
DirtyBlockCacheSimple is a bare-bones interface for a dirty block cache.
type DirtyBlockCacheStandard ¶
type DirtyBlockCacheStandard struct {
// contains filtered or unexported fields
}
DirtyBlockCacheStandard implements the DirtyBlockCache interface by storing blocks in an in-memory cache. Dirty blocks are identified by their block ID, branch name, and reference nonce, since the same block may be forked and modified on different branches and under different references simultaneously.
DirtyBlockCacheStandard controls how fast users can write into KBFS, and does so with a TCP-like slow-start algorithm that adjusts itself according to how fast bytes are synced to the server. Conceptually, there are two buffers:
syncBuf: The bytes that are currently syncing, or have finished syncing, back to the servers. Each TLF has only one sync at a time, but multiple TLFs may be syncing at the same time. We also track how many bytes within this buffer have finished syncing.

waitBuf: The bytes that have not yet begun syncing to the servers. Again, this can be for multiple TLFs, and from multiple files within a TLF. In the TCP analogy, think of this as the congestion window (cwnd).
The goal is to make sure that syncBuf can always be transmitted to the server within the file system operation timeout forced on us by the layer that interacts with the file system (19 seconds on OS X and Windows, defaults to 30 seconds for other layers if not already set). In fact, ideally the data would be transmitted in HALF of the file system operation timeout, in case a user Sync operation gets blocked behind a background Sync operation when there is significant data in waitBuf. At the same time, we want it to be as big as possible, because we get the best performance when writing lots of blocks in parallel to the servers. So, we want an algorithm that allows waitBuf to grow, without causing the next sync (or write, or setattr, etc) operation to timeout. For the purposes of this discussion, let's assume there is only one active TLF at a time.
We allow the user to set a min, start, and max size for waitBuf. Whenever a sync starts, bytes are transferred from waitBuf into syncBuf and a timer is started. When a sync completes successfully, the number of bytes synced is added to the allowed size of waitBuf (i.e., "additive increase" == exponential growth). However, if the number of sync'd bytes is smaller than the min waitBuf size, we don't do additive increase (because we haven't really tested the throughput of the server connection in that case).
If the sync takes more than 33% of half the overall operation timeout, the size of waitBuf is reduced by that same percentage (i.e., "multiplicative decrease"), and any outstanding bytes in the sync will not be used in the "additive increase" phase when the sync completes (they are considered "lost" in the TCP analogy, even though they should eventually succeed). The 33% limit was chosen mostly by trial and error, although if you assume that capacity(waitBuf) will double after each sync, then `2*len(syncBuf) == capacity(waitBuf)`, so at any given point there can be about 3*capacity(syncBuf) bytes buffered; so if syncBuf can't be sync'd in a third of the overall timeout, the next waitBuf should be reduced.
Finally, we need to make sure that the Write calls that are filling up waitBuf while a sync is happening don't timeout. But, we also need to fill waitBuf quickly, so that the next sync is ready to go as soon as the first one completes. Here we implement a compromise. Allow waitBuf to fill up instantly until it holds capacity(syncBuf) bytes. After that, allow it to fill up to 2*capacity(syncBuf), but artificially delay each write by adding backpressure, by some fraction of the system operation timeout that matches the fraction of the progress the buffer has made between capacity(syncBuf) and 2*capacity(syncBuf). As soon as the sync completes, any delayed write is unblocked and gets to start filling up the buffers again.
To avoid keeping the buffer capacity large when network conditions suddenly worsen (say after a laptop sleep when it comes back online on a new, worse network), the capacity is reset back to the minimum if resetBufferCapTime passes without any large syncs. TODO: in the future it might make sense to decrease the buffer capacity, rather than resetting it to the minimum?
func NewDirtyBlockCacheStandard ¶
func NewDirtyBlockCacheStandard( clock idutil.Clock, log logger.Logger, vlog *libkb.VDebugLog, minSyncBufCap int64, maxSyncBufCap int64, startSyncBufCap int64) *DirtyBlockCacheStandard
NewDirtyBlockCacheStandard constructs a new DirtyBlockCacheStandard instance. The min and max buffer capacities define the possible range of how many bytes we'll try to sync in any one sync, and the start size defines the initial buffer size.
func SimpleDirtyBlockCacheStandard ¶
func SimpleDirtyBlockCacheStandard() *DirtyBlockCacheStandard
SimpleDirtyBlockCacheStandard returns a dirty block cache that can only handle block put/get/delete requests; it cannot track dirty bytes.
func (*DirtyBlockCacheStandard) BlockSyncFinished ¶
func (d *DirtyBlockCacheStandard) BlockSyncFinished(_ tlf.ID, size int64)
BlockSyncFinished implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
func (*DirtyBlockCacheStandard) Delete ¶
func (d *DirtyBlockCacheStandard) Delete(_ tlf.ID, ptr BlockPointer, branch BranchName) error
Delete implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
func (*DirtyBlockCacheStandard) Get ¶
func (d *DirtyBlockCacheStandard) Get( _ context.Context, _ tlf.ID, ptr BlockPointer, branch BranchName) ( Block, error)
Get implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
func (*DirtyBlockCacheStandard) IsAnyDirty ¶
func (d *DirtyBlockCacheStandard) IsAnyDirty(_ tlf.ID) bool
IsAnyDirty implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
func (*DirtyBlockCacheStandard) IsDirty ¶
func (d *DirtyBlockCacheStandard) IsDirty(_ tlf.ID, ptr BlockPointer, branch BranchName) (isDirty bool)
IsDirty implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
func (*DirtyBlockCacheStandard) Put ¶
func (d *DirtyBlockCacheStandard) Put( _ context.Context, _ tlf.ID, ptr BlockPointer, branch BranchName, block Block) error
Put implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
func (*DirtyBlockCacheStandard) RequestPermissionToDirty ¶
func (d *DirtyBlockCacheStandard) RequestPermissionToDirty( ctx context.Context, _ tlf.ID, estimatedDirtyBytes int64) ( DirtyPermChan, error)
RequestPermissionToDirty implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
func (*DirtyBlockCacheStandard) ShouldForceSync ¶
func (d *DirtyBlockCacheStandard) ShouldForceSync(_ tlf.ID) bool
ShouldForceSync implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
func (*DirtyBlockCacheStandard) Shutdown ¶
func (d *DirtyBlockCacheStandard) Shutdown() error
Shutdown implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
func (*DirtyBlockCacheStandard) Size ¶
func (d *DirtyBlockCacheStandard) Size() int
Size returns the number of blocks currently in the cache.
func (*DirtyBlockCacheStandard) SyncFinished ¶
func (d *DirtyBlockCacheStandard) SyncFinished(_ tlf.ID, size int64)
SyncFinished implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
func (*DirtyBlockCacheStandard) UpdateSyncingBytes ¶
func (d *DirtyBlockCacheStandard) UpdateSyncingBytes(_ tlf.ID, size int64)
UpdateSyncingBytes implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
func (*DirtyBlockCacheStandard) UpdateUnsyncedBytes ¶
func (d *DirtyBlockCacheStandard) UpdateUnsyncedBytes(_ tlf.ID, newUnsyncedBytes int64, wasSyncing bool)
UpdateUnsyncedBytes implements the DirtyBlockCache interface for DirtyBlockCacheStandard.
type DirtyFile ¶
type DirtyFile struct {
    Path Path
    // contains filtered or unexported fields
}
DirtyFile represents a particular file that's been written to, but has not yet completed syncing its dirty blocks to the server.
func NewDirtyFile ¶
func NewDirtyFile(file Path, dirtyBcache DirtyBlockCache) *DirtyFile
NewDirtyFile constructs a new `DirtyFile` instance.
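A brief sketch of tracking one block's state through a sync with a DirtyFile; the Path, dirty cache, and pointer are assumed to come from elsewhere, and the import path is assumed.

import (
    "context"

    "github.com/keybase/client/go/kbfs/data" // import path assumed
)

func markAndSync(
    ctx context.Context, file data.Path, dbc data.DirtyBlockCache,
    ptr data.BlockPointer) error {
    df := data.NewDirtyFile(file, dbc)

    needsCaching, isSyncing := df.SetBlockDirty(ptr)
    _ = needsCaching // true if the block isn't in the dirty cache yet
    _ = isSyncing    // true if the block is already part of a sync

    // Mark the block as syncing, then finish the (hypothetical) sync.
    if err := df.SetBlockSyncing(ctx, ptr); err != nil {
        return err
    }
    return df.FinishSync()
}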
func (*DirtyFile) AddDeferredNewBytes ¶
AddDeferredNewBytes adds `bytes` to the count of all the bytes that have been deferred until after the current sync finishes.
func (*DirtyFile) AddErrListener ¶
AddErrListener adds a callback that will be invoked if an error happens during the sync.
func (*DirtyFile) AssimilateDeferredNewBytes ¶
func (df *DirtyFile) AssimilateDeferredNewBytes()
AssimilateDeferredNewBytes is called to indicate that any deferred bytes should be included in the count of the next sync.
func (*DirtyFile) BlockNeedsCopy ¶
func (df *DirtyFile) BlockNeedsCopy(ptr BlockPointer) bool
BlockNeedsCopy returns true if the block should be copied by anyone who next tries to modify it.
func (*DirtyFile) FinishSync ¶
FinishSync is called to indicate that a sync has finished successfully.
func (*DirtyFile) IsBlockOrphaned ¶
func (df *DirtyFile) IsBlockOrphaned(ptr BlockPointer) bool
IsBlockOrphaned returns true if the block has been orphaned and can no longer be reached in the file.
func (*DirtyFile) NotifyErrListeners ¶
NotifyErrListeners notifies all registered callbacks that an error happened, unless `err` is `nil`. It also resets the registered listeners.
func (*DirtyFile) NumErrListeners ¶
NumErrListeners returns the number of registered error listeners.
func (*DirtyFile) ResetSyncingBlocksToDirty ¶
func (df *DirtyFile) ResetSyncingBlocksToDirty()
ResetSyncingBlocksToDirty can be called when a sync failed, and all the syncing blocks need to transition back to being dirty.
func (*DirtyFile) SetBlockDirty ¶
func (df *DirtyFile) SetBlockDirty(ptr BlockPointer) ( needsCaching bool, isSyncing bool)
SetBlockDirty transitions a block to a dirty state, and returns whether or not the block needs to be put in the dirty cache (because it isn't yet), and whether or not the block is currently part of a sync in progress.
func (*DirtyFile) SetBlockOrphaned ¶
func (df *DirtyFile) SetBlockOrphaned(ptr BlockPointer, orphaned bool)
SetBlockOrphaned is called to indicate that a block has been orphaned, and can no longer be reached within the file.
func (*DirtyFile) SetBlockSyncing ¶
func (df *DirtyFile) SetBlockSyncing( ctx context.Context, ptr BlockPointer) error
SetBlockSyncing is called to indicate that the block pointed to by `ptr` is currently being synced.
func (*DirtyFile) UpdateNotYetSyncingBytes ¶
UpdateNotYetSyncingBytes adds `newBytes` to the number of outstanding to-be-synced bytes.
type DirtyPermChan ¶
type DirtyPermChan <-chan struct{}
DirtyPermChan is a channel that gets closed when the holder has permission to write. We are forced to define it as a type due to a bug in mockgen that can't handle return values with a chan struct{}.
type EntryInfo ¶
type EntryInfo struct {
    Type    EntryType
    Size    uint64
    SymPath string `codec:",omitempty"` // must be within the same root dir
    // Mtime is in unix nanoseconds
    Mtime int64
    // Ctime is in unix nanoseconds
    Ctime int64
    // If this is a team TLF, we want to track the last writer of an
    // entry, since in the block, only the team ID will be tracked.
    TeamWriter keybase1.UID `codec:"tw,omitempty"`
    // Tracks a skiplist of the previous revisions for this entry.
    PrevRevisions PrevRevisions `codec:"pr,omitempty"`
}
EntryInfo is the (non-block-related) info a directory knows about its child.
NOTE: Don't add or modify anything in this struct without considering how old clients will handle them (since this is embedded in DirEntry).
func EntryInfoFromFileInfo ¶
EntryInfoFromFileInfo converts an `os.FileInfo` into an `EntryInfo`, to the best of our ability to do so. The caller is responsible for filling in `EntryInfo.SymPath`, if needed.
type EntryType ¶
type EntryType int
EntryType is the type of a directory entry.
const (
    // File is a regular file.
    File EntryType = iota
    // Exec is an executable file.
    Exec
    // Dir is a directory.
    Dir
    // Sym is a symbolic link.
    Sym

    // FakeFile can be used to indicate a faked-out entry for a file,
    // that will be specially processed by folderBranchOps.
    FakeFile EntryType = 0xfffe
    // FakeDir can be used to indicate a faked-out entry for a directory,
    // that will be specially processed by folderBranchOps.
    FakeDir EntryType = 0xffff
)
type FileBlock ¶
type FileBlock struct {
    CommonBlock
    // if not indirect, the full contents of this block
    Contents []byte `codec:"c,omitempty"`
    // if indirect, contains the indirect pointers to the next level of blocks
    IPtrs []IndirectFilePtr `codec:"i,omitempty"`
    // contains filtered or unexported fields
}
FileBlock is the contents of a file
func (*FileBlock) AppendNewIndirectPtr ¶
func (fb *FileBlock) AppendNewIndirectPtr(ptr BlockPointer, off Offset)
AppendNewIndirectPtr implements the BlockWithPtrs interface for FileBlock.
func (*FileBlock) BytesCanBeDirtied ¶
BytesCanBeDirtied implements the Block interface for FileBlock.
func (*FileBlock) ClearIndirectPtrSize ¶
ClearIndirectPtrSize implements the BlockWithPtrs interface for FileBlock.
func (*FileBlock) DataVersion ¶
DataVersion returns the data version for this block, which is assumed to have been modified locally.
func (*FileBlock) FirstOffset ¶
FirstOffset implements the Block interface for FileBlock.
func (*FileBlock) GetHash ¶
func (fb *FileBlock) GetHash() kbfshash.RawDefaultHash
GetHash returns the hash of this FileBlock. If the hash is nil, it first calculates it.
func (*FileBlock) IndirectPtr ¶
IndirectPtr implements the BlockWithPtrs interface for FileBlock.
func (*FileBlock) NewEmptier ¶
NewEmptier implements the Block interface for FileBlock.
func (*FileBlock) NumIndirectPtrs ¶
NumIndirectPtrs implements the BlockWithPtrs interface for FileBlock.
func (*FileBlock) OffsetExceedsData ¶
OffsetExceedsData implements the Block interface for FileBlock.
func (*FileBlock) SetIndirectPtrInfo ¶
SetIndirectPtrInfo implements the BlockWithPtrs interface for FileBlock.
func (*FileBlock) SetIndirectPtrOff ¶
SetIndirectPtrOff implements the BlockWithPtrs interface for FileBlock.
func (*FileBlock) SetIndirectPtrType ¶
func (fb *FileBlock) SetIndirectPtrType(i int, dt BlockDirectType)
SetIndirectPtrType implements the BlockWithPtrs interface for FileBlock.
func (*FileBlock) SwapIndirectPtrs ¶
func (fb *FileBlock) SwapIndirectPtrs(i int, other BlockWithPtrs, otherI int)
SwapIndirectPtrs implements the BlockWithPtrs interface for FileBlock.
func (*FileBlock) ToCommonBlock ¶
func (fb *FileBlock) ToCommonBlock() *CommonBlock
ToCommonBlock implements the Block interface for FileBlock.
type FileBlockGetter ¶
type FileBlockGetter func(context.Context, libkey.KeyMetadata, BlockPointer, Path, BlockReqType) (fblock *FileBlock, wasDirty bool, err error)
FileBlockGetter is a function that gets a block suitable for reading or writing, and also returns whether the block was already dirty. It may be called from new goroutines, and must handle any required locks accordingly.
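Below is a minimal sketch of a FileBlockGetter backed by an in-memory map, for illustration only; real getters fetch from caches and block servers. The import paths and the helper name `mapBackedGetter` are assumptions, and the snippet is written as if it compiles inside the documented package.
package data

import (
    "context"
    "errors"

    "github.com/keybase/client/go/kbfs/libkey"
)

// mapBackedGetter returns a FileBlockGetter that serves blocks from a fixed
// map and reports every block as clean.
func mapBackedGetter(blocks map[BlockPointer]*FileBlock) FileBlockGetter {
    return func(ctx context.Context, kmd libkey.KeyMetadata, ptr BlockPointer,
        p Path, rtype BlockReqType) (*FileBlock, bool, error) {
        fblock, ok := blocks[ptr]
        if !ok {
            return nil, false, errors.New("block not found in map")
        }
        // Nothing served from this map is ever dirty.
        return fblock, false, nil
    }
}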
type FileData ¶
type FileData struct {
// contains filtered or unexported fields
}
FileData is a helper struct for accessing and manipulating data within a file. It's meant for use within a single scope, not for long-term storage. The caller must ensure goroutine-safety.
func NewFileData ¶
func NewFileData( file Path, chargedTo keybase1.UserOrTeamID, bsplit BlockSplitter, kmd libkey.KeyMetadata, getter FileBlockGetter, cacher dirtyBlockCacher, log logger.Logger, vlog *libkb.VDebugLog) *FileData
NewFileData makes a new file data object for the given `file` within the given `kmd`.
func (*FileData) DeepCopy ¶
func (fd *FileData) DeepCopy(ctx context.Context, dataVer Ver) ( newTopPtr BlockPointer, allChildPtrs []BlockPointer, err error)
DeepCopy makes a complete copy of this file, deduping leaf blocks and making new random BlockPointers for all indirect blocks. It returns the new top pointer of the copy, and all the new child pointers in the copy.
func (*FileData) FindIPtrsAndClearSize ¶
func (fd *FileData) FindIPtrsAndClearSize( ctx context.Context, topBlock *FileBlock, ptrs map[BlockPointer]bool) ( found map[BlockPointer]bool, err error)
FindIPtrsAndClearSize looks for the given set of indirect pointers, and returns whether they could be found. As a side effect, it also clears the encoded size for those indirect pointers.
func (*FileData) GetBytes ¶
func (fd *FileData) GetBytes(ctx context.Context, startOff, endOff Int64Offset) (data []byte, err error)
GetBytes returns a buffer containing data from the file, in the half-inclusive range `[startOff, endOff)`. If `endOff` == -1, it returns data until the end of the file.
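As a short usage sketch, a whole-file read can be expressed with `endOff == -1`; `readWholeFile` is a hypothetical helper, and `fd` is assumed to have been built with NewFileData.
package data

import "context"

// readWholeFile returns the entire contents of the file backing fd.
func readWholeFile(ctx context.Context, fd *FileData) ([]byte, error) {
    // endOff == -1 means "until the end of the file".
    return fd.GetBytes(ctx, 0, -1)
}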
func (*FileData) GetFileBlockAtOffset ¶
func (fd *FileData) GetFileBlockAtOffset(ctx context.Context, topBlock *FileBlock, off Int64Offset, rtype BlockReqType) ( ptr BlockPointer, parentBlocks []ParentBlockAndChildIndex, block *FileBlock, nextBlockStartOff, startOff Int64Offset, wasDirty bool, err error)
GetFileBlockAtOffset returns the leaf file block responsible for the given offset.
func (*FileData) GetIndirectFileBlockInfos ¶
GetIndirectFileBlockInfos returns the block infos contained in all the indirect blocks in this file tree.
func (*FileData) GetIndirectFileBlockInfosWithTopBlock ¶
func (fd *FileData) GetIndirectFileBlockInfosWithTopBlock( ctx context.Context, topBlock *FileBlock) ([]BlockInfo, error)
GetIndirectFileBlockInfosWithTopBlock returns the block infos contained in all the indirect blocks in this file tree, given an already-fetched top block.
func (*FileData) Read ¶
Read fills the `dest` buffer with data from the file, starting at `startOff`. It returns the number of bytes copied. If the read operation nears the deadline set in `ctx`, it returns as big a prefix as possible before reaching the deadline.
func (*FileData) Ready ¶
func (fd *FileData) Ready(ctx context.Context, id tlf.ID, bcache BlockCache, dirtyBcache IsDirtyProvider, rp ReadyProvider, bps BlockPutState, topBlock *FileBlock, df *DirtyFile) ( map[BlockInfo]BlockPointer, error)
Ready readies, if given an indirect top-block, all the dirty child blocks, and updates their block IDs in their parent block's list of indirect pointers. It returns a map pointing from the new block info from any readied block to its corresponding old block pointer.
func (*FileData) ReadyNonLeafBlocksInCopy ¶
func (fd *FileData) ReadyNonLeafBlocksInCopy(ctx context.Context, bcache BlockCache, rp ReadyProvider, bps BlockPutState, topBlock *FileBlock) ([]BlockInfo, error)
ReadyNonLeafBlocksInCopy takes a top block that's been copied via deepCopy(), and readies all the non-leaf children of the top block. It adds all readied blocks to the provided `bps`. It returns the BlockInfos for all non-leaf children.
func (*FileData) Split ¶
func (fd *FileData) Split(ctx context.Context, id tlf.ID, dirtyBcache DirtyBlockCache, topBlock *FileBlock, df *DirtyFile) ( unrefs []BlockInfo, err error)
Split checks, if given an indirect top block of a file, whether any of the dirty leaf blocks in that file need to be split up differently (i.e., if the BlockSplitter is using fingerprinting-based boundaries). It returns the set of blocks that now need to be unreferenced.
func (*FileData) TruncateExtend ¶
func (fd *FileData) TruncateExtend(ctx context.Context, size uint64, topBlock *FileBlock, parentBlocks []ParentBlockAndChildIndex, oldDe DirEntry, df *DirtyFile) ( newDe DirEntry, dirtyPtrs []BlockPointer, err error)
TruncateExtend increases file size to the given size by appending a "hole" to the file. Return params:
- newDe: a new directory entry with the EncodedSize cleared.
- dirtyPtrs: a slice of the BlockPointers that have been dirtied during the truncate.
func (*FileData) TruncateShrink ¶
func (fd *FileData) TruncateShrink(ctx context.Context, size uint64, topBlock *FileBlock, oldDe DirEntry) ( newDe DirEntry, dirtyPtrs []BlockPointer, unrefs []BlockInfo, newlyDirtiedChildBytes int64, err error)
TruncateShrink shrinks the file to the given size. Return params:
- newDe: a new directory entry with the EncodedSize cleared if the file shrunk.
- dirtyPtrs: a slice of the BlockPointers that have been dirtied during the truncate. This includes any interior indirect blocks that may not have been changed yet, but which will need to change as part of the sync process because of leaf node changes below it.
- unrefs: a slice of BlockInfos that must be unreferenced as part of an eventual sync of this write. May be non-nil even if err != nil.
- newlyDirtiedChildBytes is the total amount of block data dirtied by this truncate, including the entire size of blocks that have had at least one byte dirtied. As above, it may be non-zero even if err != nil.
func (*FileData) UndupChildrenInCopy ¶
func (fd *FileData) UndupChildrenInCopy(ctx context.Context, bcache BlockCache, rp ReadyProvider, bps BlockPutState, topBlock *FileBlock) ([]BlockInfo, error)
UndupChildrenInCopy takes a top block that's been copied via deepCopy(), and un-deduplicates all leaf children of the block. It adds all child blocks to the provided `bps`, including both the ones that were deduplicated and the ones that weren't. It returns the BlockInfos for all children.
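A hedged sketch of combining these two calls follows. It assumes `fd` already refers to the copied file (produced earlier with DeepCopy) and that `copiedTopBlock` is the copy's top block fetched elsewhere; `readyCopiedChildren` is a hypothetical helper, and the caller is assumed to flush the accumulated `bps` afterward.
package data

import "context"

// readyCopiedChildren un-dedups the leaf children of a copied top block and
// then readies its non-leaf children, returning all resulting BlockInfos.
func readyCopiedChildren(ctx context.Context, fd *FileData, bcache BlockCache,
    rp ReadyProvider, bps BlockPutState, copiedTopBlock *FileBlock) (
    []BlockInfo, error) {
    // Un-dedup the leaves so the copy owns its own leaf blocks.
    leafInfos, err := fd.UndupChildrenInCopy(ctx, bcache, rp, bps, copiedTopBlock)
    if err != nil {
        return nil, err
    }
    // Then ready every non-leaf child of the copied top block.
    nonLeafInfos, err := fd.ReadyNonLeafBlocksInCopy(
        ctx, bcache, rp, bps, copiedTopBlock)
    if err != nil {
        return nil, err
    }
    return append(leafInfos, nonLeafInfos...), nil
}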
func (*FileData) Write ¶
func (fd *FileData) Write(ctx context.Context, data []byte, off Int64Offset, topBlock *FileBlock, oldDe DirEntry, df *DirtyFile) ( newDe DirEntry, dirtyPtrs []BlockPointer, unrefs []BlockInfo, newlyDirtiedChildBytes int64, bytesExtended int64, err error)
Write writes the given data at the given offset within the file, making new blocks and new levels of indirection as needed (a short usage sketch follows the return params below). Return params:
- newDe: a new directory entry with the EncodedSize cleared if the file was extended.
- dirtyPtrs: a slice of the BlockPointers that have been dirtied during the write. This includes any interior indirect blocks that may not have been changed yet, but which will need to change as part of the sync process because of leaf node changes below it.
- unrefs: a slice of BlockInfos that must be unreferenced as part of an eventual sync of this write. May be non-nil even if err != nil.
- newlyDirtiedChildBytes is the total amount of block data dirtied by this write, including the entire size of blocks that have had at least one byte dirtied. As above, it may be non-zero even if err != nil.
- bytesExtended is the number of bytes the length of the file has been extended as part of this write.
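The sketch below shows one way a caller might consume Write's return values. It is a minimal illustration: `writeAt` is a hypothetical helper, and `topBlock`, `oldDe`, and `df` are assumed to have been fetched or constructed elsewhere.
package data

import "context"

// writeAt performs a single write and returns the updated directory entry,
// discarding the bookkeeping values a real caller would track.
func writeAt(ctx context.Context, fd *FileData, payload []byte, off Int64Offset,
    topBlock *FileBlock, oldDe DirEntry, df *DirtyFile) (DirEntry, error) {
    newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, bytesExtended, err :=
        fd.Write(ctx, payload, off, topBlock, oldDe, df)
    if err != nil {
        // unrefs and newlyDirtiedChildBytes may still be meaningful on error.
        _ = unrefs
        _ = newlyDirtiedChildBytes
        return DirEntry{}, err
    }
    _ = dirtyPtrs     // block pointers the caller must track as dirty
    _ = bytesExtended // how far the file length grew as part of this write
    return newDe, nil
}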
type FolderBranch ¶
type FolderBranch struct {
    Tlf    tlf.ID
    Branch BranchName // master branch, by default
}
FolderBranch represents a unique pair of top-level folder and a branch of that folder.
func (FolderBranch) String ¶
func (fb FolderBranch) String() string
type IndirectDirPtr ¶
type IndirectDirPtr struct {
    // TODO: Make sure that the block is not dirty when the EncodedSize
    // field is non-zero.
    BlockInfo
    Off StringOffset `codec:"o"`
    codec.UnknownFieldSetHandler
}
IndirectDirPtr pairs an indirect dir block with the start of that block's range of directory entries (inclusive)
type IndirectFilePtr ¶
type IndirectFilePtr struct {
    // When the EncodedSize field is non-zero, the block must not
    // be dirty.
    BlockInfo
    Off Int64Offset `codec:"o"`
    // Marker for files with holes. This is here for historical
    // reasons; a `FileBlock` should be treated as having a `HasHoles`
    // flag set to true if any of its IPtrs have `Holes` set to true.
    Holes bool `codec:"h,omitempty"`
    codec.UnknownFieldSetHandler
}
IndirectFilePtr pairs an indirect file block with the start of that block's range of bytes (inclusive)
If `Holes` is true, then this pointer is part of a list of pointers that has non-continuous offsets; that is, the offset of ptr `i` plus the length of the corresponding block contents is less than the offset of ptr `i`+1.
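The "treat the block as having holes if any pointer does" rule can be expressed as a small check like the one below; `fileHasHoles` is a hypothetical helper, not part of the package.
package data

// fileHasHoles reports whether any indirect pointer of an indirect file
// block is marked as a hole.
func fileHasHoles(fb *FileBlock) bool {
    if !fb.IsIndirect() {
        return false
    }
    for _, iptr := range fb.IPtrs {
        if iptr.Holes {
            return true
        }
    }
    return false
}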
type Int64Offset ¶
type Int64Offset int64
Int64Offset represents the offset of a block within a file.
func (Int64Offset) Equals ¶
func (i Int64Offset) Equals(other Offset) bool
Equals implements the Offset interface for Int64Offset.
func (Int64Offset) Less ¶
func (i Int64Offset) Less(other Offset) bool
Less implements the Offset interface for Int64Offset.
func (Int64Offset) String ¶
func (i Int64Offset) String() string
type IsDirtyProvider ¶
type IsDirtyProvider interface {
    // IsDirty states whether or not the block associated with the
    // given block pointer and branch name is dirty in this cache.
    IsDirty(tlfID tlf.ID, ptr BlockPointer, branch BranchName) bool
}
IsDirtyProvider defines a method for checking whether a given pointer is dirty.
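A minimal map-backed implementation is sketched below for illustration only; in practice the package's dirty block caches play this role. `mapDirtyProvider` is a hypothetical type, and the import path is an assumption.
package data

import "github.com/keybase/client/go/kbfs/tlf"

// mapDirtyProvider answers IsDirty from a fixed set of pointers.
type mapDirtyProvider struct {
    dirty map[BlockPointer]bool
}

func (m mapDirtyProvider) IsDirty(
    tlfID tlf.ID, ptr BlockPointer, branch BranchName) bool {
    // Ignores the TLF and branch; a real provider scopes dirtiness to both.
    return m.dirty[ptr]
}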
type NameExistsError ¶
type NameExistsError struct {
Name string
}
NameExistsError indicates that the user tried to create an entry for a name that already existed in a subdirectory.
func (NameExistsError) Error ¶
func (e NameExistsError) Error() string
Error implements the error interface for NameExistsError
type NoSuchBlockError ¶
NoSuchBlockError indicates that a block for the associated ID doesn't exist.
func (NoSuchBlockError) Error ¶
func (e NoSuchBlockError) Error() string
Error implements the error interface for NoSuchBlockError
type NotDirectFileBlockError ¶
type NotDirectFileBlockError struct { }
NotDirectFileBlockError indicates that a direct file block was expected, but something else (e.g., an indirect file block) was given instead.
func (NotDirectFileBlockError) Error ¶
func (e NotDirectFileBlockError) Error() string
type Offset ¶
Offset is a generic representation of an offset to an indirect pointer within an indirect Block.
type ParentBlockAndChildIndex ¶
type ParentBlockAndChildIndex struct {
// contains filtered or unexported fields
}
ParentBlockAndChildIndex is a node on a path down the tree to a particular leaf node. `pblock` is an indirect block corresponding to one of that leaf node's parents, and `childIndex` is an index into `pblock.IPtrs` to the next node along the path.
type Path ¶
type Path struct {
    FolderBranch
    Path []PathNode
}
Path represents the full KBFS path to a particular location, so that a flush can traverse backwards and fix up ids along the way.
func (Path) CanonicalPathString ¶
CanonicalPathString returns the canonical representation of the full path, always prefaced by /keybase. This may require conversion to a platform-specific path, for example, by replacing /keybase with the appropriate drive letter on Windows. It might also need conversion when running in a different run mode, for example, /keybase.staging on Unix-type platforms.
func (Path) ChildPath ¶
func (p Path) ChildPath(name string, ptr BlockPointer) Path
ChildPath returns a new Path with the addition of a new entry with the given name and BlockPointer.
func (Path) ChildPathNoPtr ¶
ChildPathNoPtr returns a new Path with the addition of a new entry with the given name. That final PathNode will have no BlockPointer.
func (Path) DebugString ¶
DebugString returns a string representation of the path with all branch and pointer information.
func (Path) HasValidParent ¶
HasValidParent returns true if this path is valid and `ParentPath()` is a valid path.
func (Path) IsValidForNotification ¶
IsValidForNotification returns true if the path has at least one node (for the root), and the first element of the path is non-empty and does not start with "<", which indicates an unnotifiable path.
func (Path) ParentPath ¶
ParentPath returns a new Path representing the parent subdirectory of this Path. Must be called with a valid path. Should not be called with a path of only a single node, as that would produce an invalid path.
func (Path) TailName ¶
TailName returns the name of the final node in the Path. Must be called with a valid path.
func (Path) TailPointer ¶
func (p Path) TailPointer() BlockPointer
TailPointer returns the BlockPointer of the final node in the Path. Must be called with a valid path.
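The sketch below shows extending a path and reading back its tail pointer, using only methods documented above. `describeChild` is a hypothetical helper; `root` is assumed to be a valid Path and `childPtr` the child's top-block pointer.
package data

// describeChild extends root with one child entry and returns the
// BlockPointer at the tail of the extended path (the child's own pointer).
func describeChild(root Path, name string, childPtr BlockPointer) BlockPointer {
    child := root.ChildPath(name, childPtr)
    return child.TailPointer()
}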
type PathNode ¶
type PathNode struct {
    BlockPointer
    Name string
}
PathNode is a single node along a KBFS path, pointing to the top block for that node of the path.
func (PathNode) DebugString ¶
DebugString returns a string representation of the node with all pointer information.
type PrevRevisionAndCount ¶
type PrevRevisionAndCount struct {
    Revision kbfsmd.Revision `codec:"r"`
    Count    uint8           `codec:"c"`
    codec.UnknownFieldSetHandler
}
PrevRevisionAndCount tracks the MD version of a previous revision of a dir entry, and how many revisions ago that was from the current revision.
type PrevRevisions ¶
type PrevRevisions []PrevRevisionAndCount
PrevRevisions tracks several previous versions of a file in order of descending revision number, starting with the most recent.
func (PrevRevisions) AddRevision ¶
func (pr PrevRevisions) AddRevision( r, minRev kbfsmd.Revision) (ret PrevRevisions)
AddRevision returns a copy of `pr` with a new immediately-previous revision added, with the existing entries moved or overwritten to accommodate the new entry, and with increased counts. Any existing revisions smaller than or equal to minRev will be removed.
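A minimal usage sketch follows; `recordRevision` is a hypothetical helper, `r` is assumed to be the revision that just became the entry's immediately-previous one, and `minRev` the oldest revision still worth tracking.
package data

import "github.com/keybase/client/go/kbfs/kbfsmd"

// recordRevision returns an updated skiplist; the original slice is not
// modified, since AddRevision returns a copy.
func recordRevision(prs PrevRevisions, r, minRev kbfsmd.Revision) PrevRevisions {
    return prs.AddRevision(r, minRev)
}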
type ReadyBlockData ¶
type ReadyBlockData struct {
    // These fields should not be used outside of putBlockToServer.
    Buf        []byte
    ServerHalf kbfscrypto.BlockCryptKeyServerHalf
}
ReadyBlockData is a block that has been encoded (and encrypted).
func (ReadyBlockData) GetEncodedSize ¶
func (r ReadyBlockData) GetEncodedSize() int
GetEncodedSize returns the size of the encoded (and encrypted) block data.
type ReadyProvider ¶
type ReadyProvider interface {
    // Ready turns the given block (which belongs to the TLF with
    // the given key metadata) into encoded (and encrypted) data,
    // and calculates its ID and size, so that we can do a bunch
    // of block puts in parallel for every write. Ready() must
    // guarantee that plainSize <= readyBlockData.QuotaSize().
    Ready(ctx context.Context, kmd libkey.KeyMetadata, block Block) (
        id kbfsblock.ID, plainSize int, readyBlockData ReadyBlockData,
        err error)
}
ReadyProvider defines a method for readying a block.
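A minimal sketch of readying a single block through a ReadyProvider and reporting its encoded size is shown below; `readyOne` is a hypothetical helper, the import paths are assumptions, and `kmd` and `block` are assumed to come from elsewhere.
package data

import (
    "context"

    "github.com/keybase/client/go/kbfs/kbfsblock"
    "github.com/keybase/client/go/kbfs/libkey"
)

// readyOne encodes (and encrypts) one block and returns its ID along with
// the size of the encoded data that would be sent to the server.
func readyOne(ctx context.Context, rp ReadyProvider, kmd libkey.KeyMetadata,
    block Block) (kbfsblock.ID, int, error) {
    id, plainSize, rbd, err := rp.Ready(ctx, kmd, block)
    if err != nil {
        return kbfsblock.ID{}, 0, err
    }
    _ = plainSize // size of the plaintext before encoding/encryption
    return id, rbd.GetEncodedSize(), nil
}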
type ShutdownHappenedError ¶
type ShutdownHappenedError struct { }
ShutdownHappenedError indicates that shutdown has happened.
func (ShutdownHappenedError) Error ¶
func (e ShutdownHappenedError) Error() string
Error implements the error interface for ShutdownHappenedError.
type StringOffset ¶
type StringOffset string
StringOffset represents the offset of a block within a directory.
func (*StringOffset) Equals ¶
func (s *StringOffset) Equals(other Offset) bool
Equals implements the Offset interface for StringOffset.
func (*StringOffset) Less ¶
func (s *StringOffset) Less(other Offset) bool
Less implements the Offset interface for StringOffset.
func (*StringOffset) String ¶
func (s *StringOffset) String() string
type Ver ¶
type Ver int
Ver is the type of a version for marshalled KBFS data structures.
1) Ver is a per-block attribute, not per-file. This means that, in theory, an indirect block with DataVer n may point to blocks with Vers less than, equal to, or greater than n. However, for now, it's guaranteed that an indirect block will never point to blocks with greater versions than itself. (See #3 for details.)
2) Ver is an external attribute of a block, meaning that it's not stored as part of the block, but computed by the creator (or anyone with the latest kbfs client), and stored only in pointers to the block.
2.5) A file (or, in the future a dir) can in theory have any arbitrary tree structure of blocks. However, we only write files such that all paths to leaves have the same depth.
Currently, in addition to 2.5, we have the following constraints on block tree structures:
- a) Direct blocks are always v1.
- b) Indirect blocks of depth 2 (meaning one indirect block pointing to all direct blocks) can be v1 (if it has no holes) or v2 (if it has holes). However, all its indirect pointers will have Ver 1, by a).
- c) Indirect blocks of depth 3 must be v3 and must have at least one indirect pointer with an indirect DirectType [although if it holds for one, it should hold for all]; its indirect pointers may have any combination of Ver 1 or 2, by b).
- d) Indirect blocks of depth k > 3 must be v3 and must have at least one indirect pointer with an indirect DirectType [although if it holds for one, it should hold for all], and all of its indirect pointers must have Ver 3, by c).
const (
    // FirstValidVer is the first value that is considered a
    // valid data version. Note that the nil value is not
    // considered valid.
    FirstValidVer Ver = 1
    // ChildHolesVer is the data version for any indirect block
    // containing a set of pointers with holes.
    ChildHolesVer Ver = 2
    // AtLeastTwoLevelsOfChildrenVer is the data version for
    // blocks that have multiple levels of indirection below them
    // (i.e., indirect blocks that point to other indirect blocks).
    AtLeastTwoLevelsOfChildrenVer Ver = 3
    // IndirectDirsVer is the data version for a directory block
    // that contains indirect pointers.
    IndirectDirsVer Ver = 4
)
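The simplified sketch below illustrates constraints a) and b) above for a file block of depth at most 2: it needs ChildHolesVer only when one of its pointers marks a hole, and FirstValidVer otherwise. Deeper trees (constraints c and d) are deliberately not handled; `depthTwoFileBlockVer` is a hypothetical helper.
package data

// depthTwoFileBlockVer returns the data version required for a direct block
// or a depth-2 indirect file block, per constraints a) and b).
func depthTwoFileBlockVer(fb *FileBlock) Ver {
    if !fb.IsIndirect() {
        return FirstValidVer // constraint a): direct blocks are always v1
    }
    for _, iptr := range fb.IPtrs {
        if iptr.Holes {
            return ChildHolesVer // constraint b): holes require v2
        }
    }
    return FirstValidVer
}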
func DefaultNewBlockDataVersion ¶
DefaultNewBlockDataVersion returns the default data version for new blocks.