Documentation ¶
Overview ¶
Package desync implements data structures, protocols and features of https://github.com/systemd/casync in order to allow support for additional platforms and improve performance by way of concurrency and caching.
Supports the following casync data structures: catar archives, caibx/caidx index files, castr stores (local or remote).
See desync/cmd for reference implementations of the available features.
Index ¶
- Constants
- Variables
- func CanClone(dstFile, srcFile string) bool
- func ChopFile(ctx context.Context, name string, chunks []IndexChunk, ws WriteStore, n int, ...) error
- func CloneRange(dst, src *os.File, srcOffset, srcLength, dstOffset uint64) error
- func Compress(b []byte) ([]byte, error)
- func Copy(ctx context.Context, ids []ChunkID, src Store, dst WriteStore, n int, ...) error
- func Decompress(out, in []byte) ([]byte, error)
- func IndexFromFile(ctx context.Context, name string, n int, min, avg, max uint64, pb ProgressBar) (Index, ChunkingStats, error)
- func MountIndex(ctx context.Context, idx Index, path, name string, s Store, n int) error
- func NewHTTPHandler(s Store, writable, skipVerifyWrite, uncompressed bool) http.Handler
- func NewHTTPIndexHandler(s IndexStore, writable bool) http.Handler
- func SipHash(b []byte) uint64
- func Tar(ctx context.Context, w io.Writer, src string) error
- func UnTar(ctx context.Context, r io.Reader, dst string, opts UntarOptions) error
- func UnTarIndex(ctx context.Context, dst string, index Index, s Store, n int, ...) error
- func VerifyIndex(ctx context.Context, name string, idx Index, n int, pb ProgressBar) error
- type ArchiveDecoder
- type Cache
- type Chunk
- type ChunkID
- type ChunkInvalid
- type ChunkMissing
- type ChunkStorage
- type Chunker
- type ChunkingStats
- type ConsoleIndexStore
- type ExtractStats
- type FileSeed
- type FormatACLDefault
- type FormatACLGroup
- type FormatACLGroupObj
- type FormatACLUser
- type FormatDecoder
- type FormatDevice
- type FormatEncoder
- type FormatEntry
- type FormatFCaps
- type FormatFilename
- type FormatGoodbye
- type FormatGoodbyeItem
- type FormatGroup
- type FormatHeader
- type FormatIndex
- type FormatPayload
- type FormatSELinux
- type FormatSymlink
- type FormatTable
- type FormatTableItem
- type FormatUser
- type FormatXAttr
- type HTTPHandler
- type HTTPHandlerBase
- type HTTPIndexHandler
- type Hash
- type Index
- type IndexChunk
- type IndexMountFS
- func (me *IndexMountFS) GetAttr(name string, context *fuse.Context) (*fuse.Attr, fuse.Status)
- func (me *IndexMountFS) Open(name string, flags uint32, context *fuse.Context) (file nodefs.File, code fuse.Status)
- func (me *IndexMountFS) OpenDir(name string, context *fuse.Context) (c []fuse.DirEntry, code fuse.Status)
- type IndexMountFile
- type IndexPos
- type IndexSegment
- type IndexStore
- type IndexWriteStore
- type Interrupted
- type InvalidFormat
- type LocalIndexStore
- type LocalStore
- func (s LocalStore) Close() error
- func (s LocalStore) GetChunk(id ChunkID) (*Chunk, error)
- func (s LocalStore) HasChunk(id ChunkID) bool
- func (s LocalStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error
- func (s LocalStore) RemoveChunk(id ChunkID) error
- func (s LocalStore) StoreChunk(chunk *Chunk) error
- func (s LocalStore) String() string
- func (s LocalStore) Verify(ctx context.Context, n int, repair bool, w io.Writer) error
- type Message
- type NoSuchObject
- type NodeDevice
- type NodeDirectory
- type NodeFile
- type NodeSymlink
- type NullChunk
- type ProgressBar
- type Protocol
- func (p *Protocol) Initialize(flags uint64) (uint64, error)
- func (p *Protocol) ReadMessage() (Message, error)
- func (p *Protocol) RecvHello() (uint64, error)
- func (p *Protocol) RequestChunk(id ChunkID) (*Chunk, error)
- func (p *Protocol) SendGoodbye() error
- func (p *Protocol) SendHello(flags uint64) error
- func (p *Protocol) SendMissing(id ChunkID) error
- func (p *Protocol) SendProtocolChunk(id ChunkID, flags uint64, chunk []byte) error
- func (p *Protocol) SendProtocolRequest(id ChunkID, flags uint64) error
- func (p *Protocol) WriteMessage(m Message) error
- type ProtocolServer
- type PruneStore
- type RemoteHTTP
- type RemoteHTTPBase
- type RemoteHTTPIndex
- type RemoteSSH
- type S3IndexStore
- type S3Store
- type S3StoreBase
- type SFTPIndexStore
- type SFTPStore
- type SFTPStoreBase
- type Seed
- type SeedSegment
- type SeedSequencer
- type Store
- type StoreOptions
- type StoreRouter
- type UntarOptions
- type WriteStore
Constants ¶
const ( // Format identifiers used in archive files CaFormatEntry = 0x1396fabcea5bbb51 CaFormatUser = 0xf453131aaeeaccb3 CaFormatGroup = 0x25eb6ac969396a52 CaFormatXAttr = 0xb8157091f80bc486 CaFormatACLUser = 0x297dc88b2ef12faf CaFormatACLGroup = 0x36f2acb56cb3dd0b CaFormatACLGroupObj = 0x23047110441f38f3 CaFormatACLDefault = 0xfe3eeda6823c8cd0 CaFormatACLDefaultUser = 0xbdf03df9bd010a91 CaFormatACLDefaultGroup = 0xa0cb1168782d1f51 CaFormatFCaps = 0xf7267db0afed0629 CaFormatSELinux = 0x46faf0602fd26c59 CaFormatSymlink = 0x664a6fb6830e0d6c CaFormatDevice = 0xac3dace369dfe643 CaFormatPayload = 0x8b9e1d93d6dcffc9 CaFormatFilename = 0x6dbb6ebcb3161f0b CaFormatGoodbye = 0xdfd35c5e8327c403 CaFormatGoodbyeTailMarker = 0x57446fa533702943 CaFormatIndex = 0x96824d9c7b129ff9 CaFormatTable = 0xe75b9e112f17417d CaFormatTableTailMarker = 0x4b4f050e5549ecd1 // SipHash key used in Goodbye elements to hash the filename. It's 16 bytes, // split into 2x64bit values, upper and lower part of the key CaFormatGoodbyeHashKey0 = 0x8574442b0f1d84b3 CaFormatGoodbyeHashKey1 = 0x2736ed30d1c22ec1 // Format feature flags CaFormatWith16BitUIDs = 0x1 CaFormatWith32BitUIDs = 0x2 CaFormatWithUserNames = 0x4 CaFormatWithSecTime = 0x8 CaFormatWithUSecTime = 0x10 CaFormatWithNSecTime = 0x20 CaFormatWith2SecTime = 0x40 CaFormatWithReadOnly = 0x80 CaFormatWithPermissions = 0x100 CaFormatWithSymlinks = 0x200 CaFormatWithDeviceNodes = 0x400 CaFormatWithFIFOs = 0x800 CaFormatWithSockets = 0x1000 /* DOS file flags */ CaFormatWithFlagHidden = 0x2000 CaFormatWithFlagSystem = 0x4000 CaFormatWithFlagArchive = 0x8000 /* chattr() flags */ CaFormatWithFlagAppend = 0x10000 CaFormatWithFlagNoAtime = 0x20000 CaFormatWithFlagCompr = 0x40000 CaFormatWithFlagNoCow = 0x80000 CaFormatWithFlagNoDump = 0x100000 CaFormatWithFlagDirSync = 0x200000 CaFormatWithFlagImmutable = 0x400000 CaFormatWithFlagSync = 0x800000 CaFormatWithFlagNoComp = 0x1000000 CaFormatWithFlagProjectInherit = 0x2000000 /* btrfs magic */ 
CaFormatWithSubvolume = 0x4000000 CaFormatWithSubvolumeRO = 0x8000000 /* Extended Attribute metadata */ CaFormatWithXattrs = 0x10000000 CaFormatWithACL = 0x20000000 CaFormatWithSELinux = 0x40000000 CaFormatWithFcaps = 0x80000000 CaFormatSHA512256 = 0x2000000000000000 CaFormatExcludeSubmounts = 0x4000000000000000 CaFormatExcludeNoDump = 0x8000000000000000 // Protocol message types CaProtocolHello = 0x3c71d0948ca5fbee CaProtocolIndex = 0xb32a91dd2b3e27f8 CaProtocolIndexEOF = 0x4f0932f1043718f5 CaProtocolArchive = 0x95d6428a69eddcc5 CaProtocolArchiveEOF = 0x450bef663f24cbad CaProtocolRequest = 0x8ab427e0f89d9210 CaProtocolChunk = 0x5213dd180a84bc8c CaProtocolMissing = 0xd010f9fac82b7b6c CaProtocolGoodbye = 0xad205dbf1a3686c3 CaProtocolAbort = 0xe7d9136b7efea352 // Provided services CaProtocolReadableStore = 0x1 CaProtocolWritableStore = 0x2 CaProtocolReadableIndex = 0x4 CaProtocolWritableIndex = 0x8 CaProtocolReadableArchive = 0x10 CaProtocolWritableArchive = 0x20 // Wanted services CaProtocolPullChunks = 0x40 CaProtocolPullIndex = 0x80 CaProtocolPullArchive = 0x100 CaProtocolPushChunks = 0x200 CaProtocolPushIndex = 0x400 CaProtocolPushIndexChunks = 0x800 CaProtocolPushArchive = 0x1000 // Protocol request flags CaProtocolRequestHighPriority = 1 // Chunk properties CaProtocolChunkCompressed = 1 )
const ChunkerWindowSize = 48
ChunkerWindowSize is the number of bytes in the rolling hash window
const CompressedChunkExt = ".cacnk"
CompressedChunkExt is the file extension used for compressed chunks
const DefaultBlockSize = 4096
DefaultBlockSize is used when the actual filesystem block size cannot be determined automatically
const TarFeatureFlags uint64 = CaFormatWith32BitUIDs | CaFormatWithNSecTime | CaFormatWithPermissions | CaFormatWithSymlinks | CaFormatWithDeviceNodes | CaFormatWithFIFOs | CaFormatWithSockets | CaFormatSHA512256 | CaFormatExcludeNoDump
TarFeatureFlags are used as feature flags in the header of catar archives. These should be used in index files when chunking a catar as well. TODO: Find out what CaFormatWithPermissions is as that's not set in casync-produced catar archives.
const UncompressedChunkExt = ""
UncompressedChunkExt is the file extension of uncompressed chunks
Variables ¶
var ( FormatString = map[uint64]string{ CaFormatEntry: "CaFormatEntry", CaFormatUser: "CaFormatUser", CaFormatGroup: "CaFormatGroup", CaFormatXAttr: "CaFormatXAttr", CaFormatACLUser: "CaFormatACLUser", CaFormatACLGroup: "CaFormatACLGroup", CaFormatACLGroupObj: "CaFormatACLGroupObj", CaFormatACLDefault: "CaFormatACLDefault", CaFormatACLDefaultUser: "CaFormatACLDefaultUser", CaFormatACLDefaultGroup: "CaFormatACLDefaultGroup", CaFormatFCaps: "CaFormatFCaps", CaFormatSELinux: "CaFormatSELinux", CaFormatSymlink: "CaFormatSymlink", CaFormatDevice: "CaFormatDevice", CaFormatPayload: "CaFormatPayload", CaFormatFilename: "CaFormatFilename", CaFormatGoodbye: "CaFormatGoodbye", CaFormatGoodbyeTailMarker: "CaFormatGoodbyeTailMarker", CaFormatIndex: "CaFormatIndex", CaFormatTable: "CaFormatTable", CaFormatTableTailMarker: "CaFormatTableTailMarker", } )
var TrustInsecure bool
TrustInsecure determines if invalid certs presented by HTTP stores should be accepted.
Functions ¶
func CanClone ¶ added in v0.4.0
CanClone tries to determine if the filesystem allows cloning of blocks between two files. It'll create two tempfiles in the same dirs and attempt to perform a 0-byte long block clone. If that's successful it'll return true.
func ChopFile ¶ added in v0.2.0
func ChopFile(ctx context.Context, name string, chunks []IndexChunk, ws WriteStore, n int, pb ProgressBar) error
ChopFile splits a file according to a list of chunks obtained from an Index and stores them in the provided store
func CloneRange ¶ added in v0.4.0
CloneRange uses the FICLONERANGE ioctl to de-dupe blocks between two files when using XFS or btrfs. Only works at block-boundaries.
func Copy ¶ added in v0.2.0
func Copy(ctx context.Context, ids []ChunkID, src Store, dst WriteStore, n int, pb ProgressBar) error
Copy reads a list of chunks from the provided src store, and copies the ones not already present in the dst store. The goal is to load chunks from remote store to populate a cache. If progress is provided, it'll be called when a chunk has been processed. Used to draw a progress bar, can be nil.
func Decompress ¶ added in v0.2.0
Decompress a block using the only supported algorithm. If you already have a buffer it can be passed into out and will be used. If out=nil, a buffer will be allocated.
func IndexFromFile ¶ added in v0.2.0
func IndexFromFile(ctx context.Context, name string, n int, min, avg, max uint64, pb ProgressBar, ) (Index, ChunkingStats, error)
IndexFromFile chunks a file in parallel and returns an index. It does not store chunks! Each concurrent chunker starts filesize/n bytes apart and splits independently. Each chunk worker tries to sync with its next neighbor and if successful stops processing letting the next one continue. The main routine reads and assembles a list of (confirmed) chunks from the workers, starting with the first worker. This algorithm wastes some CPU and I/O if the data doesn't contain chunk boundaries, for example if the whole file contains nil bytes. If progress is not nil, it'll be updated with the confirmed chunk position in the file.
func MountIndex ¶ added in v0.2.0
MountIndex mounts an index file under a FUSE mount point. The mount will only expose a single blob file as represented by the index.
func NewHTTPHandler ¶ added in v0.2.0
NewHTTPHandler initializes and returns a new HTTP handler for a chunk server.
func NewHTTPIndexHandler ¶ added in v0.3.0
func NewHTTPIndexHandler(s IndexStore, writable bool) http.Handler
NewHTTPIndexHandler initializes an HTTP index store handler
func SipHash ¶ added in v0.2.0
SipHash is used to calculate the hash in Goodbye element items, hashing the filename.
func Tar ¶ added in v0.2.0
Tar implements the tar command which recursively parses a directory tree, and produces a stream of encoded casync format elements (catar file).
func UnTar ¶ added in v0.2.0
UnTar implements the untar command, decoding a catar file and writing the contained tree to a target directory.
func UnTarIndex ¶ added in v0.2.0
func UnTarIndex(ctx context.Context, dst string, index Index, s Store, n int, opts UntarOptions) error
UnTarIndex takes an index file (of a chunked catar), re-assembles the catar and decodes it on-the-fly into the target directory 'dst'. Uses n goroutines to retrieve and decompress the chunks.
func VerifyIndex ¶ added in v0.2.0
VerifyIndex re-calculates the checksums of a blob comparing it to a given index. Fails if the index does not match the blob.
Types ¶
type ArchiveDecoder ¶ added in v0.2.0
type ArchiveDecoder struct {
// contains filtered or unexported fields
}
ArchiveDecoder is used to decode a catar archive.
func NewArchiveDecoder ¶ added in v0.2.0
func NewArchiveDecoder(r io.Reader) ArchiveDecoder
NewArchiveDecoder initializes a decoder for a catar archive.
func (*ArchiveDecoder) Next ¶ added in v0.2.0
func (a *ArchiveDecoder) Next() (interface{}, error)
Next returns a node from an archive, or nil if the end is reached. If NodeFile is returned, the caller should read the file body before calling Next() again as that invalidates the reader.
type Cache ¶
type Cache struct {
// contains filtered or unexported fields
}
Cache is used to connect a (typically remote) store with a local store which functions as disk cache. Any request to the cache for a chunk will first be routed to the local store, and if that fails to the slower remote store. Any chunks retrieved from the remote store will be stored in the local one.
func NewCache ¶
func NewCache(s Store, l WriteStore) Cache
NewCache returns a cache router that uses a local store as cache before accessing a (supposedly slower) remote one.
func (Cache) GetChunk ¶
GetChunk first asks the local store for the chunk and then the remote one. If we get a chunk from the remote, it's stored locally too.
type Chunk ¶ added in v0.4.0
type Chunk struct {
// contains filtered or unexported fields
}
Chunk holds chunk data compressed, uncompressed, or both. If a chunk is created from compressed data, such as read from a compressed chunk store, and later the application requires the uncompressed data, it'll be decompressed on demand and also stored in Chunk. The same happens when the Chunk is made from uncompressed bytes and then to be stored in a compressed form.
func NewChunkFromUncompressed ¶ added in v0.4.0
NewChunkFromUncompressed creates a new chunk from uncompressed data.
func NewChunkWithID ¶ added in v0.4.0
NewChunkWithID creates a new chunk from either compressed or uncompressed data (or both if available). It also expects an ID and validates that it matches the uncompressed data unless skipVerify is true. If called with just compressed data, it'll decompress it for the ID validation.
func (*Chunk) Compressed ¶ added in v0.4.0
Compressed returns the chunk data in compressed form. If the chunk was created with uncompressed data only, it'll be compressed, stored and returned. The caller must not modify the data in the returned slice.
func (*Chunk) ID ¶ added in v0.4.0
ID returns the checksum/ID of the uncompressed chunk data. The ID is stored after the first call and doesn't need to be re-calculated. Note that calculating the ID may mean decompressing the data first.
func (*Chunk) Uncompressed ¶ added in v0.4.0
Uncompressed returns the chunk data in uncompressed form. If the chunk was created with compressed data only, it'll be decompressed, stored and returned. The caller must not modify the data in the returned slice.
type ChunkID ¶
type ChunkID [32]byte
ChunkID is the SHA512/256 in binary encoding
func ChunkIDFromSlice ¶
ChunkIDFromSlice converts a SHA512/256 encoded as byte slice into a ChunkID. It's expected the slice is of the correct length
func ChunkIDFromString ¶
ChunkIDFromString converts a SHA512/256 encoded as string into a ChunkID
type ChunkInvalid ¶ added in v0.2.0
ChunkInvalid means the hash of the chunk content doesn't match its ID
func (ChunkInvalid) Error ¶ added in v0.2.0
func (e ChunkInvalid) Error() string
type ChunkMissing ¶
type ChunkMissing struct {
ID ChunkID
}
ChunkMissing is returned by a store that can't find a requested chunk
func (ChunkMissing) Error ¶
func (e ChunkMissing) Error() string
type ChunkStorage ¶ added in v0.2.0
ChunkStorage stores chunks in a writable store. It can be safely used by multiple goroutines and contains an internal cache of what chunks have been stored previously.
func NewChunkStorage ¶ added in v0.2.0
func NewChunkStorage(ws WriteStore) *ChunkStorage
NewChunkStorage initializes a ChunkStorage object.
func (*ChunkStorage) StoreChunk ¶ added in v0.2.0
func (s *ChunkStorage) StoreChunk(chunk *Chunk) (err error)
StoreChunk stores a single chunk in a synchronous manner.
type Chunker ¶ added in v0.2.0
type Chunker struct {
// contains filtered or unexported fields
}
Chunker is used to break up a data stream into chunks of data.
func NewChunker ¶ added in v0.2.0
NewChunker initializes a chunker for a data stream according to min/avg/max chunk size.
type ChunkingStats ¶ added in v0.2.0
ChunkingStats is used to report statistics of a parallel chunking operation.
type ConsoleIndexStore ¶ added in v0.3.0
type ConsoleIndexStore struct{}
ConsoleIndexStore is used for writing/reading indexes from STDOUT/STDIN
func NewConsoleIndexStore ¶ added in v0.3.0
func NewConsoleIndexStore() (ConsoleIndexStore, error)
NewConsoleIndexStore creates an instance of an indexStore that reads/writes to and from console
func (ConsoleIndexStore) Close ¶ added in v0.3.0
func (s ConsoleIndexStore) Close() error
Close the index store.
func (ConsoleIndexStore) GetIndex ¶ added in v0.3.0
func (s ConsoleIndexStore) GetIndex(string) (i Index, e error)
GetIndex reads an index from STDIN and returns it.
func (ConsoleIndexStore) GetIndexReader ¶ added in v0.3.0
func (s ConsoleIndexStore) GetIndexReader(string) (io.ReadCloser, error)
GetIndexReader returns a reader from STDIN
func (ConsoleIndexStore) StoreIndex ¶ added in v0.3.0
func (s ConsoleIndexStore) StoreIndex(name string, idx Index) error
StoreIndex writes the provided index to STDOUT. The name is ignored.
func (ConsoleIndexStore) String ¶ added in v0.3.0
func (s ConsoleIndexStore) String() string
type ExtractStats ¶ added in v0.4.0
type ExtractStats struct { ChunksTotal int `json:"chunks-total"` ChunksFromSeeds uint64 `json:"chunks-from-seeds"` ChunksFromStore uint64 `json:"chunks-from-store"` ChunksInPlace uint64 `json:"chunks-in-place"` BytesTotal int64 `json:"bytes-total"` BytesCopied uint64 `json:"bytes-copied-from-seeds"` BytesCloned uint64 `json:"bytes-cloned-from-seeds"` Seeds int `json:"seeds"` Blocksize uint64 `json:"blocksize"` }
ExtractStats contains detailed statistics about a file extract operation, such as if data chunks were copied from seeds or cloned.
func AssembleFile ¶ added in v0.2.0
func AssembleFile(ctx context.Context, name string, idx Index, s Store, seeds []Seed, n int, pb ProgressBar) (*ExtractStats, error)
AssembleFile re-assembles a file based on a list of index chunks. It runs n goroutines, creating one filehandle for the file "name" per goroutine and writes to the file simultaneously. If progress is provided, it'll be called when a chunk has been processed. If the input file exists and is not empty, the algorithm will first confirm if the data matches what is expected and only populate areas that differ from the expected content. This can be used to complete partly written files.
type FileSeed ¶ added in v0.4.0
type FileSeed struct {
// contains filtered or unexported fields
}
FileSeed is used to copy or clone blocks from an existing index+blob during file extraction.
func NewIndexSeed ¶ added in v0.4.0
NewIndexSeed initializes a new seed that uses an existing index and its blob
func (*FileSeed) LongestMatchWith ¶ added in v0.4.0
func (s *FileSeed) LongestMatchWith(chunks []IndexChunk) (int, SeedSegment)
LongestMatchWith returns the longest sequence of chunks anywhere in Source that match b starting at b[0]. If there is no match, it returns nil
type FormatACLDefault ¶ added in v0.2.0
type FormatACLDefault struct { FormatHeader UserObjPermissions uint64 GroupObjPermissions uint64 OtherPermissions uint64 MaskPermissions uint64 }
type FormatACLGroup ¶ added in v0.2.0
type FormatACLGroup struct { FormatHeader GID uint64 Permissions uint64 Name string }
type FormatACLGroupObj ¶ added in v0.2.0
type FormatACLGroupObj struct { FormatHeader Permissions uint64 }
type FormatACLUser ¶ added in v0.2.0
type FormatACLUser struct { FormatHeader UID uint64 Permissions uint64 Name string }
type FormatDecoder ¶ added in v0.2.0
type FormatDecoder struct {
// contains filtered or unexported fields
}
FormatDecoder is used to parse and break up a stream of casync format elements found in archives or index files.
func NewFormatDecoder ¶ added in v0.2.0
func NewFormatDecoder(r io.Reader) FormatDecoder
func (*FormatDecoder) Next ¶ added in v0.2.0
func (d *FormatDecoder) Next() (interface{}, error)
Next returns the next format element from the stream. If an element contains a reader, that reader should be used before any subsequent calls as it'll be invalidated then. Returns nil when the end is reached.
type FormatDevice ¶ added in v0.2.0
type FormatDevice struct { FormatHeader Major uint64 Minor uint64 }
type FormatEncoder ¶ added in v0.2.0
type FormatEncoder struct {
// contains filtered or unexported fields
}
FormatEncoder takes casync format elements and encodes them into a stream.
func NewFormatEncoder ¶ added in v0.2.0
func NewFormatEncoder(w io.Writer) FormatEncoder
func (*FormatEncoder) Encode ¶ added in v0.2.0
func (e *FormatEncoder) Encode(v interface{}) (int64, error)
type FormatEntry ¶ added in v0.2.0
type FormatFCaps ¶ added in v0.2.0
type FormatFCaps struct { FormatHeader Data []byte }
type FormatFilename ¶ added in v0.2.0
type FormatFilename struct { FormatHeader Name string }
type FormatGoodbye ¶ added in v0.2.0
type FormatGoodbye struct { FormatHeader Items []FormatGoodbyeItem }
type FormatGoodbyeItem ¶ added in v0.2.0
type FormatGroup ¶ added in v0.2.0
type FormatGroup struct { FormatHeader Name string }
type FormatHeader ¶ added in v0.2.0
type FormatIndex ¶ added in v0.2.0
type FormatIndex struct { FormatHeader FeatureFlags uint64 ChunkSizeMin uint64 ChunkSizeAvg uint64 ChunkSizeMax uint64 }
type FormatPayload ¶ added in v0.2.0
type FormatPayload struct { FormatHeader Data io.Reader }
type FormatSELinux ¶ added in v0.2.0
type FormatSELinux struct { FormatHeader Label string }
type FormatSymlink ¶ added in v0.2.0
type FormatSymlink struct { FormatHeader Target string }
type FormatTable ¶ added in v0.2.0
type FormatTable struct { FormatHeader Items []FormatTableItem }
type FormatTableItem ¶ added in v0.2.0
type FormatUser ¶ added in v0.2.0
type FormatUser struct { FormatHeader Name string }
type FormatXAttr ¶ added in v0.2.0
type FormatXAttr struct { FormatHeader NameAndValue string }
type HTTPHandler ¶ added in v0.2.0
type HTTPHandler struct { HTTPHandlerBase SkipVerifyWrite bool Uncompressed bool // contains filtered or unexported fields }
HTTPHandler is the server-side handler for a HTTP chunk store.
func (HTTPHandler) ServeHTTP ¶ added in v0.2.0
func (h HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
type HTTPHandlerBase ¶ added in v0.3.0
type HTTPHandlerBase struct {
// contains filtered or unexported fields
}
HTTPHandlerBase is the base object for a HTTP chunk or index store.
type HTTPIndexHandler ¶ added in v0.3.0
type HTTPIndexHandler struct { HTTPHandlerBase // contains filtered or unexported fields }
HTTPIndexHandler is the HTTP handler for index stores.
func (HTTPIndexHandler) ServeHTTP ¶ added in v0.3.0
func (h HTTPIndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request)
type Hash ¶ added in v0.2.0
type Hash struct {
// contains filtered or unexported fields
}
Hash implements the rolling hash algorithm used to find chunk boundaries in a stream of bytes.
func NewHash ¶ added in v0.2.0
NewHash returns a new instance of a hash. size determines the length of the hash window used and the discriminator is used to find the boundary.
func (*Hash) Initialize ¶ added in v0.2.0
Initialize the window used for the rolling hash calculation. The size of the slice must match the window size
func (*Hash) IsBoundary ¶ added in v0.2.0
IsBoundary returns true if the discriminator and hash match to signal a chunk boundary has been reached
type Index ¶
type Index struct { Index FormatIndex Chunks []IndexChunk }
Index represents the content of an index file
func ChunkStream ¶ added in v0.2.0
ChunkStream splits up a blob into chunks using the provided chunker (single stream), populates a store with the chunks and returns an index. Hashing and compression is performed in n goroutines while the hashing algorithm is performed serially.
func IndexFromReader ¶ added in v0.2.0
IndexFromReader parses a caibx structure (from a reader) and returns a populated Caibx object
type IndexChunk ¶ added in v0.2.0
IndexChunk is a table entry in an index file containing the chunk ID (SHA256). Similar to a FormatTableItem but with Start and Size instead of just offset to make it easier to use throughout the application.
type IndexMountFS ¶ added in v0.2.0
type IndexMountFS struct { FName string // File name in the mountpoint Idx Index // Index of the blob Store Store pathfs.FileSystem }
IndexMountFS is used to FUSE mount an index file (as a blob, not an archive). It presents a single file underneath the mountpoint.
func NewIndexMountFS ¶ added in v0.2.0
func NewIndexMountFS(idx Index, name string, s Store) *IndexMountFS
NewIndexMountFS initializes a FUSE filesystem mount based on an index and a chunk store.
func (*IndexMountFS) GetAttr ¶ added in v0.2.0
GetAttr returns file attributes of a file or directory in a FUSE mount.
type IndexMountFile ¶ added in v0.2.0
IndexMountFile represents a (read-only) file handle on a blob in a FUSE mounted filesystem
func NewIndexMountFile ¶ added in v0.2.0
func NewIndexMountFile(idx Index, s Store) *IndexMountFile
NewIndexMountFile initializes a blob file opened in a FUSE mount.
func (*IndexMountFile) GetAttr ¶ added in v0.2.0
func (f *IndexMountFile) GetAttr(out *fuse.Attr) fuse.Status
GetAttr returns attributes of a blob file in a FUSE mount.
func (*IndexMountFile) Read ¶ added in v0.2.0
func (f *IndexMountFile) Read(dest []byte, off int64) (fuse.ReadResult, fuse.Status)
Read from a blob file in a FUSE mount.
type IndexPos ¶ added in v0.2.0
type IndexPos struct { Store Store Index Index Length int64 // total length of file // contains filtered or unexported fields }
IndexPos represents a position inside an index file, to permit a seeking reader
func NewIndexReadSeeker ¶ added in v0.2.0
NewIndexReadSeeker initializes a ReadSeeker for indexes.
type IndexSegment ¶ added in v0.4.0
type IndexSegment struct {
// contains filtered or unexported fields
}
IndexSegment represents a contiguous section of an index which is used when assembling a file from seeds. first/last are positions in the index.
type IndexStore ¶ added in v0.3.0
type IndexStore interface { GetIndexReader(name string) (io.ReadCloser, error) GetIndex(name string) (Index, error) io.Closer fmt.Stringer }
IndexStore is implemented by stores that hold indexes.
type IndexWriteStore ¶ added in v0.3.0
type IndexWriteStore interface { IndexStore StoreIndex(name string, idx Index) error }
IndexWriteStore is used by stores that support reading and writing of indexes.
type Interrupted ¶ added in v0.2.0
type Interrupted struct{}
Interrupted is returned when a user interrupted a long-running operation, for example by pressing Ctrl+C
func (Interrupted) Error ¶ added in v0.2.0
func (e Interrupted) Error() string
type InvalidFormat ¶ added in v0.2.0
type InvalidFormat struct {
Msg string
}
InvalidFormat is returned when an error occurred when parsing an archive file
func (InvalidFormat) Error ¶ added in v0.2.0
func (e InvalidFormat) Error() string
type LocalIndexStore ¶ added in v0.3.0
type LocalIndexStore struct {
Path string
}
LocalIndexStore is used to read/write index files on local disk
func NewLocalIndexStore ¶ added in v0.4.0
func NewLocalIndexStore(path string) (LocalIndexStore, error)
NewLocalIndexStore creates an instance of a local index store, it only checks presence of the store
func (LocalIndexStore) Close ¶ added in v0.3.0
func (s LocalIndexStore) Close() error
Close the index store. NOP operation, needed to implement IndexStore interface
func (LocalIndexStore) GetIndex ¶ added in v0.3.0
func (s LocalIndexStore) GetIndex(name string) (i Index, e error)
GetIndex returns an Index structure from the store
func (LocalIndexStore) GetIndexReader ¶ added in v0.3.0
func (s LocalIndexStore) GetIndexReader(name string) (rdr io.ReadCloser, e error)
GetIndexReader returns a reader of an index file in the store or an error if the specified index file does not exist.
func (LocalIndexStore) StoreIndex ¶ added in v0.3.0
func (s LocalIndexStore) StoreIndex(name string, idx Index) error
StoreIndex stores an index in the index store with the given name.
func (LocalIndexStore) String ¶ added in v0.3.0
func (s LocalIndexStore) String() string
type LocalStore ¶
type LocalStore struct { Base string // When accessing chunks, should mtime be updated? Useful when this is // a cache. Old chunks can be identified and removed from the store that way UpdateTimes bool // contains filtered or unexported fields }
LocalStore casync store
func NewLocalStore ¶
func NewLocalStore(dir string, opt StoreOptions) (LocalStore, error)
NewLocalStore creates an instance of a local castore, it only checks presence of the store
func (LocalStore) Close ¶ added in v0.2.0
func (s LocalStore) Close() error
Close the store. NOP operation, needed to implement Store interface.
func (LocalStore) GetChunk ¶
func (s LocalStore) GetChunk(id ChunkID) (*Chunk, error)
GetChunk reads and returns one (compressed!) chunk from the store
func (LocalStore) HasChunk ¶ added in v0.2.0
func (s LocalStore) HasChunk(id ChunkID) bool
HasChunk returns true if the chunk is in the store
func (LocalStore) Prune ¶ added in v0.2.0
func (s LocalStore) Prune(ctx context.Context, ids map[ChunkID]struct{}) error
Prune removes any chunks from the store that are not contained in a list of chunks
func (LocalStore) RemoveChunk ¶ added in v0.2.0
func (s LocalStore) RemoveChunk(id ChunkID) error
RemoveChunk deletes a chunk, typically an invalid one, from the filesystem. Used when verifying and repairing caches.
func (LocalStore) StoreChunk ¶
func (s LocalStore) StoreChunk(chunk *Chunk) error
StoreChunk adds a new chunk to the store
func (LocalStore) String ¶ added in v0.2.0
func (s LocalStore) String() string
type NoSuchObject ¶ added in v0.3.0
type NoSuchObject struct {
// contains filtered or unexported fields
}
NoSuchObject is returned by a store that can't find a requested object
func (NoSuchObject) Error ¶ added in v0.3.0
func (e NoSuchObject) Error() string
type NodeDevice ¶ added in v0.2.0
type NodeDevice struct { Name string UID int GID int Mode os.FileMode Major uint64 Minor uint64 MTime time.Time }
NodeDevice holds device information in a catar archive
type NodeDirectory ¶ added in v0.2.0
NodeDirectory represents a directory in a catar archive
type NodeFile ¶ added in v0.2.0
type NodeFile struct { UID int GID int Mode os.FileMode Name string MTime time.Time Data io.Reader }
NodeFile holds file permissions and data in a catar archive
type NodeSymlink ¶ added in v0.2.0
type NodeSymlink struct { Name string UID int GID int Mode os.FileMode MTime time.Time Target string }
NodeSymlink holds symlink information in a catar archive
type NullChunk ¶ added in v0.2.0
NullChunk is used in places where it's common to see requests for chunks containing only 0-bytes. When a chunked file has large areas of 0-bytes, the chunking algorithm does not produce split boundaries, which results in many chunks of 0-bytes of size MAX (max chunk size). The NullChunk can be used to make requesting this kind of chunk more efficient by serving it from memory, rather than requesting it from disk or network and decompressing it repeatedly.
func NewNullChunk ¶ added in v0.2.0
NewNullChunk returns an initialized chunk consisting of 0-bytes of 'size' which must match the max size used in the index to be effective
type ProgressBar ¶ added in v0.2.0
type ProgressBar interface { SetTotal(total int) Start() Finish() Increment() int Add(add int) int Set(current int) }
ProgressBar allows clients to provide their own implementations of graphical progress visualizations. Optional, can be nil to disable this feature.
type Protocol ¶ added in v0.2.0
type Protocol struct {
// contains filtered or unexported fields
}
Protocol handles the casync protocol when using remote stores via SSH
func NewProtocol ¶ added in v0.2.0
NewProtocol creates a new casync protocol handler
func StartProtocol ¶ added in v0.2.0
StartProtocol initiates a connection to the remote store server using the value in CASYNC_SSH_PATH (default "ssh"), and executes the command in CASYNC_REMOTE_PATH (default "casync"). It then performs the HELLO handshake to initialize the connection
func (*Protocol) Initialize ¶ added in v0.2.0
Initialize exchanges HELLOs with the other side to start a protocol session. Returns the (capability) flags provided by the other party.
func (*Protocol) ReadMessage ¶ added in v0.2.0
ReadMessage reads a generic message from the other end, verifies the length, extracts the type and returns the message body as byte slice
func (*Protocol) RecvHello ¶ added in v0.2.0
RecvHello waits for the server to send a HELLO, fails if anything else is received. Returns the flags provided by the server.
func (*Protocol) RequestChunk ¶ added in v0.2.0
RequestChunk sends a request for a specific chunk to the server, waits for the response and returns the bytes in the chunk. Returns an error if the server reports the chunk as missing
func (*Protocol) SendGoodbye ¶ added in v0.2.0
SendGoodbye tells the other side to terminate gracefully
func (*Protocol) SendHello ¶ added in v0.2.0
SendHello sends a HELLO message to the server, with the flags signaling which service is being requested from it.
func (*Protocol) SendMissing ¶ added in v0.2.0
SendMissing tells the client that the requested chunk is not available
func (*Protocol) SendProtocolChunk ¶ added in v0.2.0
SendProtocolChunk responds to a request with the content of a chunk
func (*Protocol) SendProtocolRequest ¶ added in v0.2.0
SendProtocolRequest requests a chunk from a server
func (*Protocol) WriteMessage ¶ added in v0.2.0
WriteMessage sends a generic message to the server
type ProtocolServer ¶ added in v0.2.0
type ProtocolServer struct {
// contains filtered or unexported fields
}
ProtocolServer serves up chunks from a local store using the casync protocol
func NewProtocolServer ¶ added in v0.2.0
NewProtocolServer returns an initialized server that can serve chunks from a chunk store via the casync protocol
type PruneStore ¶ added in v0.2.0
type PruneStore interface { WriteStore Prune(ctx context.Context, ids map[ChunkID]struct{}) error }
PruneStore is a store that supports pruning of chunks
type RemoteHTTP ¶ added in v0.2.0
type RemoteHTTP struct {
*RemoteHTTPBase
}
RemoteHTTP is a remote casync store accessed via HTTP.
func NewRemoteHTTPStore ¶ added in v0.2.0
func NewRemoteHTTPStore(location *url.URL, opt StoreOptions) (*RemoteHTTP, error)
NewRemoteHTTPStore initializes a new store that pulls chunks via HTTP(S) from a remote web server. n defines the size of idle connections allowed.
func (*RemoteHTTP) GetChunk ¶ added in v0.2.0
func (r *RemoteHTTP) GetChunk(id ChunkID) (*Chunk, error)
GetChunk reads and returns one chunk from the store
func (*RemoteHTTP) HasChunk ¶ added in v0.2.0
func (r *RemoteHTTP) HasChunk(id ChunkID) bool
HasChunk returns true if the chunk is in the store
func (*RemoteHTTP) StoreChunk ¶ added in v0.2.0
func (r *RemoteHTTP) StoreChunk(chunk *Chunk) error
StoreChunk adds a new chunk to the store
type RemoteHTTPBase ¶ added in v0.3.0
type RemoteHTTPBase struct {
// contains filtered or unexported fields
}
RemoteHTTPBase is the base object for remote, HTTP-based chunk or index stores.
func NewRemoteHTTPStoreBase ¶ added in v0.3.0
func NewRemoteHTTPStoreBase(location *url.URL, opt StoreOptions) (*RemoteHTTPBase, error)
NewRemoteHTTPStoreBase initializes a base object for HTTP index or chunk stores.
func (*RemoteHTTPBase) Close ¶ added in v0.3.0
func (r *RemoteHTTPBase) Close() error
Close the HTTP store. NOP operation but needed to implement the interface.
func (*RemoteHTTPBase) GetObject ¶ added in v0.3.0
func (r *RemoteHTTPBase) GetObject(name string) ([]byte, error)
GetObject reads and returns an object in the form of []byte from the store
func (*RemoteHTTPBase) StoreObject ¶ added in v0.3.0
func (r *RemoteHTTPBase) StoreObject(name string, rdr io.Reader) error
StoreObject stores an object to the store.
func (*RemoteHTTPBase) String ¶ added in v0.3.0
func (r *RemoteHTTPBase) String() string
type RemoteHTTPIndex ¶ added in v0.3.0
type RemoteHTTPIndex struct {
*RemoteHTTPBase
}
RemoteHTTPIndex is a remote index store accessed via HTTP.
func NewRemoteHTTPIndexStore ¶ added in v0.3.0
func NewRemoteHTTPIndexStore(location *url.URL, opt StoreOptions) (*RemoteHTTPIndex, error)
NewRemoteHTTPIndexStore initializes a new store that pulls the specified index file via HTTP(S) from a remote web server.
func (*RemoteHTTPIndex) GetIndex ¶ added in v0.3.0
func (r *RemoteHTTPIndex) GetIndex(name string) (i Index, e error)
GetIndex returns an Index structure from the store
func (RemoteHTTPIndex) GetIndexReader ¶ added in v0.3.0
func (r RemoteHTTPIndex) GetIndexReader(name string) (rdr io.ReadCloser, e error)
GetIndexReader returns an index reader from an HTTP store. Fails if the specified index file does not exist.
func (*RemoteHTTPIndex) StoreIndex ¶ added in v0.3.0
func (r *RemoteHTTPIndex) StoreIndex(name string, idx Index) error
StoreIndex adds a new index to the store
type RemoteSSH ¶
type RemoteSSH struct {
// contains filtered or unexported fields
}
RemoteSSH is a remote casync store accessed via SSH. Supports running multiple sessions to improve throughput.
func NewRemoteSSHStore ¶
func NewRemoteSSHStore(location *url.URL, opt StoreOptions) (*RemoteSSH, error)
NewRemoteSSHStore establishes up to n connections with a casync chunk server
func (*RemoteSSH) GetChunk ¶
GetChunk requests a chunk from the server and returns a (compressed) one. It uses any of the n sessions this store maintains in its pool. Blocks until one session becomes available
type S3IndexStore ¶ added in v0.3.0
type S3IndexStore struct {
S3StoreBase
}
S3IndexStore is a read-write index store with S3 backing
func NewS3IndexStore ¶ added in v0.3.0
func NewS3IndexStore(location *url.URL, s3Creds *credentials.Credentials, region string, opt StoreOptions) (s S3IndexStore, e error)
NewS3IndexStore creates an index store with S3 backing. The URL should be provided like this: s3+http://host:port/bucket Credentials are passed in via the environment variables S3_ACCESS_KEY and S3_SECRET_KEY, or via the desync config file.
func (S3IndexStore) GetIndex ¶ added in v0.3.0
func (s S3IndexStore) GetIndex(name string) (i Index, e error)
GetIndex returns an Index structure from the store
func (S3IndexStore) GetIndexReader ¶ added in v0.3.0
func (s S3IndexStore) GetIndexReader(name string) (r io.ReadCloser, e error)
GetIndexReader returns a reader for an index from an S3 store. Fails if the specified index file does not exist.
func (S3IndexStore) StoreIndex ¶ added in v0.3.0
func (s S3IndexStore) StoreIndex(name string, idx Index) error
StoreIndex writes the index file to the S3 store
type S3Store ¶ added in v0.2.0
type S3Store struct {
S3StoreBase
}
S3Store is a read-write store with S3 backing
func NewS3Store ¶ added in v0.2.0
func NewS3Store(location *url.URL, s3Creds *credentials.Credentials, region string, opt StoreOptions) (s S3Store, e error)
NewS3Store creates a chunk store with S3 backing. The URL should be provided like this: s3+http://host:port/bucket Credentials are passed in via the environment variables S3_ACCESS_KEY and S3_SECRET_KEY, or via the desync config file.
func (S3Store) Prune ¶ added in v0.2.0
Prune removes any chunks from the store that are not contained in a list (map)
func (S3Store) RemoveChunk ¶ added in v0.2.0
RemoveChunk deletes a chunk, typically an invalid one, from the filesystem. Used when verifying and repairing caches.
func (S3Store) StoreChunk ¶ added in v0.2.0
StoreChunk adds a new chunk to the store
type S3StoreBase ¶ added in v0.3.0
type S3StoreBase struct { Location string // contains filtered or unexported fields }
S3StoreBase is the base object for all chunk and index stores with S3 backing
func NewS3StoreBase ¶ added in v0.3.0
func NewS3StoreBase(u *url.URL, s3Creds *credentials.Credentials, region string, opt StoreOptions) (S3StoreBase, error)
NewS3StoreBase initializes a base object used for chunk or index stores backed by S3.
func (S3StoreBase) Close ¶ added in v0.3.0
func (s S3StoreBase) Close() error
Close the S3 base store. NOP operation but needed to implement the store interface.
func (S3StoreBase) String ¶ added in v0.3.0
func (s S3StoreBase) String() string
type SFTPIndexStore ¶ added in v0.3.0
type SFTPIndexStore struct {
*SFTPStoreBase
}
SFTPIndexStore is an index store backed by SFTP over SSH
func NewSFTPIndexStore ¶ added in v0.3.0
func NewSFTPIndexStore(location *url.URL, opt StoreOptions) (*SFTPIndexStore, error)
NewSFTPIndexStore initializes an index store backed by SFTP over SSH.
func (*SFTPIndexStore) GetIndex ¶ added in v0.3.0
func (s *SFTPIndexStore) GetIndex(name string) (i Index, e error)
GetIndex reads an index from an SFTP store, returns an error if the specified index file does not exist.
func (*SFTPIndexStore) GetIndexReader ¶ added in v0.3.0
func (s *SFTPIndexStore) GetIndexReader(name string) (r io.ReadCloser, e error)
GetIndexReader returns a reader of an index from an SFTP store. Fails if the specified index file does not exist.
func (*SFTPIndexStore) StoreIndex ¶ added in v0.3.0
func (s *SFTPIndexStore) StoreIndex(name string, idx Index) error
StoreIndex adds a new index to the store
type SFTPStore ¶ added in v0.2.0
type SFTPStore struct {
*SFTPStoreBase
}
SFTPStore is a chunk store that uses SFTP over SSH.
func NewSFTPStore ¶ added in v0.2.0
func NewSFTPStore(location *url.URL, opt StoreOptions) (*SFTPStore, error)
NewSFTPStore initializes a chunk store using SFTP over SSH.
func (*SFTPStore) GetChunk ¶ added in v0.2.0
GetChunk returns a chunk from an SFTP store, returns ChunkMissing if the file does not exist
func (*SFTPStore) Prune ¶ added in v0.2.0
Prune removes any chunks from the store that are not contained in a list of chunks
func (*SFTPStore) RemoveChunk ¶ added in v0.2.0
RemoveChunk deletes a chunk, typically an invalid one, from the filesystem. Used when verifying and repairing caches.
func (*SFTPStore) StoreChunk ¶ added in v0.2.0
StoreChunk adds a new chunk to the store
type SFTPStoreBase ¶ added in v0.3.0
type SFTPStoreBase struct {
// contains filtered or unexported fields
}
SFTPStoreBase is the base object for SFTP chunk and index stores.
func (*SFTPStoreBase) Close ¶ added in v0.3.0
func (s *SFTPStoreBase) Close() error
Close terminates all client connections
func (*SFTPStoreBase) StoreObject ¶ added in v0.3.0
func (s *SFTPStoreBase) StoreObject(name string, r io.Reader) error
StoreObject adds a new object to a writable index or chunk store.
func (*SFTPStoreBase) String ¶ added in v0.3.0
func (s *SFTPStoreBase) String() string
type Seed ¶ added in v0.4.0
type Seed interface {
LongestMatchWith(chunks []IndexChunk) (int, SeedSegment)
}
Seed represents a source of chunks other than the store. Typically a seed is another index+blob that is present on disk already and is used to copy or clone existing chunks or blocks into the target from.
type SeedSegment ¶ added in v0.4.0
type SeedSegment interface { Size() uint64 WriteInto(dst *os.File, offset, end, blocksize uint64, isBlank bool) (copied uint64, cloned uint64, err error) }
SeedSegment represents a matching range between a Seed and a file being assembled from an Index. It's used to copy or reflink data from seeds into a target file during an extract operation.
type SeedSequencer ¶ added in v0.4.0
type SeedSequencer struct {
// contains filtered or unexported fields
}
SeedSequencer is used to find sequences of chunks from seed files when assembling a file from an index. Using seeds reduces the need to download and decompress chunks from chunk stores. It also enables the use of reflinking/cloning of sections of files from a seed file where supported to reduce disk usage.
func NewSeedSequencer ¶ added in v0.4.0
func NewSeedSequencer(idx Index, src ...Seed) *SeedSequencer
NewSeedSequencer initializes a new sequencer from a number of seeds.
func (*SeedSequencer) Next ¶ added in v0.4.0
func (r *SeedSequencer) Next() (segment IndexSegment, source SeedSegment, done bool)
Next returns a sequence of index chunks (from the target index) and the longest matching segment from one of the seeds. If source is nil, no match was found in the seeds and the chunk needs to be retrieved from a store. If done is true, the sequencer is complete.
type Store ¶
type Store interface { GetChunk(id ChunkID) (*Chunk, error) HasChunk(id ChunkID) bool io.Closer fmt.Stringer }
Store is a generic interface implemented by read-only stores, like SSH or HTTP remote stores currently.
type StoreOptions ¶ added in v0.4.0
type StoreOptions struct { // Concurrency used in the store. Depending on store type, it's used for // the number of goroutines, processes, or connection pool size. N int `json:"n,omitempty"` // Cert file name for HTTP SSL connections that require mutual SSL. ClientCert string `json:"client-cert,omitempty"` // Key file name for HTTP SSL connections that require mutual SSL. ClientKey string `json:"client-key,omitempty"` // Timeout for waiting for objects to be retrieved. Default: 1 minute Timeout time.Duration `json:"timeout,omitempty"` // Number of times object retrieval should be attempted on error. Useful when dealing // with unreliable connections. Default: 0 ErrorRetry int `json:"error-retry,omitempty"` // If SkipVerify is true, this store will not verify the data it reads and serves up. This is // helpful when a store is merely a proxy and the data will pass through additional stores // before being used. Verifying the checksum of a chunk requires it be uncompressed, so if // a compressed chunkstore is being proxied, all chunks would have to be decompressed first. // This setting avoids the extra overhead. While this could be used in other cases, it's not // recommended as a damaged chunk might be processed further leading to unpredictable results. SkipVerify bool `json:"skip-verify,omitempty"` // Store and read chunks uncompressed, without chunk file extension Uncompressed bool `json:"uncompressed"` }
StoreOptions provide additional common settings used in chunk stores, such as compression error retry or timeouts. Not all options available are applicable to all types of stores.
type StoreRouter ¶ added in v0.2.0
type StoreRouter struct {
Stores []Store
}
StoreRouter is used to route requests to multiple stores. When a chunk is requested from the router, it'll query the first store and if that returns ChunkMissing, it'll move on to the next.
func NewStoreRouter ¶ added in v0.2.0
func NewStoreRouter(stores ...Store) StoreRouter
NewStoreRouter returns an initialized router
func (StoreRouter) Close ¶ added in v0.2.0
func (r StoreRouter) Close() error
Close calls the Close() method on every store in the router. Returns only the first error encountered.
func (StoreRouter) GetChunk ¶ added in v0.2.0
func (r StoreRouter) GetChunk(id ChunkID) (*Chunk, error)
GetChunk queries the available stores in order and moves to the next if it gets a ChunkMissing. Fails if any store returns a different error.
func (StoreRouter) HasChunk ¶ added in v0.2.0
func (r StoreRouter) HasChunk(id ChunkID) bool
HasChunk returns true if one of the containing stores has the chunk. It goes through the stores in order and returns as soon as the chunk is found.
func (StoreRouter) String ¶ added in v0.2.0
func (r StoreRouter) String() string
type UntarOptions ¶ added in v0.2.0
UntarOptions are used to influence the behaviour of untar
type WriteStore ¶ added in v0.2.0
WriteStore is implemented by stores supporting both read and write operations such as a local store or an S3 store.
Source Files ¶
- archive.go
- assemble.go
- blocksize.go
- cache.go
- chop.go
- chunk.go
- chunker.go
- chunkstorage.go
- compress.go
- consoleindex.go
- const.go
- copy.go
- doc.go
- errors.go
- extractstats.go
- fileseed.go
- format.go
- httphandler.go
- httphandlerbase.go
- httpindexhandler.go
- index.go
- ioctl_linux.go
- local.go
- localindex.go
- make.go
- mount-index.go
- nullchunk.go
- nullseed.go
- progress.go
- protocol.go
- protocolserver.go
- reader.go
- readseeker.go
- remotehttp.go
- remotehttpindex.go
- remotessh.go
- s3.go
- s3index.go
- seed.go
- selfseed.go
- sequencer.go
- sftp.go
- sftpindex.go
- sip.go
- store.go
- storerouter.go
- tar.go
- types.go
- untar.go
- verifyindex.go
- writer.go