Documentation ¶
Overview ¶
Package restic is the top level package for the restic backup program; please see https://github.com/restic/restic for more information.
This package exposes the main objects that are handled in restic.
Index ¶
- Constants
- Variables
- func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reasons []KeepReason)
- func FindUsedBlobs(ctx context.Context, repo Loader, treeIDs IDs, blobs findBlobSet, ...) error
- func FixTime(t time.Time) time.Time
- func ForAllLocks(ctx context.Context, repo Repository, excludeID *ID, ...) error
- func ForAllSnapshots(ctx context.Context, be Lister, loader LoaderUnpacked, excludeIDs IDSet, ...) error
- func Getxattr(path, name string) ([]byte, error)
- func GroupSnapshots(snapshots Snapshots, groupBy SnapshotGroupByOptions) (map[string]Snapshots, bool, error)
- func IsAlreadyLocked(err error) bool
- func IsInvalidLock(err error) bool
- func Listxattr(path string) ([]string, error)
- func LoadJSONUnpacked(ctx context.Context, repo LoaderUnpacked, t FileType, id ID, item interface{}) (err error)
- func ParallelList(ctx context.Context, r Lister, t FileType, parallelism uint, ...) error
- func RemoveAllLocks(ctx context.Context, repo Repository) (uint, error)
- func RemoveStaleLocks(ctx context.Context, repo Repository) (uint, error)
- func SaveConfig(ctx context.Context, r SaverUnpacked, cfg Config) error
- func Setxattr(path, name string, data []byte) error
- func StreamTrees(ctx context.Context, wg *errgroup.Group, repo Loader, trees IDs, ...) <-chan TreeItem
- func TestDisableCheckPolynomial(t testing.TB)
- func TestSetLockTimeout(t testing.TB, d time.Duration)
- func TestSetSnapshotID(_ testing.TB, sn *Snapshot, id ID)
- func ZeroPrefixLen(p []byte) (n int)
- type Blob
- type BlobHandle
- type BlobHandles
- type BlobLoader
- type BlobSaver
- type BlobSet
- func (s BlobSet) Delete(h BlobHandle)
- func (s BlobSet) Equals(other BlobSet) bool
- func (s BlobSet) Has(h BlobHandle) bool
- func (s BlobSet) Insert(h BlobHandle)
- func (s BlobSet) Intersect(other BlobSet) (result BlobSet)
- func (s BlobSet) Len() int
- func (s BlobSet) List() BlobHandles
- func (s BlobSet) Merge(other BlobSet)
- func (s BlobSet) String() string
- func (s BlobSet) Sub(other BlobSet) (result BlobSet)
- type BlobType
- type ByteReader
- type Config
- type CountedBlobSet
- type Duration
- type ExpirePolicy
- type ExtendedAttribute
- type FileReader
- type FileType
- type ID
- func Find(ctx context.Context, be Lister, t FileType, prefix string) (ID, error)
- func FindTreeDirectory(ctx context.Context, repo BlobLoader, id *ID, dir string) (*ID, error)
- func Hash(data []byte) ID
- func IDFromHash(hash []byte) (id ID)
- func NewRandomID() ID
- func ParseID(s string) (ID, error)
- func SaveJSONUnpacked(ctx context.Context, repo SaverUnpacked, t FileType, item interface{}) (ID, error)
- func SaveSnapshot(ctx context.Context, repo SaverUnpacked, sn *Snapshot) (ID, error)
- func SaveTree(ctx context.Context, r BlobSaver, t *Tree) (ID, error)
- func TestParseID(s string) ID
- type IDSet
- func (s IDSet) Delete(id ID)
- func (s IDSet) Equals(other IDSet) bool
- func (s IDSet) Has(id ID) bool
- func (s IDSet) Insert(id ID)
- func (s IDSet) Intersect(other IDSet) (result IDSet)
- func (s IDSet) List() IDs
- func (s IDSet) Merge(other IDSet)
- func (s IDSet) String() string
- func (s IDSet) Sub(other IDSet) (result IDSet)
- type IDs
- type JSONUnpackedLoader
- type KeepReason
- type Lister
- type Loader
- type LoaderUnpacked
- type Lock
- type MasterIndex
- type MultipleIDMatchesError
- type NoIDByPrefixError
- type Node
- func (node *Node) CreateAt(ctx context.Context, path string, repo Repository) error
- func (node Node) Equals(other Node) bool
- func (node Node) GetExtendedAttribute(a string) []byte
- func (node Node) MarshalJSON() ([]byte, error)
- func (node Node) RestoreMetadata(path string) error
- func (node Node) RestoreTimestamps(path string) error
- func (node Node) String() string
- func (node *Node) UnmarshalJSON(data []byte) error
- type Nodes
- type PackBlobs
- type PackedBlob
- type Repository
- type RewindReader
- type SaverUnpacked
- type Snapshot
- func FindSnapshot(ctx context.Context, be Lister, loader LoaderUnpacked, s string) (*Snapshot, string, error)
- func LoadSnapshot(ctx context.Context, loader LoaderUnpacked, id ID) (*Snapshot, error)
- func NewSnapshot(paths []string, tags []string, hostname string, time time.Time) (*Snapshot, error)
- func TestCreateSnapshot(t testing.TB, repo Repository, at time.Time, depth int) *Snapshot
- func (sn *Snapshot) AddTags(addTags []string) (changed bool)
- func (sn *Snapshot) HasHostname(hostnames []string) bool
- func (sn *Snapshot) HasPaths(paths []string) bool
- func (sn *Snapshot) HasTagList(l []TagList) bool
- func (sn *Snapshot) HasTags(l []string) bool
- func (sn Snapshot) ID() *ID
- func (sn *Snapshot) RemoveTags(removeTags []string) (changed bool)
- func (sn Snapshot) String() string
- type SnapshotFilter
- type SnapshotFindCb
- type SnapshotGroupByOptions
- type SnapshotGroupKey
- type Snapshots
- type TagList
- type TagLists
- type Tree
- type TreeItem
- type TreeJSONBuilder
Constants ¶
const MaxRepoVersion = 2
const MinRepoVersion = 1
const StableRepoVersion = 2
StableRepoVersion is the version that is written to the config when a repository is newly created with Init().
Variables ¶
var ErrInvalidData = errors.New("invalid data returned")
ErrInvalidData is used to report that a file is corrupted
var ErrInvalidSnapshotSyntax = errors.New("<snapshot>:<subfolder> syntax not allowed")
var ErrNoSnapshotFound = errors.New("no snapshot found")
ErrNoSnapshotFound is returned when no snapshot for the given criteria could be found.
var ErrRemovedLock = errors.New("lock file was removed in the meantime")
var ErrTreeNotOrdered = errors.New("nodes are not ordered or duplicate")
var StaleLockTimeout = 30 * time.Minute
Functions ¶
func ApplyPolicy ¶
func ApplyPolicy(list Snapshots, p ExpirePolicy) (keep, remove Snapshots, reasons []KeepReason)
ApplyPolicy returns the snapshots from list that are to be kept and removed according to the policy p. list is sorted in the process. reasons contains the reasons to keep each snapshot; it is in the same order as keep.
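A minimal sketch of applying an expire policy. The sample snapshots, hostname and policy values are illustrative only, and since this is an internal package the import only compiles inside the restic module:

package main

import (
    "fmt"
    "time"

    "github.com/restic/restic/internal/restic"
)

func main() {
    // Build a small list of snapshots, one per day.
    var list restic.Snapshots
    for i := 0; i < 10; i++ {
        list = append(list, &restic.Snapshot{
            Time:     time.Now().AddDate(0, 0, -i),
            Hostname: "example",
            Paths:    []string{"/home"},
        })
    }

    // Keep the two most recent snapshots plus seven daily ones.
    policy := restic.ExpirePolicy{Last: 2, Daily: 7}
    keep, remove, reasons := restic.ApplyPolicy(list, policy)

    fmt.Println("keep:", len(keep), "remove:", len(remove))
    for i, sn := range keep {
        // reasons is in the same order as keep.
        fmt.Println(sn.Time.Format("2006-01-02"), reasons[i].Matches)
    }
}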
func FindUsedBlobs ¶
func FindUsedBlobs(ctx context.Context, repo Loader, treeIDs IDs, blobs findBlobSet, p *progress.Counter) error
FindUsedBlobs traverses the trees identified by treeIDs and adds all seen blobs (trees and data blobs) to the set blobs. Already seen tree blobs will not be visited again.
func FixTime ¶
FixTime returns a time.Time which can safely be used to marshal as JSON. If the timestamp is earlier than year zero, the year is set to zero. In the same way, if the year is larger than 9999, the year is set to 9999. Other than the year nothing is changed.
func ForAllLocks ¶
func ForAllLocks(ctx context.Context, repo Repository, excludeID *ID, fn func(ID, *Lock, error) error) error
ForAllLocks reads all locks in parallel and calls the given callback. It is guaranteed that the callback is not run concurrently. If the callback returns an error, iteration stops and that error is returned. If a lock ID is passed via excludeID, it will be ignored.
func ForAllSnapshots ¶
func ForAllSnapshots(ctx context.Context, be Lister, loader LoaderUnpacked, excludeIDs IDSet, fn func(ID, *Snapshot, error) error) error
ForAllSnapshots reads all snapshots in parallel and calls the given function. It is guaranteed that the function is not run concurrently. If the called function returns an error, iteration stops and that error is returned. If a snapshot ID is in excludeIDs, it will be ignored.
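A rough sketch of iterating over all snapshots. It assumes an already opened Repository, which, judging by the signatures listed on this page, satisfies both Lister and LoaderUnpacked:

package example

import (
    "context"
    "fmt"

    "github.com/restic/restic/internal/restic"
)

// listSnapshots prints every snapshot in the repository.
func listSnapshots(ctx context.Context, repo restic.Repository) error {
    return restic.ForAllSnapshots(ctx, repo, repo, restic.IDSet{},
        func(id restic.ID, sn *restic.Snapshot, err error) error {
            if err != nil {
                // Loading this snapshot failed; abort the iteration.
                return err
            }
            fmt.Println(sn) // Snapshot implements String()
            return nil
        })
}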
func GroupSnapshots ¶
func GroupSnapshots(snapshots Snapshots, groupBy SnapshotGroupByOptions) (map[string]Snapshots, bool, error)
GroupSnapshots takes a list of snapshots and a grouping criteria and creates a grouped list of snapshots.
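A hypothetical use of GroupSnapshots. The grouping string "host,tags" passed to SnapshotGroupByOptions.Set is an assumption about the accepted criteria, not taken from this page:

package example

import (
    "fmt"

    "github.com/restic/restic/internal/restic"
)

// groupByHostAndTags groups the given snapshots and prints the group sizes.
func groupByHostAndTags(snapshots restic.Snapshots) error {
    var by restic.SnapshotGroupByOptions
    if err := by.Set("host,tags"); err != nil { // criteria string is an assumption
        return err
    }

    groups, _, err := restic.GroupSnapshots(snapshots, by)
    if err != nil {
        return err
    }
    for key, snaps := range groups {
        fmt.Printf("%s: %d snapshots\n", key, len(snaps))
    }
    return nil
}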
func IsAlreadyLocked ¶
IsAlreadyLocked returns true iff err indicates that a repository is already locked.
func IsInvalidLock ¶
IsInvalidLock returns true iff err indicates that locking failed due to an invalid lock.
func Listxattr ¶
Listxattr retrieves a list of names of extended attributes associated with the given path in the file system.
func LoadJSONUnpacked ¶
func LoadJSONUnpacked(ctx context.Context, repo LoaderUnpacked, t FileType, id ID, item interface{}) (err error)
LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on the item.
func ParallelList ¶
func RemoveAllLocks ¶
func RemoveAllLocks(ctx context.Context, repo Repository) (uint, error)
RemoveAllLocks removes all locks forcefully.
func RemoveStaleLocks ¶
func RemoveStaleLocks(ctx context.Context, repo Repository) (uint, error)
RemoveStaleLocks deletes all locks detected as stale from the repository.
func SaveConfig ¶
func SaveConfig(ctx context.Context, r SaverUnpacked, cfg Config) error
func StreamTrees ¶
func StreamTrees(ctx context.Context, wg *errgroup.Group, repo Loader, trees IDs, skip func(tree ID) bool, p *progress.Counter) <-chan TreeItem
StreamTrees iteratively loads the given trees and their subtrees. The skip method is guaranteed to always be called from the same goroutine. To shut down the started goroutines, either read all items from the channel or cancel the context. Then call `Wait()` on the errgroup until all goroutines have stopped.
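A sketch of draining the StreamTrees channel. It assumes an open Repository (which satisfies Loader per the listed signatures) and that a nil *progress.Counter is acceptable:

package example

import (
    "context"

    "golang.org/x/sync/errgroup"

    "github.com/restic/restic/internal/restic"
)

// countTrees walks all trees reachable from the given roots and returns how
// many tree items were streamed.
func countTrees(ctx context.Context, repo restic.Repository, roots restic.IDs) (int, error) {
    wg, ctx := errgroup.WithContext(ctx)

    ch := restic.StreamTrees(ctx, wg, repo, roots, func(tree restic.ID) bool {
        return false // visit every tree, skip nothing
    }, nil)

    n := 0
    for range ch {
        n++
    }
    // Wait for the loader goroutines to stop and collect their error, if any.
    return n, wg.Wait()
}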
func TestDisableCheckPolynomial ¶
TestDisableCheckPolynomial disables the check of the polynomial used for the chunker.
func TestSetLockTimeout ¶
TestSetLockTimeout can be used to reduce the lock wait timeout for tests.
func TestSetSnapshotID ¶
TestSetSnapshotID sets the snapshot's ID.
func ZeroPrefixLen ¶
ZeroPrefixLen returns the length of the longest all-zero prefix of p.
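A tiny illustrative call (internal import path, so only usable inside the restic module):

package example

import (
    "fmt"

    "github.com/restic/restic/internal/restic"
)

func zeroPrefix() {
    p := []byte{0, 0, 0, 0x2a, 0}
    // Only the leading zeros count, so this prints 3.
    fmt.Println(restic.ZeroPrefixLen(p))
}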
Types ¶
type Blob ¶
type Blob struct {
    BlobHandle
    Length             uint
    Offset             uint
    UncompressedLength uint
}
Blob is one part of a file or a tree.
func (Blob) DataLength ¶
func (Blob) IsCompressed ¶
type BlobHandle ¶
BlobHandle identifies a blob of a given type.
func NewRandomBlobHandle ¶
func NewRandomBlobHandle() BlobHandle
func TestParseHandle ¶
func TestParseHandle(s string, t BlobType) BlobHandle
TestParseHandle parses s as an ID, panics if that fails, and creates a BlobHandle with type t.
func (BlobHandle) String ¶
func (h BlobHandle) String() string
type BlobHandles ¶
type BlobHandles []BlobHandle
BlobHandles is an ordered list of BlobHandles that implements sort.Interface.
func (BlobHandles) Len ¶
func (h BlobHandles) Len() int
func (BlobHandles) Less ¶
func (h BlobHandles) Less(i, j int) bool
func (BlobHandles) String ¶
func (h BlobHandles) String() string
func (BlobHandles) Swap ¶
func (h BlobHandles) Swap(i, j int)
type BlobLoader ¶
type BlobSet ¶
type BlobSet map[BlobHandle]struct{}
BlobSet is a set of blobs.
func NewBlobSet ¶
func NewBlobSet(handles ...BlobHandle) BlobSet
NewBlobSet returns a new BlobSet, populated with the given handles.
func (BlobSet) Has ¶
func (s BlobSet) Has(h BlobHandle) bool
Has returns true iff id is contained in the set.
func (BlobSet) Intersect ¶
Intersect returns a new set containing the handles that are present in both sets.
func (BlobSet) List ¶
func (s BlobSet) List() BlobHandles
List returns a sorted slice of all BlobHandle in the set.
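A short sketch of the set operations, using only the methods listed above; the handles are random placeholders:

package example

import (
    "fmt"

    "github.com/restic/restic/internal/restic"
)

// blobSetDemo shows basic set operations on BlobSet.
func blobSetDemo() {
    a, b := restic.NewRandomBlobHandle(), restic.NewRandomBlobHandle()

    seen := restic.NewBlobSet(a)
    wanted := restic.NewBlobSet(a, b)

    missing := wanted.Sub(seen)                // handles in wanted but not in seen
    fmt.Println(missing.Has(b), missing.Len()) // true 1

    seen.Merge(wanted)
    fmt.Println(seen.Equals(wanted)) // true
}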
type BlobType ¶
type BlobType uint8
BlobType specifies what a blob stored in a pack is.
const (
    InvalidBlob BlobType = iota
    DataBlob
    TreeBlob
    NumBlobTypes // Number of types. Must be last in this enumeration.
)
These are the blob types that can be stored in a pack.
func (BlobType) IsMetadata ¶ added in v0.1.2
func (BlobType) MarshalJSON ¶
MarshalJSON encodes the BlobType into JSON.
func (*BlobType) UnmarshalJSON ¶
UnmarshalJSON decodes the BlobType from JSON.
type ByteReader ¶
ByteReader implements a RewindReader for a byte slice.
func NewByteReader ¶
func NewByteReader(buf []byte, hasher hash.Hash) *ByteReader
NewByteReader prepares a ByteReader that can then be used to read buf.
func (*ByteReader) Hash ¶
func (b *ByteReader) Hash() []byte
Hash returns a hash of the data if requested by the backend.
func (*ByteReader) Length ¶
func (b *ByteReader) Length() int64
Length returns the number of bytes that can be read from the reader after Rewind is called.
func (*ByteReader) Rewind ¶
func (b *ByteReader) Rewind() error
Rewind restarts the reader from the beginning of the data.
type Config ¶
type Config struct {
    Version           uint        `json:"version"`
    ID                string      `json:"id"`
    ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"`
}
Config contains the configuration for a repository.
func CreateConfig ¶
CreateConfig creates a config file with a randomly selected polynomial and ID.
func LoadConfig ¶
func LoadConfig(ctx context.Context, r LoaderUnpacked) (Config, error)
LoadConfig loads, checks, and returns the config for a repository.
type CountedBlobSet ¶
type CountedBlobSet map[BlobHandle]uint8
CountedBlobSet is a set of blobs. For each blob it also stores a uint8 value which can be used to track some information. The CountedBlobSet does not use that value in any way. New entries are created with value 0.
func NewCountedBlobSet ¶
func NewCountedBlobSet(handles ...BlobHandle) CountedBlobSet
NewCountedBlobSet returns a new CountedBlobSet, populated with the given handles.
func (CountedBlobSet) Copy ¶
func (s CountedBlobSet) Copy() CountedBlobSet
Copy returns a copy of the CountedBlobSet.
func (CountedBlobSet) Delete ¶
func (s CountedBlobSet) Delete(h BlobHandle)
Delete removes id from the set.
func (CountedBlobSet) Has ¶
func (s CountedBlobSet) Has(h BlobHandle) bool
Has returns true iff id is contained in the set.
func (CountedBlobSet) Insert ¶
func (s CountedBlobSet) Insert(h BlobHandle)
Insert adds id to the set.
func (CountedBlobSet) Len ¶
func (s CountedBlobSet) Len() int
func (CountedBlobSet) List ¶
func (s CountedBlobSet) List() BlobHandles
List returns a sorted slice of all BlobHandle in the set.
func (CountedBlobSet) String ¶
func (s CountedBlobSet) String() string
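A sketch of using the caller-maintained counter value. Since CountedBlobSet is a plain map type, the uint8 value is updated through ordinary map access; the helper name is hypothetical:

package example

import (
    "fmt"

    "github.com/restic/restic/internal/restic"
)

// countUses tracks, per blob handle, how often it was seen. The uint8 value is
// maintained by the caller; CountedBlobSet itself never touches it.
func countUses(handles []restic.BlobHandle) restic.CountedBlobSet {
    s := restic.NewCountedBlobSet()
    for _, h := range handles {
        s[h]++ // plain map access; new entries start at 0
    }
    fmt.Println(s.Len(), "distinct blobs")
    return s
}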
type Duration ¶
type Duration struct {
Hours, Days, Months, Years int
}
Duration is similar to time.Duration, except it only supports larger ranges like hours, days, months, and years.
func ParseDuration ¶
ParseDuration parses a duration from a string. The format is `6y5m234d37h`.
func ParseDurationOrPanic ¶
ParseDurationOrPanic parses a duration from a string or panics if string is invalid. The format is `6y5m234d37h`.
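A small sketch of parsing a duration. The exact ParseDuration signature is not shown on this page; it is assumed here to be func(string) (Duration, error):

package example

import (
    "fmt"

    "github.com/restic/restic/internal/restic"
)

// keepWithin parses a policy duration like "2y5m7d3h" into its components.
func keepWithin() error {
    d, err := restic.ParseDuration("2y5m7d3h")
    if err != nil {
        return err
    }
    fmt.Printf("years=%d months=%d days=%d hours=%d\n", d.Years, d.Months, d.Days, d.Hours)
    return nil
}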
type ExpirePolicy ¶
type ExpirePolicy struct {
    Last          int       // keep the last n snapshots
    Hourly        int       // keep the last n hourly snapshots
    Daily         int       // keep the last n daily snapshots
    Weekly        int       // keep the last n weekly snapshots
    Monthly       int       // keep the last n monthly snapshots
    Yearly        int       // keep the last n yearly snapshots
    Within        Duration  // keep snapshots made within this duration
    WithinHourly  Duration  // keep hourly snapshots made within this duration
    WithinDaily   Duration  // keep daily snapshots made within this duration
    WithinWeekly  Duration  // keep weekly snapshots made within this duration
    WithinMonthly Duration  // keep monthly snapshots made within this duration
    WithinYearly  Duration  // keep yearly snapshots made within this duration
    Tags          []TagList // keep all snapshots that include at least one of the tag lists.
}
ExpirePolicy configures which snapshots should be automatically removed.
func (ExpirePolicy) Empty ¶
func (e ExpirePolicy) Empty() bool
Empty returns true if no policy has been configured (all values zero).
func (ExpirePolicy) String ¶
func (e ExpirePolicy) String() (s string)
type ExtendedAttribute ¶
ExtendedAttribute is a tuple storing the xattr name and value.
type FileReader ¶
type FileReader struct {
    io.ReadSeeker
    Len int64
    // contains filtered or unexported fields
}
FileReader implements a RewindReader for an open file.
func NewFileReader ¶
func NewFileReader(f io.ReadSeeker, hash []byte) (*FileReader, error)
NewFileReader wraps f in a *FileReader.
func (*FileReader) Hash ¶
func (f *FileReader) Hash() []byte
Hash returns a hash of the data if requested by the backend.
func (*FileReader) Length ¶
func (f *FileReader) Length() int64
Length returns the length of the file.
func (*FileReader) Rewind ¶
func (f *FileReader) Rewind() error
Rewind seeks to the beginning of the file.
type ID ¶
type ID [idSize]byte
ID references content within a repository.
func Find ¶
Find loads the list of all files of type t and searches for names which start with prefix. If none is found, an error of type NoIDByPrefixError is returned. If more than one is found, an error of type MultipleIDMatchesError is returned.
func FindTreeDirectory ¶ added in v0.1.2
func NewRandomID ¶
func NewRandomID() ID
NewRandomID returns a randomly generated ID. When reading from rand fails, the function panics.
func SaveJSONUnpacked ¶
func SaveJSONUnpacked(ctx context.Context, repo SaverUnpacked, t FileType, item interface{}) (ID, error)
SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the backend as type t, without a pack. It returns the storage hash.
func SaveSnapshot ¶
SaveSnapshot saves the snapshot sn and returns its ID.
func SaveTree ¶
SaveTree stores a tree into the repository and returns the ID. The ID is checked against the index. The tree is only stored when the index does not contain the ID.
func TestParseID ¶
TestParseID parses s as an ID and panics if that fails.
func (ID) MarshalJSON ¶
MarshalJSON returns the JSON encoding of id.
func (*ID) UnmarshalJSON ¶
UnmarshalJSON parses the JSON-encoded data and stores the result in id.
type IDSet ¶
type IDSet map[ID]struct{}
IDSet is a set of IDs.
func (IDSet) Intersect ¶
Intersect returns a new set containing the IDs that are present in both sets.
type JSONUnpackedLoader ¶
type JSONUnpackedLoader interface {
LoadJSONUnpacked(context.Context, FileType, ID, interface{}) error
}
JSONUnpackedLoader loads unpacked JSON.
type KeepReason ¶
type KeepReason struct {
    Snapshot *Snapshot `json:"snapshot"`

    // description text which criteria match, e.g. "daily", "monthly"
    Matches []string `json:"matches"`

    // the counters after evaluating the current snapshot
    Counters struct {
        Last    int `json:"last,omitempty"`
        Hourly  int `json:"hourly,omitempty"`
        Daily   int `json:"daily,omitempty"`
        Weekly  int `json:"weekly,omitempty"`
        Monthly int `json:"monthly,omitempty"`
        Yearly  int `json:"yearly,omitempty"`
    } `json:"counters"`
}
KeepReason specifies why a particular snapshot was kept, and the counters at that point in the policy evaluation.
type Loader ¶
type Loader interface {
    LoadBlob(context.Context, BlobType, ID, []byte) ([]byte, error)
    LookupBlobSize(id ID, tpe BlobType) (uint, bool)
    Connections() uint
}
Loader loads a blob from a repository.
type LoaderUnpacked ¶
type LoaderUnpacked interface {
    // Connections returns the maximum number of concurrent backend operations
    Connections() uint
    LoadUnpacked(ctx context.Context, t FileType, id ID) (data []byte, err error)
}
LoaderUnpacked allows loading a blob not stored in a pack file
type Lock ¶
type Lock struct {
    Time      time.Time `json:"time"`
    Exclusive bool      `json:"exclusive"`
    Hostname  string    `json:"hostname"`
    Username  string    `json:"username"`
    PID       int       `json:"pid"`
    UID       uint32    `json:"uid,omitempty"`
    GID       uint32    `json:"gid,omitempty"`
    // contains filtered or unexported fields
}
Lock represents a process locking the repository for an operation.
There are two types of locks: exclusive and non-exclusive. There may be many different non-exclusive locks, but at most one exclusive lock, which can only be acquired while no non-exclusive lock is held.
A lock must be refreshed regularly to not be considered stale; this must be triggered by regularly calling Refresh.
func NewExclusiveLock ¶
func NewExclusiveLock(ctx context.Context, repo Repository) (*Lock, error)
NewExclusiveLock returns a new, exclusive lock for the repository. If another lock (normal or exclusive) is already held by another process, it returns an error that satisfies IsAlreadyLocked.
func NewLock ¶
func NewLock(ctx context.Context, repo Repository) (*Lock, error)
NewLock returns a new, non-exclusive lock for the repository. If an exclusive lock is already held by another process, it returns an error that satisfies IsAlreadyLocked.
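A sketch of acquiring a non-exclusive lock and distinguishing the "already locked" case; the wrapping message is illustrative:

package example

import (
    "context"
    "fmt"

    "github.com/restic/restic/internal/restic"
)

// lockRepo tries to acquire a non-exclusive lock and reports whether the
// repository is already locked exclusively by another process.
func lockRepo(ctx context.Context, repo restic.Repository) (*restic.Lock, error) {
    lock, err := restic.NewLock(ctx, repo)
    if restic.IsAlreadyLocked(err) {
        return nil, fmt.Errorf("repository is in use by another process: %w", err)
    }
    if err != nil {
        return nil, err
    }
    return lock, nil
}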
func (*Lock) Refresh ¶
Refresh refreshes the lock by creating a new file in the backend with a new timestamp. Afterwards the old lock is removed.
func (*Lock) RefreshStaleLock ¶ added in v0.1.2
RefreshStaleLock is an extended variant of Refresh that can also refresh stale lock files.
type MasterIndex ¶
type MasterIndex interface {
    Has(BlobHandle) bool
    Lookup(BlobHandle) []PackedBlob

    // Each runs fn on all blobs known to the index. When the context is cancelled,
    // the index iteration returns immediately. This blocks any modification of the index.
    Each(ctx context.Context, fn func(PackedBlob))
    ListPacks(ctx context.Context, packs IDSet) <-chan PackBlobs

    Save(ctx context.Context, repo SaverUnpacked, packBlacklist IDSet, extraObsolete IDs, p *progress.Counter) (obsolete IDSet, err error)
}
MasterIndex keeps track of how the blobs are stored within files.
type MultipleIDMatchesError ¶
type MultipleIDMatchesError struct {
// contains filtered or unexported fields
}
A MultipleIDMatchesError is returned by Find() when multiple IDs with a given prefix are found.
func (*MultipleIDMatchesError) Error ¶
func (e *MultipleIDMatchesError) Error() string
type NoIDByPrefixError ¶
type NoIDByPrefixError struct {
// contains filtered or unexported fields
}
A NoIDByPrefixError is returned by Find() when no ID for a given prefix could be found.
func (*NoIDByPrefixError) Error ¶
func (e *NoIDByPrefixError) Error() string
type Node ¶
type Node struct {
    Name       string      `json:"name"`
    Type       string      `json:"type"`
    Mode       os.FileMode `json:"mode,omitempty"`
    ModTime    time.Time   `json:"mtime,omitempty"`
    AccessTime time.Time   `json:"atime,omitempty"`
    ChangeTime time.Time   `json:"ctime,omitempty"`
    UID        uint32      `json:"uid"`
    GID        uint32      `json:"gid"`
    User       string      `json:"user,omitempty"`
    Group      string      `json:"group,omitempty"`
    Inode      uint64      `json:"inode,omitempty"`
    DeviceID   uint64      `json:"device_id,omitempty"` // device id of the file, stat.st_dev
    Size       uint64      `json:"size,omitempty"`
    Links      uint64      `json:"links,omitempty"`
    LinkTarget string      `json:"linktarget,omitempty"`
    // implicitly base64-encoded field. Only used while encoding, `linktarget_raw` will overwrite LinkTarget if present.
    // This allows storing arbitrary byte-sequences, which are possible as symlink targets on unix systems,
    // as LinkTarget without breaking backwards-compatibility.
    // Must only be set if the linktarget cannot be encoded as valid utf8.
    LinkTargetRaw      []byte              `json:"linktarget_raw,omitempty"`
    ExtendedAttributes []ExtendedAttribute `json:"extended_attributes,omitempty"`
    Device             uint64              `json:"device,omitempty"` // in case of Type == "dev", stat.st_rdev
    Content            IDs                 `json:"content"`
    Subtree            *ID                 `json:"subtree,omitempty"`

    Error string `json:"error,omitempty"`

    Path string `json:"-"`
}
Node is a file, directory or other item in a backup.
func NodeFromFileInfo ¶
NodeFromFileInfo returns a new node from the given path and FileInfo. It returns the first error that is encountered, together with a node.
func (*Node) CreateAt ¶
CreateAt creates the node at the given path but does NOT restore node metadata.
func (Node) GetExtendedAttribute ¶
GetExtendedAttribute gets the extended attribute.
func (Node) MarshalJSON ¶
func (Node) RestoreMetadata ¶
RestoreMetadata restores node metadata
func (Node) RestoreTimestamps ¶
func (*Node) UnmarshalJSON ¶
type PackedBlob ¶
PackedBlob is a blob stored within a file.
type Repository ¶
type Repository interface {
    // Backend returns the backend used by the repository
    Backend() backend.Backend
    // Connections returns the maximum number of concurrent backend operations
    Connections() uint

    Key() *crypto.Key

    Index() MasterIndex
    LoadIndex(context.Context, *progress.Counter) error
    SetIndex(MasterIndex) error
    LookupBlobSize(ID, BlobType) (uint, bool)

    Config() Config
    PackSize() uint

    // List calls the function fn for each file of type t in the repository.
    // When an error is returned by fn, processing stops and List() returns the
    // error.
    //
    // The function fn is called in the same Goroutine List() was called from.
    List(ctx context.Context, t FileType, fn func(ID, int64) error) error
    // ListPack returns the list of blobs saved in the pack id and the length of
    // the pack header.
    ListPack(context.Context, ID, int64) ([]Blob, uint32, error)

    LoadBlob(context.Context, BlobType, ID, []byte) ([]byte, error)
    SaveBlob(context.Context, BlobType, []byte, ID, bool) (ID, bool, int, error)

    // StartPackUploader starts goroutines to upload new pack files. The errgroup
    // is used to immediately notify about an upload error. Flush() will also return
    // that error.
    StartPackUploader(ctx context.Context, wg *errgroup.Group)
    Flush(context.Context) error

    // LoadUnpacked loads and decrypts the file with the given type and ID.
    LoadUnpacked(ctx context.Context, t FileType, id ID) (data []byte, err error)
    SaveUnpacked(context.Context, FileType, []byte) (ID, error)
}
Repository stores data in a backend. It provides high-level functions and transparently encrypts/decrypts data.
type RewindReader ¶
type RewindReader interface {
    io.Reader

    // Rewind rewinds the reader so the same data can be read again from the
    // start.
    Rewind() error

    // Length returns the number of bytes that can be read from the Reader
    // after calling Rewind.
    Length() int64

    // Hash returns a hash of the data if requested by the backend.
    Hash() []byte
}
RewindReader allows resetting the Reader to the beginning of the data.
type SaverUnpacked ¶
type SaverUnpacked interface {
    // Connections returns the maximum number of concurrent backend operations
    Connections() uint
    SaveUnpacked(context.Context, FileType, []byte) (ID, error)
}
SaverUnpacked allows saving a blob not stored in a pack file
type Snapshot ¶
type Snapshot struct {
    Time     time.Time `json:"time"`
    Parent   *ID       `json:"parent,omitempty"`
    Tree     *ID       `json:"tree"`
    Paths    []string  `json:"paths"`
    Hostname string    `json:"hostname,omitempty"`
    Username string    `json:"username,omitempty"`
    UID      uint32    `json:"uid,omitempty"`
    GID      uint32    `json:"gid,omitempty"`
    Excludes []string  `json:"excludes,omitempty"`
    Tags     []string  `json:"tags,omitempty"`
    Original *ID       `json:"original,omitempty"`

    ProgramVersion string `json:"program_version,omitempty"`
    // contains filtered or unexported fields
}
Snapshot is the state of a resource at one point in time.
func FindSnapshot ¶
func FindSnapshot(ctx context.Context, be Lister, loader LoaderUnpacked, s string) (*Snapshot, string, error)
FindSnapshot takes a string and tries to find a snapshot whose ID matches the string as closely as possible.
func LoadSnapshot ¶
LoadSnapshot loads the snapshot with the id and returns it.
func NewSnapshot ¶
NewSnapshot returns an initialized snapshot struct for the current user and time.
func TestCreateSnapshot ¶
TestCreateSnapshot creates a snapshot filled with fake data. The fake data is generated deterministically from the timestamp `at`, which is also used as the snapshot's timestamp. The tree's depth can be specified with the parameter depth.
func (*Snapshot) AddTags ¶
AddTags adds the given tags to the snapshot's tags, preventing duplicates. It returns true if any changes were made.
func (*Snapshot) HasHostname ¶
HasHostname returns true if either
- the snapshot hostname is in the list of the given hostnames, or
- the list of given hostnames is empty
func (*Snapshot) HasTagList ¶
HasTagList returns true if either
- the snapshot satisfies at least one TagList, so there is a TagList in l for which all tags are included in sn, or
- l is empty
func (*Snapshot) RemoveTags ¶
RemoveTags removes the given tags from the snapshot's tags and returns true if any changes were made.
type SnapshotFilter ¶
type SnapshotFilter struct {
    Hosts []string
    Tags  TagLists
    Paths []string
    // Match snapshots from before this timestamp. Zero for no limit.
    TimestampLimit time.Time
    // contains filtered or unexported fields
}
A SnapshotFilter denotes a set of snapshots based on hosts, tags and paths.
func (*SnapshotFilter) FindAll ¶
func (f *SnapshotFilter) FindAll(ctx context.Context, be Lister, loader LoaderUnpacked, snapshotIDs []string, fn SnapshotFindCb) error
FindAll yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots.
func (*SnapshotFilter) FindLatest ¶
func (f *SnapshotFilter) FindLatest(ctx context.Context, be Lister, loader LoaderUnpacked, snapshotID string) (*Snapshot, string, error)
FindLatest returns either the latest of a filtered list of all snapshots or a snapshot specified by `snapshotID`.
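A sketch of resolving a snapshot reference with a filter. The host and path values are placeholders, the repository is assumed to satisfy Lister and LoaderUnpacked, and the convention that the string "latest" selects the filtered lookup is an assumption drawn from the description above:

package example

import (
    "context"

    "github.com/restic/restic/internal/restic"
)

// latestHomeSnapshot resolves snapshotID (for example "latest") to a snapshot
// of /home taken on host "example".
func latestHomeSnapshot(ctx context.Context, repo restic.Repository, snapshotID string) (*restic.Snapshot, error) {
    f := restic.SnapshotFilter{
        Hosts: []string{"example"},
        Paths: []string{"/home"},
    }
    sn, _, err := f.FindLatest(ctx, repo, repo, snapshotID)
    return sn, err
}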
type SnapshotGroupByOptions ¶
func (*SnapshotGroupByOptions) Set ¶
func (l *SnapshotGroupByOptions) Set(s string) error
func (SnapshotGroupByOptions) String ¶
func (l SnapshotGroupByOptions) String() string
func (*SnapshotGroupByOptions) Type ¶
func (l *SnapshotGroupByOptions) Type() string
type SnapshotGroupKey ¶
type SnapshotGroupKey struct {
    Hostname string   `json:"hostname"`
    Paths    []string `json:"paths"`
    Tags     []string `json:"tags"`
}
SnapshotGroupKey is the structure for identifying groups in a grouped snapshot list. This is used by GroupSnapshots()
type Snapshots ¶
type Snapshots []*Snapshot
Snapshots is a list of snapshots.
func TestLoadAllSnapshots ¶ added in v0.1.2
func TestLoadAllSnapshots(ctx context.Context, repo Repository, excludeIDs IDSet) (snapshots Snapshots, err error)
TestLoadAllSnapshots returns a list of all snapshots in the repo. If a snapshot ID is in excludeIDs, it will not be included in the result.
type TagLists ¶
type TagLists []TagList
TagLists consists of several TagList.
type Tree ¶
type Tree struct {
Nodes []*Node `json:"nodes"`
}
Tree is an ordered list of nodes.
type TreeJSONBuilder ¶
type TreeJSONBuilder struct {
// contains filtered or unexported fields
}
func NewTreeJSONBuilder ¶
func NewTreeJSONBuilder() *TreeJSONBuilder
func (*TreeJSONBuilder) AddNode ¶
func (builder *TreeJSONBuilder) AddNode(node *Node) error
func (*TreeJSONBuilder) Finalize ¶
func (builder *TreeJSONBuilder) Finalize() ([]byte, error)
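A sketch of serialising a small tree. The node names and the Type value "file" are illustrative; nodes are added in sorted order by name, since out-of-order or duplicate nodes are rejected (see ErrTreeNotOrdered):

package example

import (
    "fmt"

    "github.com/restic/restic/internal/restic"
)

// buildTreeJSON serialises two nodes into the JSON form of a tree.
func buildTreeJSON() ([]byte, error) {
    b := restic.NewTreeJSONBuilder()

    nodes := []*restic.Node{
        {Name: "a.txt", Type: "file"},
        {Name: "b.txt", Type: "file"},
    }
    for _, node := range nodes {
        if err := b.AddNode(node); err != nil {
            return nil, fmt.Errorf("add %v: %w", node.Name, err)
        }
    }
    return b.Finalize()
}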
Source Files ¶
- backend_find.go
- blob.go
- blob_set.go
- config.go
- counted_blob_set.go
- doc.go
- duration.go
- find.go
- id.go
- ids.go
- idset.go
- json.go
- lister.go
- lock.go
- lock_unix.go
- mknod_unix.go
- node.go
- node_linux.go
- node_unix.go
- node_xattr.go
- parallel.go
- repository.go
- rewind_reader.go
- snapshot.go
- snapshot_find.go
- snapshot_group.go
- snapshot_policy.go
- tag_list.go
- testing.go
- tree.go
- tree_stream.go
- zeroprefix.go