Documentation ¶
Overview ¶
Package pwr is a generated protocol buffer package.
It is generated from these files:
pwr/pwr.proto
It has these top-level messages:
PatchHeader SyncHeader BsdiffHeader SyncOp SignatureHeader BlockHash CompressionSettings ManifestHeader ManifestBlockHash WoundsHeader Wound
Index ¶
- Constants
- Variables
- func AggregateWounds(outWounds chan *Wound, maxSize int64) chan *Wound
- func AssertNoGhosts(target string, signature *SignatureInfo) error
- func AssertValid(target string, signature *SignatureInfo) error
- func CompressWire(ctx *wire.WriteContext, compression *CompressionSettings) (*wire.WriteContext, error)
- func ComputeBlockSize(fileSize int64, blockIndex int64) int64
- func ComputeNumBlocks(fileSize int64) int64
- func ComputeSignature(ctx context.Context, container *tlc.Container, pool lake.Pool, ...) ([]wsync.BlockHash, error)
- func ComputeSignatureToWriter(ctx context.Context, container *tlc.Container, pool lake.Pool, ...) error
- func CopyContainer(container *tlc.Container, outPool lake.WritablePool, inPool lake.Pool, ...) error
- func DecompressWire(ctx *wire.ReadContext, compression *CompressionSettings) (*wire.ReadContext, error)
- func IsNotExist(err error) bool
- func NewSafeKeeper(params SafeKeeperParams) (lake.Pool, error)
- func RegisterCompressor(a CompressionAlgorithm, c Compressor)
- func RegisterDecompressor(a CompressionAlgorithm, d Decompressor)
- type ArchiveHealer
- func (ah *ArchiveHealer) Do(parentCtx context.Context, container *tlc.Container, wounds chan *Wound) error
- func (ah *ArchiveHealer) HasWounds() bool
- func (ah *ArchiveHealer) SetConsumer(consumer *state.Consumer)
- func (ah *ArchiveHealer) SetLockMap(lockMap LockMap)
- func (ah *ArchiveHealer) TotalCorrupted() int64
- func (ah *ArchiveHealer) TotalHealed() int64
- type BlockHash
- type BlockValidator
- type BsdiffHeader
- type CompressionAlgorithm
- type CompressionSettings
- func (*CompressionSettings) Descriptor() ([]byte, []int)
- func (m *CompressionSettings) GetAlgorithm() CompressionAlgorithm
- func (m *CompressionSettings) GetQuality() int32
- func (*CompressionSettings) ProtoMessage()
- func (m *CompressionSettings) Reset()
- func (m *CompressionSettings) String() string
- func (cs *CompressionSettings) ToString() string
- type Compressor
- type Decompressor
- type DiffContext
- type ErrHasWound
- type HashAlgorithm
- type HashGroups
- type HashInfo
- type Healer
- type LockMap
- type ManifestBlockHash
- type ManifestHeader
- type OnCloseFunc
- type PatchHeader
- type SafeKeeperOpen
- type SafeKeeperParams
- type SignatureHeader
- type SignatureInfo
- type SyncHeader
- type SyncHeader_Type
- type SyncOp
- func (*SyncOp) Descriptor() ([]byte, []int)
- func (m *SyncOp) GetBlockIndex() int64
- func (m *SyncOp) GetBlockSpan() int64
- func (m *SyncOp) GetData() []byte
- func (m *SyncOp) GetFileIndex() int64
- func (m *SyncOp) GetType() SyncOp_Type
- func (*SyncOp) ProtoMessage()
- func (m *SyncOp) Reset()
- func (m *SyncOp) String() string
- type SyncOp_Type
- type ValidatingPool
- func (vp *ValidatingPool) Close() error
- func (vp *ValidatingPool) GetReadSeeker(fileIndex int64) (io.ReadSeeker, error)
- func (vp *ValidatingPool) GetReader(fileIndex int64) (io.Reader, error)
- func (vp *ValidatingPool) GetSize(fileIndex int64) int64
- func (vp *ValidatingPool) GetWriter(fileIndex int64) (io.WriteCloser, error)
- type ValidatorContext
- type Wound
- func (*Wound) Descriptor() ([]byte, []int)
- func (m *Wound) GetEnd() int64
- func (m *Wound) GetIndex() int64
- func (m *Wound) GetKind() WoundKind
- func (m *Wound) GetStart() int64
- func (w *Wound) Healthy() bool
- func (w *Wound) PrettyString(container *tlc.Container) string
- func (*Wound) ProtoMessage()
- func (m *Wound) Reset()
- func (w *Wound) Size() int64
- func (m *Wound) String() string
- type WoundKind
- type WoundsConsumer
- type WoundsFilterFunc
- type WoundsGuardian
- type WoundsHeader
- type WoundsPrinter
- type WoundsWriter
Constants ¶
const ( // PatchMagic is the magic number for wharf patch files (.pwr) PatchMagic = int32(iota + 0xFEF5F00) // SignatureMagic is the magic number for wharf signature files (.pws) SignatureMagic // ManifestMagic is the magic number for wharf manifest files (.pwm) ManifestMagic // WoundsMagic is the magic number for wharf wounds file (.pww) WoundsMagic // ZipIndexMagic is the magic number for wharf zip index files (.pzi) ZipIndexMagic )
const BlockSize int64 = 64 * 1024 // 64k
BlockSize is the standard block size files are broken into when run through wharf's diff
const MaxWoundSize int64 = 4 * 1024 * 1024 // 4MB
MaxWoundSize is how large AggregateWounds will let an aggregate wound get before passing it along to its consumer. The idea is, when we're verifying a large file, we can start healing it before it's done verifying. In practice, this would require sharing file descriptors, and so it isn't done yet.
const ModeMask = 0644
ModeMask is OR'd with the permission bits of files being applied/created
Variables ¶
var CompressionAlgorithm_name = map[int32]string{
0: "NONE",
1: "BROTLI",
2: "GZIP",
3: "ZSTD",
}
var CompressionAlgorithm_value = map[string]int32{
"NONE": 0,
"BROTLI": 1,
"GZIP": 2,
"ZSTD": 3,
}
var Endianness = binary.LittleEndian
Endianness defines the byte order of all fixed-size integers written or read by wharf
var HashAlgorithm_name = map[int32]string{
0: "SHAKE128_32",
1: "CRC32C",
}
var HashAlgorithm_value = map[string]int32{
"SHAKE128_32": 0,
"CRC32C": 1,
}
var SyncHeader_Type_name = map[int32]string{
0: "RSYNC",
1: "BSDIFF",
}
var SyncHeader_Type_value = map[string]int32{
"RSYNC": 0,
"BSDIFF": 1,
}
var SyncOp_Type_name = map[int32]string{
0: "BLOCK_RANGE",
1: "DATA",
2049: "HEY_YOU_DID_IT",
}
var SyncOp_Type_value = map[string]int32{
"BLOCK_RANGE": 0,
"DATA": 1,
"HEY_YOU_DID_IT": 2049,
}
var WoundKind_name = map[int32]string{
0: "FILE",
1: "SYMLINK",
2: "DIR",
3: "CLOSED_FILE",
}
var WoundKind_value = map[string]int32{
"FILE": 0,
"SYMLINK": 1,
"DIR": 2,
"CLOSED_FILE": 3,
}
Functions ¶
func AggregateWounds ¶
AggregateWounds returns a channel that it'll receive wounds from, try to aggregate them into bigger wounds (for example: 250 contiguous 16KB wounds = one 4MB wound), and send to outWounds. It may return wounds bigger than maxSize, since it doesn't do any wound splitting, and it may return wounds smaller than maxSize, since it should relay all input wounds, no matter what size.
func AssertNoGhosts ¶
func AssertNoGhosts(target string, signature *SignatureInfo) error
func AssertValid ¶
func AssertValid(target string, signature *SignatureInfo) error
AssertValid validates target in FailFast mode - it's a shorthand so that setting up ValidatorContext isn't needed
func CompressWire ¶
func CompressWire(ctx *wire.WriteContext, compression *CompressionSettings) (*wire.WriteContext, error)
CompressWire wraps a wire.WriteContext into a compressor, according to given settings, so that any messages written through the returned WriteContext will first be compressed.
func ComputeBlockSize ¶
ComputeBlockSize returns the size of one of the file's blocks, given the size of the file and the position of the block in the file. It'll return BlockSize for all blocks except the last one, if the file size is not a multiple of BlockSize
func ComputeNumBlocks ¶
ComputeNumBlocks returns the number of small blocks a file is made up of. It returns a correct result even when the file's size is not a multiple of BlockSize
func ComputeSignature ¶
func ComputeSignature(ctx context.Context, container *tlc.Container, pool lake.Pool, consumer *state.Consumer) ([]wsync.BlockHash, error)
ComputeSignature computes the signature of all blocks of all files in a given container, by reading them from disk, relative to `basePath`, and notifying `consumer` of its progress
func ComputeSignatureToWriter ¶
func ComputeSignatureToWriter(ctx context.Context, container *tlc.Container, pool lake.Pool, consumer *state.Consumer, sigWriter wsync.SignatureWriter) error
ComputeSignatureToWriter is a variant of ComputeSignature that writes hashes to a callback
func CopyContainer ¶
func CopyContainer(container *tlc.Container, outPool lake.WritablePool, inPool lake.Pool, consumer *state.Consumer) error
CopyContainer copies from one container to the other. Combined with fspool and blockpool, it can be used to split a container into blocks or join it back into regular files.
func DecompressWire ¶
func DecompressWire(ctx *wire.ReadContext, compression *CompressionSettings) (*wire.ReadContext, error)
DecompressWire wraps a wire.ReadContext into a decompressor, according to the given settings, so that any messages read through the returned ReadContext will first be decompressed.
func IsNotExist ¶
IsNotExist is a variant of os.IsNotExist that works with nested errors
func NewSafeKeeper ¶
func NewSafeKeeper(params SafeKeeperParams) (lake.Pool, error)
func RegisterCompressor ¶
func RegisterCompressor(a CompressionAlgorithm, c Compressor)
RegisterCompressor lets wharf know how to compress a stream for a given algorithm
func RegisterDecompressor ¶
func RegisterDecompressor(a CompressionAlgorithm, d Decompressor)
RegisterDecompressor lets wharf know how to decompress a stream for a given algorithm
Types ¶
type ArchiveHealer ¶
type ArchiveHealer struct { // the directory we should heal Target string // an eos path for the archive ArchivePath string // A consumer to report progress to Consumer *state.Consumer // contains filtered or unexported fields }
An ArchiveHealer can repair from a .zip file (remote or local)
func (*ArchiveHealer) Do ¶
func (ah *ArchiveHealer) Do(parentCtx context.Context, container *tlc.Container, wounds chan *Wound) error
Do starts receiving from the wounds channel and healing
func (*ArchiveHealer) HasWounds ¶
func (ah *ArchiveHealer) HasWounds() bool
HasWounds returns true if the healer ever received wounds
func (*ArchiveHealer) SetConsumer ¶
func (ah *ArchiveHealer) SetConsumer(consumer *state.Consumer)
SetConsumer gives this healer a consumer to report progress to
func (*ArchiveHealer) SetLockMap ¶
func (ah *ArchiveHealer) SetLockMap(lockMap LockMap)
func (*ArchiveHealer) TotalCorrupted ¶
func (ah *ArchiveHealer) TotalCorrupted() int64
TotalCorrupted returns the total amount of corrupted data contained in the wounds this healer has received. Dirs and symlink wounds have 0-size, use HasWounds to know if there were any wounds at all.
func (*ArchiveHealer) TotalHealed ¶
func (ah *ArchiveHealer) TotalHealed() int64
TotalHealed returns the total amount of data written to disk to repair the wounds. This might be more than TotalCorrupted, since ArchiveHealer always redownloads whole files, even if they're just partly corrupted
type BlockHash ¶
type BlockHash struct { WeakHash uint32 `protobuf:"varint,1,opt,name=weakHash" json:"weakHash,omitempty"` StrongHash []byte `protobuf:"bytes,2,opt,name=strongHash,proto3" json:"strongHash,omitempty"` }
func (*BlockHash) Descriptor ¶
func (*BlockHash) GetStrongHash ¶
func (*BlockHash) GetWeakHash ¶
func (*BlockHash) ProtoMessage ¶
func (*BlockHash) ProtoMessage()
type BlockValidator ¶
type BlockValidator interface { BlockSize(fileIndex int64, blockIndex int64) int64 ValidateAsError(fileIndex int64, blockIndex int64, data []byte) error ValidateAsWound(fileIndex int64, blockIndex int64, data []byte) Wound }
func NewBlockValidator ¶
func NewBlockValidator(hashInfo *HashInfo) BlockValidator
type BsdiffHeader ¶
type BsdiffHeader struct {
TargetIndex int64 `protobuf:"varint,1,opt,name=targetIndex" json:"targetIndex,omitempty"`
}
func (*BsdiffHeader) Descriptor ¶
func (*BsdiffHeader) Descriptor() ([]byte, []int)
func (*BsdiffHeader) GetTargetIndex ¶
func (m *BsdiffHeader) GetTargetIndex() int64
func (*BsdiffHeader) ProtoMessage ¶
func (*BsdiffHeader) ProtoMessage()
func (*BsdiffHeader) Reset ¶
func (m *BsdiffHeader) Reset()
func (*BsdiffHeader) String ¶
func (m *BsdiffHeader) String() string
type CompressionAlgorithm ¶
type CompressionAlgorithm int32
const ( CompressionAlgorithm_NONE CompressionAlgorithm = 0 CompressionAlgorithm_BROTLI CompressionAlgorithm = 1 CompressionAlgorithm_GZIP CompressionAlgorithm = 2 CompressionAlgorithm_ZSTD CompressionAlgorithm = 3 )
func (CompressionAlgorithm) EnumDescriptor ¶
func (CompressionAlgorithm) EnumDescriptor() ([]byte, []int)
func (CompressionAlgorithm) String ¶
func (x CompressionAlgorithm) String() string
type CompressionSettings ¶
type CompressionSettings struct { Algorithm CompressionAlgorithm `protobuf:"varint,1,opt,name=algorithm,enum=io.itch.wharf.pwr.CompressionAlgorithm" json:"algorithm,omitempty"` Quality int32 `protobuf:"varint,2,opt,name=quality" json:"quality,omitempty"` }
func (*CompressionSettings) Descriptor ¶
func (*CompressionSettings) Descriptor() ([]byte, []int)
func (*CompressionSettings) GetAlgorithm ¶
func (m *CompressionSettings) GetAlgorithm() CompressionAlgorithm
func (*CompressionSettings) GetQuality ¶
func (m *CompressionSettings) GetQuality() int32
func (*CompressionSettings) ProtoMessage ¶
func (*CompressionSettings) ProtoMessage()
func (*CompressionSettings) Reset ¶
func (m *CompressionSettings) Reset()
func (*CompressionSettings) String ¶
func (m *CompressionSettings) String() string
func (*CompressionSettings) ToString ¶
func (cs *CompressionSettings) ToString() string
ToString returns a human-readable description of given compression settings
type Compressor ¶
A Compressor can compress a stream given a quality setting
type Decompressor ¶
A Decompressor can decompress a stream with a given algorithm
type DiffContext ¶
type DiffContext struct { Compression *CompressionSettings Consumer *state.Consumer SourceContainer *tlc.Container Pool lake.Pool TargetContainer *tlc.Container TargetSignature []wsync.BlockHash ReusedBytes int64 FreshBytes int64 AddedBytes int64 SavedBytes int64 }
DiffContext holds the state during a diff operation
func (*DiffContext) WritePatch ¶
func (dctx *DiffContext) WritePatch(ctx context.Context, patchWriter io.Writer, signatureWriter io.Writer) error
WritePatch outputs a pwr patch to patchWriter
type ErrHasWound ¶
func (*ErrHasWound) Error ¶
func (e *ErrHasWound) Error() string
type HashAlgorithm ¶
type HashAlgorithm int32
const ( HashAlgorithm_SHAKE128_32 HashAlgorithm = 0 HashAlgorithm_CRC32C HashAlgorithm = 1 )
func (HashAlgorithm) EnumDescriptor ¶
func (HashAlgorithm) EnumDescriptor() ([]byte, []int)
func (HashAlgorithm) String ¶
func (x HashAlgorithm) String() string
type HashGroups ¶
type HashInfo ¶
type HashInfo struct { Container *tlc.Container Groups HashGroups }
func ComputeHashInfo ¶
func ComputeHashInfo(sigInfo *SignatureInfo) (*HashInfo, error)
type Healer ¶
type Healer interface { WoundsConsumer SetConsumer(consumer *state.Consumer) SetLockMap(lockmap LockMap) TotalHealed() int64 }
A Healer consumes wounds and tries to repair them by creating directories, symbolic links, and writing the correct data into files.
type LockMap ¶
type LockMap []chan interface{}
A LockMap is an array of channels, corresponding to file indices of a container. If set, a healer must attempt to receive from the corresponding channel before starting to heal a file. Users of healers should generally pass an array of fresh channels and close them once the file becomes available for healing.
func NewLockMap ¶
type ManifestBlockHash ¶
type ManifestBlockHash struct {
Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
}
func (*ManifestBlockHash) Descriptor ¶
func (*ManifestBlockHash) Descriptor() ([]byte, []int)
func (*ManifestBlockHash) GetHash ¶
func (m *ManifestBlockHash) GetHash() []byte
func (*ManifestBlockHash) ProtoMessage ¶
func (*ManifestBlockHash) ProtoMessage()
func (*ManifestBlockHash) Reset ¶
func (m *ManifestBlockHash) Reset()
func (*ManifestBlockHash) String ¶
func (m *ManifestBlockHash) String() string
type ManifestHeader ¶
type ManifestHeader struct { Compression *CompressionSettings `protobuf:"bytes,1,opt,name=compression" json:"compression,omitempty"` Algorithm HashAlgorithm `protobuf:"varint,2,opt,name=algorithm,enum=io.itch.wharf.pwr.HashAlgorithm" json:"algorithm,omitempty"` }
func (*ManifestHeader) Descriptor ¶
func (*ManifestHeader) Descriptor() ([]byte, []int)
func (*ManifestHeader) GetAlgorithm ¶
func (m *ManifestHeader) GetAlgorithm() HashAlgorithm
func (*ManifestHeader) GetCompression ¶
func (m *ManifestHeader) GetCompression() *CompressionSettings
func (*ManifestHeader) ProtoMessage ¶
func (*ManifestHeader) ProtoMessage()
func (*ManifestHeader) Reset ¶
func (m *ManifestHeader) Reset()
func (*ManifestHeader) String ¶
func (m *ManifestHeader) String() string
type OnCloseFunc ¶
type OnCloseFunc func(fileIndex int64)
type PatchHeader ¶
type PatchHeader struct {
Compression *CompressionSettings `protobuf:"bytes,1,opt,name=compression" json:"compression,omitempty"`
}
func (*PatchHeader) Descriptor ¶
func (*PatchHeader) Descriptor() ([]byte, []int)
func (*PatchHeader) GetCompression ¶
func (m *PatchHeader) GetCompression() *CompressionSettings
func (*PatchHeader) ProtoMessage ¶
func (*PatchHeader) ProtoMessage()
func (*PatchHeader) Reset ¶
func (m *PatchHeader) Reset()
func (*PatchHeader) String ¶
func (m *PatchHeader) String() string
type SafeKeeperOpen ¶
type SafeKeeperOpen func() (savior.SeekSource, error)
type SafeKeeperParams ¶
type SafeKeeperParams struct { Inner lake.Pool Open SafeKeeperOpen }
type SignatureHeader ¶
type SignatureHeader struct {
Compression *CompressionSettings `protobuf:"bytes,1,opt,name=compression" json:"compression,omitempty"`
}
func (*SignatureHeader) Descriptor ¶
func (*SignatureHeader) Descriptor() ([]byte, []int)
func (*SignatureHeader) GetCompression ¶
func (m *SignatureHeader) GetCompression() *CompressionSettings
func (*SignatureHeader) ProtoMessage ¶
func (*SignatureHeader) ProtoMessage()
func (*SignatureHeader) Reset ¶
func (m *SignatureHeader) Reset()
func (*SignatureHeader) String ¶
func (m *SignatureHeader) String() string
type SignatureInfo ¶
A SignatureInfo contains all the hashes for small-blocks of a given container
func ReadSignature ¶
func ReadSignature(ctx context.Context, signatureReader savior.SeekSource) (*SignatureInfo, error)
ReadSignature reads the hashes from all files of a given container, from a wharf signature file.
type SyncHeader ¶
type SyncHeader struct { Type SyncHeader_Type `protobuf:"varint,1,opt,name=type,enum=io.itch.wharf.pwr.SyncHeader_Type" json:"type,omitempty"` FileIndex int64 `protobuf:"varint,16,opt,name=fileIndex" json:"fileIndex,omitempty"` }
func (*SyncHeader) Descriptor ¶
func (*SyncHeader) Descriptor() ([]byte, []int)
func (*SyncHeader) GetFileIndex ¶
func (m *SyncHeader) GetFileIndex() int64
func (*SyncHeader) GetType ¶
func (m *SyncHeader) GetType() SyncHeader_Type
func (*SyncHeader) ProtoMessage ¶
func (*SyncHeader) ProtoMessage()
func (*SyncHeader) Reset ¶
func (m *SyncHeader) Reset()
func (*SyncHeader) String ¶
func (m *SyncHeader) String() string
type SyncHeader_Type ¶
type SyncHeader_Type int32
const ( SyncHeader_RSYNC SyncHeader_Type = 0 // when set, bsdiffTargetIndex must be set SyncHeader_BSDIFF SyncHeader_Type = 1 )
func (SyncHeader_Type) EnumDescriptor ¶
func (SyncHeader_Type) EnumDescriptor() ([]byte, []int)
func (SyncHeader_Type) String ¶
func (x SyncHeader_Type) String() string
type SyncOp ¶
type SyncOp struct { Type SyncOp_Type `protobuf:"varint,1,opt,name=type,enum=io.itch.wharf.pwr.SyncOp_Type" json:"type,omitempty"` FileIndex int64 `protobuf:"varint,2,opt,name=fileIndex" json:"fileIndex,omitempty"` BlockIndex int64 `protobuf:"varint,3,opt,name=blockIndex" json:"blockIndex,omitempty"` BlockSpan int64 `protobuf:"varint,4,opt,name=blockSpan" json:"blockSpan,omitempty"` Data []byte `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` }
func (*SyncOp) Descriptor ¶
func (*SyncOp) GetBlockIndex ¶
func (*SyncOp) GetBlockSpan ¶
func (*SyncOp) GetFileIndex ¶
func (*SyncOp) GetType ¶
func (m *SyncOp) GetType() SyncOp_Type
func (*SyncOp) ProtoMessage ¶
func (*SyncOp) ProtoMessage()
type SyncOp_Type ¶
type SyncOp_Type int32
const ( SyncOp_BLOCK_RANGE SyncOp_Type = 0 SyncOp_DATA SyncOp_Type = 1 SyncOp_HEY_YOU_DID_IT SyncOp_Type = 2049 )
func (SyncOp_Type) EnumDescriptor ¶
func (SyncOp_Type) EnumDescriptor() ([]byte, []int)
func (SyncOp_Type) String ¶
func (x SyncOp_Type) String() string
type ValidatingPool ¶
type ValidatingPool struct { Pool lake.WritablePool // Container must match Pool - may have different file indices than Signature.Container Container *tlc.Container Signature *SignatureInfo Wounds chan *Wound WoundsFilter WoundsFilterFunc OnClose OnCloseFunc // contains filtered or unexported fields }
A ValidatingPool will check files against their hashes, but doesn't check directories or symlinks
func (*ValidatingPool) Close ¶
func (vp *ValidatingPool) Close() error
Close closes the underlying pool (and its reader, if any)
func (*ValidatingPool) GetReadSeeker ¶
func (vp *ValidatingPool) GetReadSeeker(fileIndex int64) (io.ReadSeeker, error)
GetReadSeeker is a pass-through to the underlying Pool, it doesn't validate
func (*ValidatingPool) GetReader ¶
func (vp *ValidatingPool) GetReader(fileIndex int64) (io.Reader, error)
GetReader is a pass-through to the underlying Pool, it doesn't validate
func (*ValidatingPool) GetSize ¶
func (vp *ValidatingPool) GetSize(fileIndex int64) int64
GetSize is a pass-through to the underlying Pool
func (*ValidatingPool) GetWriter ¶
func (vp *ValidatingPool) GetWriter(fileIndex int64) (io.WriteCloser, error)
GetWriter returns a writer that checks hashes before writing to the underlying pool's writer. It tries really hard to be transparent, but does buffer some data, which means some writing is only done when the returned writer is closed.
type ValidatorContext ¶
type ValidatorContext struct { WoundsPath string HealPath string Consumer *state.Consumer // FailFast makes Validate return Wounds as errors and stop checking FailFast bool // internal Wounds chan *Wound WoundsConsumer WoundsConsumer CaseFixStats *lake.CaseFixStats }
ValidatorContext holds both input and output parameters to the validation process (checking that a container corresponds to its signature: that all directories exist, symlinks exist and point to the right destinations, files exist and have the right content)
func (*ValidatorContext) Validate ¶
func (vctx *ValidatorContext) Validate(ctx context.Context, target string, signature *SignatureInfo) error
Validate checks the directory at target using the container info and hashes contained in signature. FailFast mode returns an error on the first corruption seen, other modes write wounds to a file or for a wounds consumer, like a healer.
type Wound ¶
type Wound struct { Index int64 `protobuf:"varint,1,opt,name=index" json:"index,omitempty"` Start int64 `protobuf:"varint,2,opt,name=start" json:"start,omitempty"` End int64 `protobuf:"varint,3,opt,name=end" json:"end,omitempty"` Kind WoundKind `protobuf:"varint,4,opt,name=kind,enum=io.itch.wharf.pwr.WoundKind" json:"kind,omitempty"` }
A Wound describes a corrupted portion of a file, in the half-open interval [start,end)
func (*Wound) Descriptor ¶
func (*Wound) Healthy ¶
Healthy returns true if the wound is not a wound, but simply a progress indicator used when validating files. It should not count towards HasWounds()
func (*Wound) PrettyString ¶
PrettyString returns a human-readable English string for a given wound
func (*Wound) ProtoMessage ¶
func (*Wound) ProtoMessage()
type WoundsConsumer ¶
type WoundsConsumer interface { // Do starts receiving wounds from the given channel, and returns // on error or when wound processing is done. Do(ctx context.Context, container *tlc.Container, wounds chan *Wound) error // TotalCorrupted returns the total size of corrupted data seen by this consumer. // If the only wounds are dir and symlink wounds, this may be 0, but HasWounds might // still be true TotalCorrupted() int64 // HasWounds returns true if any wounds were received by this consumer HasWounds() bool }
A WoundsConsumer takes file corruption information as input, and does something with it: print it, write it to a file, heal the corrupted files.
type WoundsFilterFunc ¶
type WoundsGuardian ¶
type WoundsGuardian struct {
// contains filtered or unexported fields
}
WoundsGuardian is a wounds consumer that returns an error on the first wound received.
func (*WoundsGuardian) Do ¶
func (wg *WoundsGuardian) Do(ctx context.Context, container *tlc.Container, wounds chan *Wound) error
Do returns an error on the first wound received. If no wounds are ever received, it returns nil (no error)
func (*WoundsGuardian) HasWounds ¶
func (wg *WoundsGuardian) HasWounds() bool
HasWounds returns true if the guardian has seen a wound
func (*WoundsGuardian) TotalCorrupted ¶
func (wg *WoundsGuardian) TotalCorrupted() int64
TotalCorrupted is only ever 0 or the size of the first wound, since a guardian doesn't keep track of any wounds beyond that
type WoundsHeader ¶
type WoundsHeader struct { }
Wounds files format: header, container, then any number of Wounds
func (*WoundsHeader) Descriptor ¶
func (*WoundsHeader) Descriptor() ([]byte, []int)
func (*WoundsHeader) ProtoMessage ¶
func (*WoundsHeader) ProtoMessage()
func (*WoundsHeader) Reset ¶
func (m *WoundsHeader) Reset()
func (*WoundsHeader) String ¶
func (m *WoundsHeader) String() string
type WoundsPrinter ¶
WoundsPrinter prints all received wounds as a Debug message to the given consumer.
func (*WoundsPrinter) Do ¶
func (wp *WoundsPrinter) Do(ctx context.Context, container *tlc.Container, wounds chan *Wound) error
Do starts printing wounds. It will return an error if a Consumer is not given
func (*WoundsPrinter) HasWounds ¶
func (wp *WoundsPrinter) HasWounds() bool
HasWounds returns true if this wounds printer has received any wounds at all
func (*WoundsPrinter) TotalCorrupted ¶
func (wp *WoundsPrinter) TotalCorrupted() int64
TotalCorrupted returns the total size of wounds received by this wounds printer
type WoundsWriter ¶
type WoundsWriter struct { WoundsPath string // contains filtered or unexported fields }
WoundsWriter writes wounds to a .pww (wharf wounds file format) file
func (*WoundsWriter) Do ¶
Do only creates a file at WoundsPath when it receives the first wound. If no wounds are ever received, Do is effectively a no-op.
func (*WoundsWriter) HasWounds ¶
func (ww *WoundsWriter) HasWounds() bool
HasWounds returns true if this wounds writer has received any wounds at all
func (*WoundsWriter) TotalCorrupted ¶
func (ww *WoundsWriter) TotalCorrupted() int64
TotalCorrupted returns the total size of wounds received by this wounds writer