Documentation ¶
Index ¶
- Variables
- func InitBuckets(p string) error
- func IsBucket(name BucketType) bool
- type Bucket
- func (b *Bucket) Evictable() bool
- func (b *Bucket) FileList() ([]fs.DirEntry, error)
- func (b *Bucket) Init() error
- func (b *Bucket) LogValue() slog.Value
- func (b *Bucket) ReadSeeker(name string) (io.ReadSeekCloser, error)
- func (b *Bucket) Reader(name string) (io.ReadCloser, error)
- func (b *Bucket) RemoveFile(name string) error
- func (b *Bucket) ResetCache() error
- func (b *Bucket) RunEvictionPolicy(logger *slog.Logger) (int, error)
- func (b *Bucket) Stat(name string) (fs.FileInfo, error)
- func (b *Bucket) Statistics(refresh bool) *BucketStats
- func (b *Bucket) WriteFile(name string, r io.Reader, mode fs.FileMode) error
- type BucketStats
- type BucketType
Constants ¶
This section is empty.
Variables ¶
View Source
var ( NoCompression = gzip.NoCompression DefaultCompression = gzip.DefaultCompression )
Functions ¶
func InitBuckets ¶ added in v0.25.0
func IsBucket ¶ added in v0.25.0
func IsBucket(name BucketType) bool
Types ¶
type Bucket ¶ added in v0.25.0
type Bucket struct { RootPath string Name string // Persistent is a sanity-check flag for important buckets, such as the tests bucket, so that eviction or cleaning is never performed on them Persistent bool // Cache is true only if the bucket should act like a cache // That is, it can be fully purged using the Reset() method // It's a safeguard against accidentally removing real data Cache bool MaxSize int64 // Maximum size in bytes. Values < 1024 mean system is off MaxTTL time.Duration // Maximum duration before emptying // 0 = flate.NoCompression // -1 = flate.DefaultCompression CompressionLevel int // contains filtered or unexported fields }
func GetBucket ¶ added in v0.25.0
func GetBucket(name BucketType) *Bucket
GetBucket panics if there is no bucket with that name.
func GetBuckets ¶ added in v0.25.0
func GetBuckets() []*Bucket
func (*Bucket) ReadSeeker ¶ added in v0.25.0
func (b *Bucket) ReadSeeker(name string) (io.ReadSeekCloser, error)
ReadSeeker tries to open the given file using the normal reader function. If the output implements ReadSeekCloser, then it is used directly. Otherwise, we decompress on the fly into a temp file and return that instead (it will be deleted on Close()). TODO: Better caching, maybe some kind of sub-bucket concept?
func (*Bucket) Reader ¶ added in v0.25.0
func (b *Bucket) Reader(name string) (io.ReadCloser, error)
func (*Bucket) RemoveFile ¶ added in v0.25.0
func (*Bucket) ResetCache ¶ added in v0.25.0
func (*Bucket) RunEvictionPolicy ¶ added in v0.25.0
func (*Bucket) Statistics ¶ added in v0.25.0
func (b *Bucket) Statistics(refresh bool) *BucketStats
type BucketStats ¶ added in v0.25.0
type BucketType ¶ added in v0.25.0
type BucketType string
const ( BucketTypeNone BucketType = "" BucketTypeTests BucketType = "tests" BucketTypeSubtests BucketType = "subtests" BucketTypeAttachments BucketType = "attachments" BucketTypeAvatars BucketType = "avatars" BucketTypeCheckers BucketType = "checkers" BucketTypeCompiles BucketType = "compiles" BucketTypeArtifacts BucketType = "artifacts" )
func (BucketType) Valid ¶ added in v0.25.0
func (t BucketType) Valid() bool
Click to show internal directories.
Click to hide internal directories.