Documentation ¶
Overview ¶
An LRU cache aimed at high concurrency
Index ¶
- type Cache
- func (c Cache) Clear()
- func (c *Cache[T]) Delete(key string) bool
- func (c *Cache[T]) DeleteFunc(matches func(key string, item *Item[T]) bool) int
- func (c *Cache[T]) DeletePrefix(prefix string) int
- func (c *Cache[T]) Extend(key string, duration time.Duration) bool
- func (c *Cache[T]) Fetch(key string, duration time.Duration, fetch func() (T, error)) (*Item[T], error)
- func (c *Cache[T]) ForEachFunc(matches func(key string, item *Item[T]) bool)
- func (c Cache) GC()
- func (c *Cache[T]) Get(key string) *Item[T]
- func (c Cache) GetDropped() int
- func (c Cache) GetSize() int64
- func (c *Cache[T]) GetWithoutPromote(key string) *Item[T]
- func (c *Cache[T]) ItemCount() int
- func (c *Cache[T]) Replace(key string, value T) bool
- func (c *Cache[T]) Set(key string, value T, duration time.Duration)
- func (c Cache) SetMaxSize(size int64)
- func (c *Cache[T]) Setnx(key string, value T, duration time.Duration)
- func (c *Cache[T]) Setnx2(key string, f func() T, duration time.Duration) *Item[T]
- func (c Cache) Stop()
- func (c Cache) SyncUpdates()
- func (c *Cache[T]) TrackingGet(key string) TrackedItem[T]
- func (c *Cache[T]) TrackingSet(key string, value T, duration time.Duration) TrackedItem[T]
- type Configuration
- func (c *Configuration[T]) Buckets(count uint32) *Configuration[T]
- func (c *Configuration[T]) DeleteBuffer(size uint32) *Configuration[T]
- func (c *Configuration[T]) GetsPerPromote(count int32) *Configuration[T]
- func (c *Configuration[T]) ItemsToPrune(count uint32) *Configuration[T]
- func (c *Configuration[T]) MaxSize(max int64) *Configuration[T]
- func (c *Configuration[T]) OnDelete(callback func(item *Item[T])) *Configuration[T]
- func (c *Configuration[T]) PromoteBuffer(size uint32) *Configuration[T]
- func (c *Configuration[T]) Track() *Configuration[T]
- type Item
- type LayeredCache
- func (c LayeredCache) Clear()
- func (c *LayeredCache[T]) Delete(primary, secondary string) bool
- func (c *LayeredCache[T]) DeleteAll(primary string) bool
- func (c *LayeredCache[T]) DeleteFunc(primary string, matches func(key string, item *Item[T]) bool) int
- func (c *LayeredCache[T]) DeletePrefix(primary, prefix string) int
- func (c *LayeredCache[T]) Fetch(primary, secondary string, duration time.Duration, fetch func() (T, error)) (*Item[T], error)
- func (c *LayeredCache[T]) ForEachFunc(primary string, matches func(key string, item *Item[T]) bool)
- func (c LayeredCache) GC()
- func (c *LayeredCache[T]) Get(primary, secondary string) *Item[T]
- func (c LayeredCache) GetDropped() int
- func (c *LayeredCache[T]) GetOrCreateSecondaryCache(primary string) *SecondaryCache[T]
- func (c LayeredCache) GetSize() int64
- func (c *LayeredCache[T]) GetWithoutPromote(primary, secondary string) *Item[T]
- func (c *LayeredCache[T]) ItemCount() int
- func (c *LayeredCache[T]) Replace(primary, secondary string, value T) bool
- func (c *LayeredCache[T]) Set(primary, secondary string, value T, duration time.Duration)
- func (c LayeredCache) SetMaxSize(size int64)
- func (c LayeredCache) Stop()
- func (c LayeredCache) SyncUpdates()
- func (c *LayeredCache[T]) TrackingGet(primary, secondary string) TrackedItem[T]
- func (c *LayeredCache[T]) TrackingSet(primary, secondary string, value T, duration time.Duration) TrackedItem[T]
- type List
- type Node
- type SecondaryCache
- func (s *SecondaryCache[T]) Delete(secondary string) bool
- func (s *SecondaryCache[T]) Fetch(secondary string, duration time.Duration, fetch func() (T, error)) (*Item[T], error)
- func (s *SecondaryCache[T]) Get(secondary string) *Item[T]
- func (s *SecondaryCache[T]) Replace(secondary string, value T) bool
- func (s *SecondaryCache[T]) Set(secondary string, value T, duration time.Duration) *Item[T]
- func (c *SecondaryCache[T]) TrackingGet(secondary string) TrackedItem[T]
- type Sized
- type TrackedItem
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type Cache ¶
type Cache[T any] struct {
	*Configuration[T]
	// contains filtered or unexported fields
}
func New ¶
func New[T any](config *Configuration[T]) *Cache[T]
Create a new cache with the specified configuration. See ccache.Configure() for creating a configuration.
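For illustration, a minimal sketch of creating and using a cache. The import path github.com/karlseguin/ccache/v3 and item.Value() are assumptions not shown in this documentation:

package main

import (
	"fmt"
	"time"

	ccache "github.com/karlseguin/ccache/v3" // assumed import path
)

func main() {
	// A cache of string values, capped at 1000 items.
	cache := ccache.New(ccache.Configure[string]().MaxSize(1000))
	defer cache.Stop() // shut down the worker goroutine when finished

	cache.Set("power", "9001", 10*time.Minute)
	if item := cache.Get("power"); item != nil {
		fmt.Println(item.Value()) // Value() returns the cached string
	}
}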
func (*Cache[T]) Delete ¶
Remove the item from the cache, return true if the item was present, false otherwise.
func (*Cache[T]) DeleteFunc ¶
Deletes all items for which the matches func evaluates to true.
func (*Cache[T]) DeletePrefix ¶
func (*Cache[T]) Extend ¶ added in v3.0.6
Extends the item's expiry if it exists, does not set it if it doesn't exist. Returns true if the item's expiry was extended, false otherwise.
func (*Cache[T]) Fetch ¶
func (c *Cache[T]) Fetch(key string, duration time.Duration, fetch func() (T, error)) (*Item[T], error)
Attempts to get the value from the cache and calls fetch on a miss (missing or stale item). If fetch returns an error, no value is cached and the error is returned back to the caller. Note that Fetch merely calls the public Get and Set functions. If you want a different Fetch behavior, such as thundering herd protection or returning expired items, implement it in your application.
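A sketch of Fetch filling a miss (assuming the imports from the example above; loadUser is a hypothetical loader):

// loadUser is a hypothetical slow lookup, only invoked on a cache miss.
func loadUser(id string) (string, error) {
	return "record-for-" + id, nil
}

func getUser(cache *ccache.Cache[string], id string) (string, error) {
	item, err := cache.Fetch("user:"+id, time.Minute, func() (string, error) {
		return loadUser(id)
	})
	if err != nil {
		return "", err // fetch failed; nothing was cached
	}
	return item.Value(), nil
}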
func (*Cache[T]) ForEachFunc ¶
func (Cache) GC ¶
func (c Cache) GC()
Forces GC. There should be no reason to call this function, except from tests which require synchronous GC. This is a control command.
func (*Cache[T]) Get ¶
Get an item from the cache. Returns nil if the item wasn't found. This can return an expired item. Use item.Expired() to see if the item is expired and item.TTL() to see how long until the item expires (which will be negative for an already expired item).
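Because Get can return an expired item, a sketch of checking expiry before trusting the value (same assumed imports as above):

// getFresh returns the cached value only if it is present and not expired.
func getFresh(cache *ccache.Cache[string], key string) (string, bool) {
	item := cache.Get(key)
	if item == nil || item.Expired() {
		return "", false
	}
	// item.TTL() reports the time until expiry (negative once expired).
	return item.Value(), true
}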
func (Cache) GetDropped ¶
func (c Cache) GetDropped() int
Gets the number of items removed from the cache due to memory pressure since the last time GetDropped was called. This is a control command.
func (Cache) GetSize ¶
func (c Cache) GetSize() int64
Gets the size of the cache. This is an O(1) call to make, but it is handled by the worker goroutine. It's meant to be called periodically for metrics, or from tests. This is a control command.
func (*Cache[T]) GetWithoutPromote ¶
Same as Get but does not promote the value. This essentially circumvents the "least recently used" aspect of this cache. To some degree, it's akin to a "peek".
func (*Cache[T]) Replace ¶
Replace the value if it exists, does not set if it doesn't. Returns true if the item existed and was replaced, false otherwise. Replace does not reset the item's TTL.
func (Cache) SetMaxSize ¶
func (c Cache) SetMaxSize(size int64)
Sets a new max size. That can result in a GC being run if the new maximum size is smaller than the cached size. This is a control command.
func (*Cache[T]) Setnx ¶ added in v3.0.5
Setnx sets the value in the cache for the specified duration if the key does not already exist.
func (*Cache[T]) Setnx2 ¶ added in v3.0.6
Setnx2 sets the value produced by f in the cache for the specified duration if the key does not already exist, and returns the item.
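A sketch contrasting Setnx and Setnx2; both take effect only when the key is absent (same assumed imports as above):

func seedDefaults(cache *ccache.Cache[string]) {
	// No-op if "config:mode" is already cached.
	cache.Setnx("config:mode", "standard", time.Hour)

	// Setnx2 builds the value lazily and returns the item (existing or newly set).
	item := cache.Setnx2("config:theme", func() string { return "dark" }, time.Hour)
	_ = item.Value()
}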
func (Cache) Stop ¶
func (c Cache) Stop()
Sends a stop signal to the worker thread. The worker thread will shut down 5 seconds after the last message is received. The cache should not be used after Stop is called, but concurrently executing requests should properly finish executing. This is a control command.
func (Cache) SyncUpdates ¶
func (c Cache) SyncUpdates()
SyncUpdates waits until the cache has finished asynchronous state updates for any operations that were done by the current goroutine up to now.
For efficiency, the cache's implementation of LRU behavior is partly managed by a worker goroutine that updates its internal data structures asynchronously. This means that the cache's state in terms of (for instance) eviction of LRU items is only eventually consistent; there is no guarantee that it happens before a Get or Set call has returned. Most of the time application code will not care about this, but especially in a test scenario you may want to be able to know when the worker has caught up.
This applies only to cache methods that were previously called by the same goroutine that is now calling SyncUpdates. If other goroutines are using the cache at the same time, there is no way to know whether any of them still have pending state updates when SyncUpdates returns. This is a control command.
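A test-style sketch: SyncUpdates makes this goroutine's pending updates visible to the worker before asserting on drops (assumes the testing, fmt, time and ccache imports; that exceeding MaxSize here produces drops is an assumption about default pruning):

func TestEviction(t *testing.T) {
	cache := ccache.New(ccache.Configure[int]().MaxSize(5))
	defer cache.Stop()

	for i := 0; i < 10; i++ {
		cache.Set(fmt.Sprintf("k%d", i), i, time.Minute)
	}
	// Without SyncUpdates the worker may not have processed the sets yet,
	// so eviction (and the dropped counter) could still be pending.
	cache.SyncUpdates()
	if cache.GetDropped() == 0 {
		t.Error("expected some items to have been dropped")
	}
}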
func (*Cache[T]) TrackingGet ¶
func (c *Cache[T]) TrackingGet(key string) TrackedItem[T]
Used when the cache was created with the Track() configuration option. Avoid otherwise
func (*Cache[T]) TrackingSet ¶
func (c *Cache[T]) TrackingSet(key string, value T, duration time.Duration) TrackedItem[T]
Used when the cache was created with the Track() configuration option. Sets the item, and returns a tracked reference to it.
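A sketch of the tracking flow: with Track() enabled, an item obtained via TrackingSet or TrackingGet is pinned until Release() is called (Release and Value are part of the TrackedItem API; same assumed imports as above):

func trackedExample() {
	cache := ccache.New(ccache.Configure[[]byte]().MaxSize(100).Track())
	defer cache.Stop()

	item := cache.TrackingSet("blob", []byte("payload"), time.Minute)
	// While tracked, "blob" will not be evicted, even under memory pressure.
	process(item.Value())
	item.Release() // allow normal eviction again
}

// process is a hypothetical consumer of the cached value.
func process(b []byte) {}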
type Configuration ¶
type Configuration[T any] struct {
	// contains filtered or unexported fields
}
func Configure ¶
func Configure[T any]() *Configuration[T]
Creates a configuration object with sensible defaults. Use this as the start of the fluent configuration, e.g.: ccache.New(ccache.Configure().MaxSize(10000))
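A fuller fluent-configuration sketch chaining several of the options documented below (the values are illustrative, not recommendations):

func newConfiguredCache() *ccache.Cache[string] {
	return ccache.New(ccache.Configure[string]().
		MaxSize(50000).      // cap the cache at 50,000 (sized) units
		Buckets(32).         // bucket count; must be a power of 2
		ItemsToPrune(100).   // items pruned when the max size is exceeded
		GetsPerPromote(5).   // promote an item only every 5th Get
		PromoteBuffer(2048). // pending-promotion queue size
		DeleteBuffer(1024))  // pending-delete queue size
}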
func (*Configuration[T]) Buckets ¶
func (c *Configuration[T]) Buckets(count uint32) *Configuration[T]
Keys are hashed into % bucket count to provide greater concurrency (every set requires a write lock on the bucket). Must be a power of 2 (1, 2, 4, 8, 16, ...) [16]
func (*Configuration[T]) DeleteBuffer ¶
func (c *Configuration[T]) DeleteBuffer(size uint32) *Configuration[T]
The size of the queue for items which should be deleted. If the queue fills up, calls to Delete() will block
func (*Configuration[T]) GetsPerPromote ¶
func (c *Configuration[T]) GetsPerPromote(count int32) *Configuration[T]
Given a large cache with a high read/write ratio, it's usually unnecessary to promote an item on every Get. GetsPerPromote specifies the number of Gets a key must have before being promoted [3]
func (*Configuration[T]) ItemsToPrune ¶
func (c *Configuration[T]) ItemsToPrune(count uint32) *Configuration[T]
The number of items to prune when memory is low [500]
func (*Configuration[T]) MaxSize ¶
func (c *Configuration[T]) MaxSize(max int64) *Configuration[T]
The max size for the cache [5000]
func (*Configuration[T]) OnDelete ¶
func (c *Configuration[T]) OnDelete(callback func(item *Item[T])) *Configuration[T]
OnDelete allows setting a callback function to react to item deletion. This typically allows cleanup of resources, such as calling Close() on cached objects that require some kind of tear-down.
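A sketch of OnDelete used to tear down cached resources; the conn type is a hypothetical stand-in for anything with a Close method (same assumed imports as above):

// conn is a hypothetical resource that needs explicit tear-down.
type conn struct{}

func (c *conn) Close() error { return nil }

func newConnCache() *ccache.Cache[*conn] {
	return ccache.New(ccache.Configure[*conn]().
		MaxSize(128).
		OnDelete(func(item *ccache.Item[*conn]) {
			// Invoked when an item is deleted or evicted; release its resource.
			item.Value().Close()
		}))
}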
func (*Configuration[T]) PromoteBuffer ¶
func (c *Configuration[T]) PromoteBuffer(size uint32) *Configuration[T]
The size of the queue for items which should be promoted. If the queue fills up, promotions are skipped [1024]
func (*Configuration[T]) Track ¶
func (c *Configuration[T]) Track() *Configuration[T]
By turning tracking on and using the cache's TrackingGet, the cache won't evict items which you haven't called Release() on. It's a simple reference counter.
type Item ¶
type Item[T any] struct {
	// contains filtered or unexported fields
}
func (*Item[T]) String ¶
String returns a string representation of the Item. This includes the default string representation of its Value(), as implemented by fmt.Sprintf with "%v", but the exact format of the string should not be relied on; it is provided only for debugging purposes, and because otherwise including an Item in a fmt.Printf or fmt.Sprintf call could cause fields of the Item to be read in a non-thread-safe way.
type LayeredCache ¶
type LayeredCache[T any] struct {
	*Configuration[T]
	// contains filtered or unexported fields
}
func Layered ¶
func Layered[T any](config *Configuration[T]) *LayeredCache[T]
See ccache.Configure() for creating a configuration
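A sketch of a layered cache keyed by primary/secondary pairs, e.g. caching multiple representations of the same resource (same assumed imports as above):

func layeredExample() {
	cache := ccache.Layered(ccache.Configure[string]().MaxSize(10000))
	defer cache.Stop()

	cache.Set("user:4", "json", `{"name":"goku"}`, time.Minute)
	cache.Set("user:4", "xml", "<user><name>goku</name></user>", time.Minute)

	if item := cache.Get("user:4", "json"); item != nil {
		_ = item.Value()
	}

	// Invalidate every representation of user:4 at once.
	cache.DeleteAll("user:4")
}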
func (LayeredCache) Clear ¶
func (c LayeredCache) Clear()
Clears the cache. This is a control command.
func (*LayeredCache[T]) Delete ¶
func (c *LayeredCache[T]) Delete(primary, secondary string) bool
Remove the item from the cache, return true if the item was present, false otherwise.
func (*LayeredCache[T]) DeleteAll ¶
func (c *LayeredCache[T]) DeleteAll(primary string) bool
Deletes all items that share the same primary key
func (*LayeredCache[T]) DeleteFunc ¶
func (c *LayeredCache[T]) DeleteFunc(primary string, matches func(key string, item *Item[T]) bool) int
Deletes all items that share the same primary key and where the matches func evaluates to true.
func (*LayeredCache[T]) DeletePrefix ¶
func (c *LayeredCache[T]) DeletePrefix(primary, prefix string) int
Deletes all items that share the same primary key and prefix.
func (*LayeredCache[T]) Fetch ¶
func (c *LayeredCache[T]) Fetch(primary, secondary string, duration time.Duration, fetch func() (T, error)) (*Item[T], error)
Attempts to get the value from the cache and calls fetch on a miss. If fetch returns an error, no value is cached and the error is returned back to the caller. Note that Fetch merely calls the public Get and Set functions. If you want a different Fetch behavior, such as thundering herd protection or returning expired items, implement it in your application.
func (*LayeredCache[T]) ForEachFunc ¶
func (c *LayeredCache[T]) ForEachFunc(primary string, matches func(key string, item *Item[T]) bool)
func (LayeredCache) GC ¶
func (c LayeredCache) GC()
Forces GC. There should be no reason to call this function, except from tests which require synchronous GC. This is a control command.
func (*LayeredCache[T]) Get ¶
func (c *LayeredCache[T]) Get(primary, secondary string) *Item[T]
Get an item from the cache. Returns nil if the item wasn't found. This can return an expired item. Use item.Expired() to see if the item is expired and item.TTL() to see how long until the item expires (which will be negative for an already expired item).
func (LayeredCache) GetDropped ¶
func (c LayeredCache) GetDropped() int
Gets the number of items removed from the cache due to memory pressure since the last time GetDropped was called. This is a control command.
func (*LayeredCache[T]) GetOrCreateSecondaryCache ¶
func (c *LayeredCache[T]) GetOrCreateSecondaryCache(primary string) *SecondaryCache[T]
Get the secondary cache for a given primary key. This operation will never return nil. In the case where the primary key does not exist, a new, underlying, empty bucket will be created and returned.
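A sketch of working against the secondary cache directly once the primary key is fixed (assumes the layered cache from the example above):

func secondaryExample(cache *ccache.LayeredCache[string]) {
	// Never nil: an empty bucket is created if "user:4" does not exist yet.
	sc := cache.GetOrCreateSecondaryCache("user:4")
	sc.Set("html", "<p>goku</p>", time.Minute)
	if item := sc.Get("html"); item != nil {
		_ = item.Value()
	}
}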
func (LayeredCache) GetSize ¶
func (c LayeredCache) GetSize() int64
Gets the size of the cache. This is an O(1) call to make, but it is handled by the worker goroutine. It's meant to be called periodically for metrics, or from tests. This is a control command.
func (*LayeredCache[T]) GetWithoutPromote ¶
func (c *LayeredCache[T]) GetWithoutPromote(primary, secondary string) *Item[T]
Same as Get but does not promote the value. This essentially circumvents the "least recently used" aspect of this cache. To some degree, it's akin to a "peek".
func (*LayeredCache[T]) ItemCount ¶
func (c *LayeredCache[T]) ItemCount() int
func (*LayeredCache[T]) Replace ¶
func (c *LayeredCache[T]) Replace(primary, secondary string, value T) bool
Replace the value if it exists, does not set if it doesn't. Returns true if the item existed and was replaced, false otherwise. Replace does not reset the item's TTL nor does it alter its position in the LRU.
func (*LayeredCache[T]) Set ¶
func (c *LayeredCache[T]) Set(primary, secondary string, value T, duration time.Duration)
Set the value in the cache for the specified duration
func (LayeredCache) SetMaxSize ¶
func (c LayeredCache) SetMaxSize(size int64)
Sets a new max size. That can result in a GC being run if the new maximum size is smaller than the cached size. This is a control command.
func (LayeredCache) Stop ¶
func (c LayeredCache) Stop()
Sends a stop signal to the worker thread. The worker thread will shut down 5 seconds after the last message is received. The cache should not be used after Stop is called, but concurrently executing requests should properly finish executing. This is a control command.
func (LayeredCache) SyncUpdates ¶
func (c LayeredCache) SyncUpdates()
SyncUpdates waits until the cache has finished asynchronous state updates for any operations that were done by the current goroutine up to now.
For efficiency, the cache's implementation of LRU behavior is partly managed by a worker goroutine that updates its internal data structures asynchronously. This means that the cache's state in terms of (for instance) eviction of LRU items is only eventually consistent; there is no guarantee that it happens before a Get or Set call has returned. Most of the time application code will not care about this, but especially in a test scenario you may want to be able to know when the worker has caught up.
This applies only to cache methods that were previously called by the same goroutine that is now calling SyncUpdates. If other goroutines are using the cache at the same time, there is no way to know whether any of them still have pending state updates when SyncUpdates returns. This is a control command.
func (*LayeredCache[T]) TrackingGet ¶
func (c *LayeredCache[T]) TrackingGet(primary, secondary string) TrackedItem[T]
Used when the cache was created with the Track() configuration option. Avoid otherwise
func (*LayeredCache[T]) TrackingSet ¶
func (c *LayeredCache[T]) TrackingSet(primary, secondary string, value T, duration time.Duration) TrackedItem[T]
Sets the value in the cache for the specified duration and returns a tracked reference to it.
type List ¶
func (*List[T]) MoveToFront ¶
type SecondaryCache ¶
type SecondaryCache[T any] struct {
	// contains filtered or unexported fields
}
func (*SecondaryCache[T]) Delete ¶
func (s *SecondaryCache[T]) Delete(secondary string) bool
Delete a secondary key. The semantics are the same as for LayeredCache.Delete
func (*SecondaryCache[T]) Fetch ¶
func (s *SecondaryCache[T]) Fetch(secondary string, duration time.Duration, fetch func() (T, error)) (*Item[T], error)
Fetch or set a secondary key. The semantics are the same as for LayeredCache.Fetch
func (*SecondaryCache[T]) Get ¶
func (s *SecondaryCache[T]) Get(secondary string) *Item[T]
Get the secondary key. The semantics are the same as for LayeredCache.Get
func (*SecondaryCache[T]) Replace ¶
func (s *SecondaryCache[T]) Replace(secondary string, value T) bool
Replace a secondary key. The semantics are the same as for LayeredCache.Replace
func (*SecondaryCache[T]) Set ¶
func (s *SecondaryCache[T]) Set(secondary string, value T, duration time.Duration) *Item[T]
Set the secondary key to a value. The semantics are the same as for LayeredCache.Set
func (*SecondaryCache[T]) TrackingGet ¶
func (c *SecondaryCache[T]) TrackingGet(secondary string) TrackedItem[T]
Track a secondary key. The semantics are the same as for LayeredCache.TrackingGet