lru_sized_cache.go (forked from ava-labs/avalanchego)
// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package cache

import (
	"sync"

	"github.com/ava-labs/avalanchego/utils"
	"github.com/ava-labs/avalanchego/utils/linkedhashmap"
)

var _ Cacher[struct{}, any] = (*sizedLRU[struct{}, any])(nil)

// sizedLRU is a key-value store with a bounded total size. If putting an
// element would exceed the bound, the least recently used elements are
// evicted until the bound is honored.
type sizedLRU[K comparable, V any] struct {
	lock        sync.Mutex
	elements    linkedhashmap.LinkedHashmap[K, V]
	maxSize     int
	currentSize int
	size        func(K, V) int
}

func NewSizedLRU[K comparable, V any](maxSize int, size func(K, V) int) Cacher[K, V] {
	return &sizedLRU[K, V]{
		elements: linkedhashmap.New[K, V](),
		maxSize:  maxSize,
		size:     size,
	}
}

func (c *sizedLRU[K, V]) Put(key K, value V) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.put(key, value)
}

func (c *sizedLRU[K, V]) Get(key K) (V, bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	return c.get(key)
}

func (c *sizedLRU[K, V]) Evict(key K) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.evict(key)
}

func (c *sizedLRU[K, V]) Flush() {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.flush()
}

func (c *sizedLRU[_, _]) Len() int {
	c.lock.Lock()
	defer c.lock.Unlock()

	return c.len()
}

func (c *sizedLRU[_, _]) PortionFilled() float64 {
	c.lock.Lock()
	defer c.lock.Unlock()

	return c.portionFilled()
}

func (c *sizedLRU[K, V]) put(key K, value V) {
	newEntrySize := c.size(key, value)
	if newEntrySize > c.maxSize {
		// A single entry larger than the bound can never fit: drop everything
		// and do not store the new entry.
		c.flush()
		return
	}

	// If the key is already present, subtract its old size before re-adding it.
	if oldValue, ok := c.elements.Get(key); ok {
		c.currentSize -= c.size(key, oldValue)
	}

	// Remove elements until the size of elements in the cache <= [c.maxSize].
	for c.currentSize > c.maxSize-newEntrySize {
		oldestKey, oldestValue, _ := c.elements.Oldest()
		c.elements.Delete(oldestKey)
		c.currentSize -= c.size(oldestKey, oldestValue)
	}

	c.elements.Put(key, value)
	c.currentSize += newEntrySize
}

func (c *sizedLRU[K, V]) get(key K) (V, bool) {
	value, ok := c.elements.Get(key)
	if !ok {
		return utils.Zero[V](), false
	}

	c.elements.Put(key, value) // Mark [key] as MRU.
	return value, true
}

func (c *sizedLRU[K, _]) evict(key K) {
	if value, ok := c.elements.Get(key); ok {
		c.elements.Delete(key)
		c.currentSize -= c.size(key, value)
	}
}

func (c *sizedLRU[K, V]) flush() {
	c.elements = linkedhashmap.New[K, V]()
	c.currentSize = 0
}

func (c *sizedLRU[_, _]) len() int {
	return c.elements.Len()
}

func (c *sizedLRU[_, _]) portionFilled() float64 {
	return float64(c.currentSize) / float64(c.maxSize)
}