Commit

Remove unused @lru_cache decorator (matrix-org#13595)
* Remove unused `@lru_cache` decorator

Spotted this while working on something else.

Co-authored-by: David Robertson <[email protected]>
Fizzadar and David Robertson authored Oct 25, 2022
1 parent d125919 commit c9dffd5
Showing 3 changed files with 5 additions and 140 deletions.
1 change: 1 addition & 0 deletions changelog.d/13595.misc
@@ -0,0 +1 @@
+Remove unused `@lru_cache` decorator.
104 changes: 0 additions & 104 deletions synapse/util/caches/descriptors.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import enum
 import functools
 import inspect
 import logging
@@ -146,109 +145,6 @@ def __init__(
         )
 
 
-class _LruCachedFunction(Generic[F]):
-    cache: LruCache[CacheKey, Any]
-    __call__: F
-
-
-def lru_cache(
-    *, max_entries: int = 1000, cache_context: bool = False
-) -> Callable[[F], _LruCachedFunction[F]]:
-    """A method decorator that applies a memoizing cache around the function.
-
-    This is more-or-less a drop-in equivalent to functools.lru_cache, although note
-    that the signature is slightly different.
-
-    The main differences with functools.lru_cache are:
-        (a) the size of the cache can be controlled via the cache_factor mechanism
-        (b) the wrapped function can request a "cache_context" which provides a
-            callback mechanism to indicate that the result is no longer valid
-        (c) prometheus metrics are exposed automatically.
-
-    The function should take zero or more arguments, which are used as the key for the
-    cache. Single-argument functions use that argument as the cache key; otherwise the
-    arguments are built into a tuple.
-
-    Cached functions can be "chained" (i.e. a cached function can call other cached
-    functions and get appropriately invalidated when the caches they call are
-    invalidated) by adding a special "cache_context" argument to the function
-    and passing that as a kwarg to all caches called. For example:
-
-        @lru_cache(cache_context=True)
-        def foo(self, key, cache_context):
-            r1 = self.bar1(key, on_invalidate=cache_context.invalidate)
-            r2 = self.bar2(key, on_invalidate=cache_context.invalidate)
-            return r1 + r2
-
-    The wrapped function also has a 'cache' property which offers direct access to the
-    underlying LruCache.
-    """
-
-    def func(orig: F) -> _LruCachedFunction[F]:
-        desc = LruCacheDescriptor(
-            orig,
-            max_entries=max_entries,
-            cache_context=cache_context,
-        )
-        return cast(_LruCachedFunction[F], desc)
-
-    return func
-
-
-class LruCacheDescriptor(_CacheDescriptorBase):
-    """Helper for @lru_cache"""
-
-    class _Sentinel(enum.Enum):
-        sentinel = object()
-
-    def __init__(
-        self,
-        orig: Callable[..., Any],
-        max_entries: int = 1000,
-        cache_context: bool = False,
-    ):
-        super().__init__(
-            orig, num_args=None, uncached_args=None, cache_context=cache_context
-        )
-        self.max_entries = max_entries
-
-    def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]:
-        cache: LruCache[CacheKey, Any] = LruCache(
-            cache_name=self.name,
-            max_size=self.max_entries,
-        )
-
-        get_cache_key = self.cache_key_builder
-        sentinel = LruCacheDescriptor._Sentinel.sentinel
-
-        @functools.wraps(self.orig)
-        def _wrapped(*args: Any, **kwargs: Any) -> Any:
-            invalidate_callback = kwargs.pop("on_invalidate", None)
-            callbacks = (invalidate_callback,) if invalidate_callback else ()
-
-            cache_key = get_cache_key(args, kwargs)
-
-            ret = cache.get(cache_key, default=sentinel, callbacks=callbacks)
-            if ret != sentinel:
-                return ret
-
-            # Add our own `cache_context` to argument list if the wrapped function
-            # has asked for one
-            if self.add_cache_context:
-                kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key)
-
-            ret2 = self.orig(obj, *args, **kwargs)
-            cache.set(cache_key, ret2, callbacks=callbacks)
-
-            return ret2
-
-        wrapped = cast(CachedFunction, _wrapped)
-        wrapped.cache = cache
-        obj.__dict__[self.name] = wrapped
-
-        return wrapped
 
 
 class DeferredCacheDescriptor(_CacheDescriptorBase):
     """A method decorator that applies a memoizing cache around the function.
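As its docstring says, the removed helper was more-or-less a drop-in equivalent to functools.lru_cache, differing mainly in cache_factor-based sizing, the cache_context invalidation callbacks, and automatic Prometheus metrics. Code that only needs plain synchronous memoization can use the stdlib decorator directly; here is a minimal sketch (the UserNameCache class and its method are hypothetical examples, not part of Synapse):

    import functools


    class UserNameCache:
        """Hypothetical example: plain memoization with the stdlib decorator."""

        @functools.lru_cache(maxsize=1000)  # rough analogue of max_entries=1000
        def localpart(self, user_id: str) -> str:
            # An expensive lookup would stand in here; the result is cached
            # per (self, user_id), since `self` is part of the cache key.
            return user_id.split(":", 1)[0].lstrip("@")


    cache = UserNameCache()
    assert cache.localpart("@alice:example.com") == "alice"
    assert cache.localpart("@alice:example.com") == "alice"  # served from cache

One caveat with the stdlib version on methods: because `self` is part of the key, instances are kept alive for the lifetime of the cache, whereas the removed descriptor stored a per-instance LruCache in `obj.__dict__`.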
40 changes: 4 additions & 36 deletions tests/util/caches/test_descriptors.py
@@ -28,46 +28,14 @@
     make_deferred_yieldable,
 )
 from synapse.util.caches import descriptors
-from synapse.util.caches.descriptors import cached, cachedList, lru_cache
+from synapse.util.caches.descriptors import cached, cachedList
 
 from tests import unittest
 from tests.test_utils import get_awaitable_result
 
 logger = logging.getLogger(__name__)

-class LruCacheDecoratorTestCase(unittest.TestCase):
-    def test_base(self):
-        class Cls:
-            def __init__(self):
-                self.mock = mock.Mock()
-
-            @lru_cache()
-            def fn(self, arg1, arg2):
-                return self.mock(arg1, arg2)
-
-        obj = Cls()
-        obj.mock.return_value = "fish"
-        r = obj.fn(1, 2)
-        self.assertEqual(r, "fish")
-        obj.mock.assert_called_once_with(1, 2)
-        obj.mock.reset_mock()
-
-        # a call with different params should call the mock again
-        obj.mock.return_value = "chips"
-        r = obj.fn(1, 3)
-        self.assertEqual(r, "chips")
-        obj.mock.assert_called_once_with(1, 3)
-        obj.mock.reset_mock()
-
-        # the two values should now be cached
-        r = obj.fn(1, 2)
-        self.assertEqual(r, "fish")
-        r = obj.fn(1, 3)
-        self.assertEqual(r, "chips")
-        obj.mock.assert_not_called()
-
-
 def run_on_reactor():
     d = defer.Deferred()
     reactor.callLater(0, d.callback, 0)
@@ -478,10 +446,10 @@ async def func1(self, key, cache_context):

     @cached(cache_context=True)
     async def func2(self, key, cache_context):
-        return self.func3(key, on_invalidate=cache_context.invalidate)
+        return await self.func3(key, on_invalidate=cache_context.invalidate)
 
-    @lru_cache(cache_context=True)
-    def func3(self, key, cache_context):
+    @cached(cache_context=True)
+    async def func3(self, key, cache_context):
         self.invalidate = cache_context.invalidate
         return 42

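As the hunk above shows, migrating the one remaining caller is mechanical: `func3` becomes a coroutine under `@cached`, and its call site in `func2` gains an `await`; the cache_context / on_invalidate chaining is unchanged. A sketch of the same pattern, assuming a hypothetical ThingStore class (only the `cached` import is real Synapse API):

    from synapse.util.caches.descriptors import cached


    class ThingStore:
        """Hypothetical class showing the @lru_cache -> @cached migration."""

        # Previously this might have been a synchronous method under
        # @lru_cache(cache_context=True); under @cached it is a coroutine.
        @cached(cache_context=True)
        async def get_thing(self, key, cache_context):
            # Chain invalidation to the inner cache, exactly as func2/func3
            # do in the test above.
            return await self.load_thing(key, on_invalidate=cache_context.invalidate)

        @cached()
        async def load_thing(self, key):
            return {"key": key}  # stand-in for an expensive lookup

Callers must likewise switch from a plain call to `await store.get_thing(key)`.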
