mm: zswap: function ordering: shrink_memcg_cb
shrink_memcg_cb() is called by the shrinker and is based on
zswap_writeback_entry(). Move it in between. Save one fwd decl.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Johannes Weiner <[email protected]>
Reviewed-by: Nhat Pham <[email protected]>
Cc: Chengming Zhou <[email protected]>
Cc: Yosry Ahmed <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
hnaz authored and akpm00 committed Feb 22, 2024
1 parent 9986d35 commit eb23ee4
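
The reorganization below is purely about placement: shrink_memcg_cb() now sits directly between zswap_writeback_entry(), which it calls, and the shrinker code that uses it, so the forward declaration at the top of the shrinker section can be dropped. A minimal standalone C sketch of that pattern, with hypothetical names rather than the kernel code:

#include <stdio.h>

/* Stand-in for zswap_writeback_entry(): the function the callback relies on. */
static int do_writeback(int id)
{
	printf("writing back entry %d\n", id);
	return 0;
}

/*
 * Stand-in for shrink_memcg_cb(): defined right after its dependency and
 * before its caller, so no separate forward declaration is needed.
 */
static int reclaim_cb(int id)
{
	return do_writeback(id);
}

/* Stand-in for the shrinker entry point: it already sees the definition above. */
static int scan(void)
{
	return reclaim_cb(42);
}

int main(void)
{
	return scan();
}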
125 changes: 61 additions & 64 deletions mm/zswap.c
@@ -1254,7 +1254,67 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 * shrinker functions
 **********************************/
 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
-				       spinlock_t *lock, void *arg);
+				       spinlock_t *lock, void *arg)
+{
+	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
+	bool *encountered_page_in_swapcache = (bool *)arg;
+	swp_entry_t swpentry;
+	enum lru_status ret = LRU_REMOVED_RETRY;
+	int writeback_result;
+
+	/*
+	 * Rotate the entry to the tail before unlocking the LRU,
+	 * so that in case of an invalidation race concurrent
+	 * reclaimers don't waste their time on it.
+	 *
+	 * If writeback succeeds, or failure is due to the entry
+	 * being invalidated by the swap subsystem, the invalidation
+	 * will unlink and free it.
+	 *
+	 * Temporary failures, where the same entry should be tried
+	 * again immediately, almost never happen for this shrinker.
+	 * We don't do any trylocking; -ENOMEM comes closest,
+	 * but that's extremely rare and doesn't happen spuriously
+	 * either. Don't bother distinguishing this case.
+	 *
+	 * But since they do exist in theory, the entry cannot just
+	 * be unlinked, or we could leak it. Hence, rotate.
+	 */
+	list_move_tail(item, &l->list);
+
+	/*
+	 * Once the lru lock is dropped, the entry might get freed. The
+	 * swpentry is copied to the stack, and entry isn't deref'd again
+	 * until the entry is verified to still be alive in the tree.
+	 */
+	swpentry = entry->swpentry;
+
+	/*
+	 * It's safe to drop the lock here because we return either
+	 * LRU_REMOVED_RETRY or LRU_RETRY.
+	 */
+	spin_unlock(lock);
+
+	writeback_result = zswap_writeback_entry(entry, swpentry);
+
+	if (writeback_result) {
+		zswap_reject_reclaim_fail++;
+		ret = LRU_RETRY;
+
+		/*
+		 * Encountering a page already in swap cache is a sign that we are shrinking
+		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
+		 * shrinker context).
+		 */
+		if (writeback_result == -EEXIST && encountered_page_in_swapcache)
+			*encountered_page_in_swapcache = true;
+	} else {
+		zswap_written_back_pages++;
+	}
+
+	spin_lock(lock);
+	return ret;
+}
 
 static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
 		struct shrink_control *sc)
@@ -1354,69 +1414,6 @@ static void zswap_alloc_shrinker(struct zswap_pool *pool)
 	pool->shrinker->seeks = DEFAULT_SEEKS;
 }
 
-static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
-				       spinlock_t *lock, void *arg)
-{
-	struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
-	bool *encountered_page_in_swapcache = (bool *)arg;
-	swp_entry_t swpentry;
-	enum lru_status ret = LRU_REMOVED_RETRY;
-	int writeback_result;
-
-	/*
-	 * Rotate the entry to the tail before unlocking the LRU,
-	 * so that in case of an invalidation race concurrent
-	 * reclaimers don't waste their time on it.
-	 *
-	 * If writeback succeeds, or failure is due to the entry
-	 * being invalidated by the swap subsystem, the invalidation
-	 * will unlink and free it.
-	 *
-	 * Temporary failures, where the same entry should be tried
-	 * again immediately, almost never happen for this shrinker.
-	 * We don't do any trylocking; -ENOMEM comes closest,
-	 * but that's extremely rare and doesn't happen spuriously
-	 * either. Don't bother distinguishing this case.
-	 *
-	 * But since they do exist in theory, the entry cannot just
-	 * be unlinked, or we could leak it. Hence, rotate.
-	 */
-	list_move_tail(item, &l->list);
-
-	/*
-	 * Once the lru lock is dropped, the entry might get freed. The
-	 * swpentry is copied to the stack, and entry isn't deref'd again
-	 * until the entry is verified to still be alive in the tree.
-	 */
-	swpentry = entry->swpentry;
-
-	/*
-	 * It's safe to drop the lock here because we return either
-	 * LRU_REMOVED_RETRY or LRU_RETRY.
-	 */
-	spin_unlock(lock);
-
-	writeback_result = zswap_writeback_entry(entry, swpentry);
-
-	if (writeback_result) {
-		zswap_reject_reclaim_fail++;
-		ret = LRU_RETRY;
-
-		/*
-		 * Encountering a page already in swap cache is a sign that we are shrinking
-		 * into the warmer region. We should terminate shrinking (if we're in the dynamic
-		 * shrinker context).
-		 */
-		if (writeback_result == -EEXIST && encountered_page_in_swapcache)
-			*encountered_page_in_swapcache = true;
-	} else {
-		zswap_written_back_pages++;
-	}
-
-	spin_lock(lock);
-	return ret;
-}
-
 static int shrink_memcg(struct mem_cgroup *memcg)
 {
 	struct zswap_pool *pool;

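Beyond the move itself, the body of shrink_memcg_cb() illustrates the rotate-before-unlock pattern its comments describe: rotate the entry to the tail while the LRU lock is held, copy the identifying key to the stack, drop the lock for the slow writeback, then retake it before returning. A minimal userspace sketch of that pattern, under stated assumptions (hypothetical names; a pthread mutex and a hand-rolled circular list stand in for the kernel's LRU lock and list_lru):

#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *prev, *next;
	int key;				/* stand-in for swpentry */
};

static struct node head = { &head, &head, -1 };	/* circular list head */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void move_to_tail(struct node *n)
{
	/* unlink */
	n->prev->next = n->next;
	n->next->prev = n->prev;
	/* reinsert just before the head, i.e. at the tail */
	n->prev = head.prev;
	n->next = &head;
	head.prev->next = n;
	head.prev = n;
}

/* Called with list_lock held; returns with it held again. */
static void reclaim_one(struct node *n)
{
	int key;

	/* Rotate first, so concurrent walkers skip this node if we fail. */
	move_to_tail(n);

	/*
	 * Copy the identifying key to the stack before dropping the lock;
	 * the node itself must not be touched again until revalidated.
	 */
	key = n->key;

	pthread_mutex_unlock(&list_lock);
	printf("writing back key %d outside the lock\n", key);	/* slow work */
	pthread_mutex_lock(&list_lock);
}

int main(void)
{
	struct node a = { &head, &head, 42 };

	head.next = &a;			/* link a as the only element */
	head.prev = &a;

	pthread_mutex_lock(&list_lock);
	reclaim_one(&a);
	pthread_mutex_unlock(&list_lock);
	return 0;
}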