mm: zswap: function ordering: writeback
Shrinking needs writeback. Naturally, move the writeback code above
the shrinking code. Delete the forward decl.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Johannes Weiner <[email protected]>
Reviewed-by: Nhat Pham <[email protected]>
Cc: Chengming Zhou <[email protected]>
Cc: Yosry Ahmed <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
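
For readers unfamiliar with the C detail behind "delete the forward decl": a function only has to be declared before its first use, and a static function's definition doubles as its declaration. Once zswap_writeback_entry() is defined above the shrinker code that calls it, the separate prototype becomes redundant. The standalone sketch below (illustrative only; writeback_entry() and shrink_one() are made-up stand-ins, not code from this patch) shows the same pattern:

/* sketch.c -- illustrative only; writeback_entry() and shrink_one() are
 * stand-ins for zswap_writeback_entry() and the shrinker code. */
#include <stdio.h>

/*
 * Before the reordering, the caller appeared above the callee, so a
 * forward declaration was required here:
 *
 *	static int writeback_entry(int entry);
 */

/* After: the callee is defined first, so the definition itself acts as
 * the declaration and the prototype can simply be deleted. */
static int writeback_entry(int entry)
{
	return entry * 2;	/* stand-in for the real writeback work */
}

/* Shrinking needs writeback, so the caller now sits below the callee. */
static int shrink_one(int entry)
{
	return writeback_entry(entry);
}

int main(void)
{
	printf("%d\n", shrink_one(21));
	return 0;
}

In the patch itself, the new writeback section sits immediately above the "shrinker functions" section for exactly this reason, which is what lets the forward declaration near the top of mm/zswap.c go.
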
Authored by hnaz, committed by akpm00 on Feb 22, 2024
Parent: 64f200b, commit: 9986d35
1 changed file: mm/zswap.c (90 additions, 93 deletions)
@@ -276,9 +276,6 @@ static inline struct zswap_tree *swap_zswap_tree(swp_entry_t swp)
pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \
zpool_get_type((p)->zpools[0]))

static int zswap_writeback_entry(struct zswap_entry *entry,
swp_entry_t swpentry);

static bool zswap_is_full(void)
{
return totalram_pages() * zswap_max_pool_percent / 100 <
@@ -1163,6 +1160,96 @@ static void zswap_decompress(struct zswap_entry *entry, struct page *page)
zpool_unmap_handle(zpool, entry->handle);
}

/*********************************
* writeback code
**********************************/
/*
* Attempts to free an entry by adding a folio to the swap cache,
* decompressing the entry data into the folio, and issuing a
* bio write to write the folio back to the swap device.
*
* This can be thought of as a "resumed writeback" of the folio
* to the swap device. We are basically resuming the same swap
* writeback path that was intercepted with the zswap_store()
* in the first place. After the folio has been decompressed into
* the swap cache, the compressed version stored by zswap can be
* freed.
*/
static int zswap_writeback_entry(struct zswap_entry *entry,
swp_entry_t swpentry)
{
struct zswap_tree *tree;
struct folio *folio;
struct mempolicy *mpol;
bool folio_was_allocated;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
};

/* try to allocate swap cache folio */
mpol = get_task_policy(current);
folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
if (!folio)
return -ENOMEM;

/*
* Found an existing folio, we raced with swapin or concurrent
* shrinker. We generally writeback cold folios from zswap, and
* swapin means the folio just became hot, so skip this folio.
* For unlikely concurrent shrinker case, it will be unlinked
* and freed when invalidated by the concurrent shrinker anyway.
*/
if (!folio_was_allocated) {
folio_put(folio);
return -EEXIST;
}

/*
* folio is locked, and the swapcache is now secured against
* concurrent swapping to and from the slot. Verify that the
* swap entry hasn't been invalidated and recycled behind our
* backs (our zswap_entry reference doesn't prevent that), to
* avoid overwriting a new swap folio with old compressed data.
*/
tree = swap_zswap_tree(swpentry);
spin_lock(&tree->lock);
if (zswap_rb_search(&tree->rbroot, swp_offset(swpentry)) != entry) {
spin_unlock(&tree->lock);
delete_from_swap_cache(folio);
folio_unlock(folio);
folio_put(folio);
return -ENOMEM;
}

/* Safe to deref entry after the entry is verified above. */
zswap_entry_get(entry);
spin_unlock(&tree->lock);

zswap_decompress(entry, &folio->page);

count_vm_event(ZSWPWB);
if (entry->objcg)
count_objcg_event(entry->objcg, ZSWPWB);

spin_lock(&tree->lock);
zswap_invalidate_entry(tree, entry);
zswap_entry_put(entry);
spin_unlock(&tree->lock);

/* folio is up to date */
folio_mark_uptodate(folio);

/* move it to the tail of the inactive list after end_writeback */
folio_set_reclaim(folio);

/* start writeback */
__swap_writepage(folio, &wbc);
folio_put(folio);

return 0;
}

/*********************************
* shrinker functions
**********************************/
@@ -1419,96 +1506,6 @@ static void shrink_worker(struct work_struct *w)
zswap_pool_put(pool);
}

/*********************************
* writeback code
**********************************/
/*
* Attempts to free an entry by adding a folio to the swap cache,
* decompressing the entry data into the folio, and issuing a
* bio write to write the folio back to the swap device.
*
* This can be thought of as a "resumed writeback" of the folio
* to the swap device. We are basically resuming the same swap
* writeback path that was intercepted with the zswap_store()
* in the first place. After the folio has been decompressed into
* the swap cache, the compressed version stored by zswap can be
* freed.
*/
static int zswap_writeback_entry(struct zswap_entry *entry,
swp_entry_t swpentry)
{
struct zswap_tree *tree;
struct folio *folio;
struct mempolicy *mpol;
bool folio_was_allocated;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
};

/* try to allocate swap cache folio */
mpol = get_task_policy(current);
folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
if (!folio)
return -ENOMEM;

/*
* Found an existing folio, we raced with swapin or concurrent
* shrinker. We generally writeback cold folios from zswap, and
* swapin means the folio just became hot, so skip this folio.
* For unlikely concurrent shrinker case, it will be unlinked
* and freed when invalidated by the concurrent shrinker anyway.
*/
if (!folio_was_allocated) {
folio_put(folio);
return -EEXIST;
}

/*
* folio is locked, and the swapcache is now secured against
* concurrent swapping to and from the slot. Verify that the
* swap entry hasn't been invalidated and recycled behind our
* backs (our zswap_entry reference doesn't prevent that), to
* avoid overwriting a new swap folio with old compressed data.
*/
tree = swap_zswap_tree(swpentry);
spin_lock(&tree->lock);
if (zswap_rb_search(&tree->rbroot, swp_offset(swpentry)) != entry) {
spin_unlock(&tree->lock);
delete_from_swap_cache(folio);
folio_unlock(folio);
folio_put(folio);
return -ENOMEM;
}

/* Safe to deref entry after the entry is verified above. */
zswap_entry_get(entry);
spin_unlock(&tree->lock);

zswap_decompress(entry, &folio->page);

count_vm_event(ZSWPWB);
if (entry->objcg)
count_objcg_event(entry->objcg, ZSWPWB);

spin_lock(&tree->lock);
zswap_invalidate_entry(tree, entry);
zswap_entry_put(entry);
spin_unlock(&tree->lock);

/* folio is up to date */
folio_mark_uptodate(folio);

/* move it to the tail of the inactive list after end_writeback */
folio_set_reclaim(folio);

/* start writeback */
__swap_writepage(folio, &wbc);
folio_put(folio);

return 0;
}

static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
{
unsigned long *page;
