mm/swapfile.c: unify normal/huge code path in put_swap_page()

In this patch, the normal and huge code paths in put_swap_page() and
several helper functions are unified to avoid duplicated code and the
bugs that come with it, and to make the code easier to review.

More lines are removed than added, and the binary size is kept exactly
the same when CONFIG_TRANSPARENT_HUGEPAGE=n.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: "Huang, Ying" <[email protected]>
Suggested-by: Dave Hansen <[email protected]>
Acked-by: Dave Hansen <[email protected]>
Reviewed-by: Daniel Jordan <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Shaohua Li <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Minchan Kim <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
yhuang-intel authored and torvalds committed Aug 22, 2018
1 parent 33ee011 commit a448f2d
Showing 1 changed file with 37 additions and 46 deletions.
mm/swapfile.c: 37 additions & 46 deletions
@@ -204,8 +204,16 @@ static void discard_swap_cluster(struct swap_info_struct *si,
 
 #ifdef CONFIG_THP_SWAP
 #define SWAPFILE_CLUSTER	HPAGE_PMD_NR
+
+#define swap_entry_size(size)	(size)
 #else
 #define SWAPFILE_CLUSTER	256
+
+/*
+ * Define swap_entry_size() as constant to let compiler to optimize
+ * out some code if !CONFIG_THP_SWAP
+ */
+#define swap_entry_size(size)	1
 #endif
 #define LATENCY_LIMIT		256
 
@@ -1192,18 +1200,7 @@ void swap_free(swp_entry_t entry)
 /*
  * Called after dropping swapcache to decrease refcnt to swap entries.
  */
-static void swapcache_free(swp_entry_t entry)
-{
-	struct swap_info_struct *p;
-
-	p = _swap_info_get(entry);
-	if (p) {
-		if (!__swap_entry_free(p, entry, SWAP_HAS_CACHE))
-			free_swap_slot(entry);
-	}
-}
-
-static void swapcache_free_cluster(swp_entry_t entry)
+void put_swap_page(struct page *page, swp_entry_t entry)
 {
 	unsigned long offset = swp_offset(entry);
 	unsigned long idx = offset / SWAPFILE_CLUSTER;
@@ -1212,39 +1209,41 @@ static void swapcache_free_cluster(swp_entry_t entry)
 	unsigned char *map;
 	unsigned int i, free_entries = 0;
 	unsigned char val;
-
-	if (!IS_ENABLED(CONFIG_THP_SWAP))
-		return;
+	int size = swap_entry_size(hpage_nr_pages(page));
 
 	si = _swap_info_get(entry);
 	if (!si)
 		return;
 
-	ci = lock_cluster(si, offset);
-	VM_BUG_ON(!cluster_is_huge(ci));
-	map = si->swap_map + offset;
-	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
-		val = map[i];
-		VM_BUG_ON(!(val & SWAP_HAS_CACHE));
-		if (val == SWAP_HAS_CACHE)
-			free_entries++;
-	}
-	if (!free_entries) {
-		for (i = 0; i < SWAPFILE_CLUSTER; i++)
-			map[i] &= ~SWAP_HAS_CACHE;
-	}
-	cluster_clear_huge(ci);
-	unlock_cluster(ci);
-	if (free_entries == SWAPFILE_CLUSTER) {
-		spin_lock(&si->lock);
+	if (size == SWAPFILE_CLUSTER) {
 		ci = lock_cluster(si, offset);
-		memset(map, 0, SWAPFILE_CLUSTER);
+		VM_BUG_ON(!cluster_is_huge(ci));
+		map = si->swap_map + offset;
+		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
+			val = map[i];
+			VM_BUG_ON(!(val & SWAP_HAS_CACHE));
+			if (val == SWAP_HAS_CACHE)
+				free_entries++;
+		}
+		if (!free_entries) {
+			for (i = 0; i < SWAPFILE_CLUSTER; i++)
+				map[i] &= ~SWAP_HAS_CACHE;
+		}
+		cluster_clear_huge(ci);
 		unlock_cluster(ci);
-		mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
-		swap_free_cluster(si, idx);
-		spin_unlock(&si->lock);
-	} else if (free_entries) {
-		for (i = 0; i < SWAPFILE_CLUSTER; i++, entry.val++) {
+		if (free_entries == SWAPFILE_CLUSTER) {
+			spin_lock(&si->lock);
+			ci = lock_cluster(si, offset);
+			memset(map, 0, SWAPFILE_CLUSTER);
+			unlock_cluster(ci);
+			mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
+			swap_free_cluster(si, idx);
+			spin_unlock(&si->lock);
+			return;
+		}
+	}
+	if (size == 1 || free_entries) {
+		for (i = 0; i < size; i++, entry.val++) {
 			if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE))
 				free_swap_slot(entry);
 		}
@@ -1268,14 +1267,6 @@ int split_swap_cluster(swp_entry_t entry)
 }
 #endif
 
-void put_swap_page(struct page *page, swp_entry_t entry)
-{
-	if (!PageTransHuge(page))
-		swapcache_free(entry);
-	else
-		swapcache_free_cluster(entry);
-}
-
 static int swp_entry_cmp(const void *ent1, const void *ent2)
 {
 	const swp_entry_t *e1 = ent1, *e2 = ent2;
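
The "binary size is kept exactly the same" claim follows from the
swap_entry_size() macro added in the first hunk: with CONFIG_THP_SWAP
disabled it expands to the constant 1, so the compiler can prove that
size == SWAPFILE_CLUSTER is never true and discard the whole huge-cluster
branch. Below is a minimal user-space sketch of that pattern, simplified
to drop locking and the partially-freed-cluster handling; the names
THP_ENABLED, CLUSTER_SIZE, entry_size() and put_entries() are illustrative
stand-ins, not kernel interfaces.

/* Build with: gcc -O2 demo.c              (entry_size(n) == 1, branch folded away)
 *         or: gcc -O2 -DTHP_ENABLED demo.c (entry_size(n) == n, branch kept)
 */
#include <stdio.h>

#define CLUSTER_SIZE	256

#ifdef THP_ENABLED
#define entry_size(n)	(n)	/* huge entries report their real size */
#else
#define entry_size(n)	1	/* constant, so the cluster path is dead code */
#endif

static void free_cluster(void)
{
	puts("freeing a whole cluster at once");
}

static void free_one(int i)
{
	printf("freeing entry %d\n", i);
}

/* One function serves both cases, mirroring the unified put_swap_page(). */
static void put_entries(int npages)
{
	int size = entry_size(npages);

	/*
	 * When entry_size(n) is the constant 1, this test is always false,
	 * so the compiler removes the call to free_cluster() and the branch
	 * adds nothing to the generated binary.
	 */
	if (size == CLUSTER_SIZE) {
		free_cluster();
		return;
	}

	for (int i = 0; i < size; i++)
		free_one(i);
}

int main(void)
{
	put_entries(1);			/* a normal page */
	put_entries(CLUSTER_SIZE);	/* a huge page when THP_ENABLED is set */
	return 0;
}

The kernel version additionally falls through to the per-entry loop when a
huge cluster still has non-cache references, which is why the real function
tests size == 1 || free_entries rather than returning unconditionally from
the cluster branch.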
