mm/swap: implement workingset detection for anonymous LRU
This patch implements workingset detection for the anonymous LRU.  All of the
infrastructure was implemented by the previous patches, so this patch just
activates workingset detection by installing/retrieving the shadow entry and
adding the refault calculation.

Signed-off-by: Joonsoo Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Minchan Kim <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
JoonsooKim authored and torvalds committed Aug 12, 2020
1 parent 3852f67 commit aae466b
Showing 5 changed files with 43 additions and 19 deletions.
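For orientation, here is the round trip this patch wires up: eviction leaves a shadow entry in the swap-cache slot the page used to occupy, and the swap-in paths read that shadow back and hand it to workingset_refault(), which decides whether the page deserves the active anon LRU. The snippet below is a toy model of that flow, not kernel code; the types, helpers and "clock" values are illustrative assumptions, only the concept mirrors the diff that follows.

#include <stdio.h>
#include <stdbool.h>

/* Toy model of the shadow-entry round trip (illustrative only). */
typedef unsigned long shadow_t;   /* stands in for the packed eviction cookie */
static shadow_t swap_cache_slot;  /* stands in for the swap cache XArray slot */

/* Eviction side: __remove_mapping() gets a shadow from workingset_eviction()
 * and __delete_from_swap_cache() leaves it in the vacated slot. */
static void evict_anon_page(unsigned long eviction_clock)
{
	swap_cache_slot = eviction_clock;
}

/* Refault side: do_swap_page()/__read_swap_cache_async() retrieve the shadow
 * (via get_shadow_from_swap_cache() or add_to_swap_cache()'s *shadowp) and
 * workingset_refault() compares the refault distance with the workingset
 * size to decide whether to re-activate the page. */
static bool refault_anon_page(unsigned long refault_clock,
			      unsigned long workingset_size)
{
	unsigned long refault_distance = refault_clock - swap_cache_slot;

	return refault_distance <= workingset_size;
}

int main(void)
{
	evict_anon_page(100);
	printf("%d\n", refault_anon_page(110, 64)); /* 1: refaulted quickly, activate */
	printf("%d\n", refault_anon_page(400, 64)); /* 0: too cold, stays inactive */
	return 0;
}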
6 changes: 6 additions & 0 deletions include/linux/swap.h
@@ -414,6 +414,7 @@ extern struct address_space *swapper_spaces[];
 extern unsigned long total_swapcache_pages(void);
 extern void show_swap_cache_info(void);
 extern int add_to_swap(struct page *page);
+extern void *get_shadow_from_swap_cache(swp_entry_t entry);
 extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
 			gfp_t gfp, void **shadowp);
 extern void __delete_from_swap_cache(struct page *page,
@@ -573,6 +574,11 @@ static inline int add_to_swap(struct page *page)
 	return 0;
 }
 
+static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+	return NULL;
+}
+
 static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
 					gfp_t gfp_mask, void **shadowp)
 {
11 changes: 4 additions & 7 deletions mm/memory.c
@@ -3098,6 +3098,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	int locked;
 	int exclusive = 0;
 	vm_fault_t ret = 0;
+	void *shadow = NULL;
 
 	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
 		goto out;
@@ -3149,13 +3150,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 					goto out_page;
 				}
 
-				/*
-				 * XXX: Move to lru_cache_add() when it
-				 * supports new vs putback
-				 */
-				spin_lock_irq(&page_pgdat(page)->lru_lock);
-				lru_note_cost_page(page);
-				spin_unlock_irq(&page_pgdat(page)->lru_lock);
+				shadow = get_shadow_from_swap_cache(entry);
+				if (shadow)
+					workingset_refault(page, shadow);
 
 				lru_cache_add(page);
 				swap_readpage(page, true);
23 changes: 18 additions & 5 deletions mm/swap_state.c
@@ -106,6 +106,20 @@ void show_swap_cache_info(void)
 	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
 
+void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+	struct address_space *address_space = swap_address_space(entry);
+	pgoff_t idx = swp_offset(entry);
+	struct page *page;
+
+	page = find_get_entry(address_space, idx);
+	if (xa_is_value(page))
+		return page;
+	if (page)
+		put_page(page);
+	return NULL;
+}
+
 /*
  * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
@@ -406,6 +420,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 {
 	struct swap_info_struct *si;
 	struct page *page;
+	void *shadow = NULL;
 
 	*new_page_allocated = false;
 
@@ -474,7 +489,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 	__SetPageSwapBacked(page);
 
 	/* May fail (-ENOMEM) if XArray node allocation failed. */
-	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, NULL)) {
+	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
 		put_swap_page(page, entry);
 		goto fail_unlock;
 	}
@@ -484,10 +499,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		goto fail_unlock;
 	}
 
-	/* XXX: Move to lru_cache_add() when it supports new vs putback */
-	spin_lock_irq(&page_pgdat(page)->lru_lock);
-	lru_note_cost_page(page);
-	spin_unlock_irq(&page_pgdat(page)->lru_lock);
+	if (shadow)
+		workingset_refault(page, shadow);
 
 	/* Caller will initiate read into locked page */
 	SetPageWorkingset(page);
7 changes: 4 additions & 3 deletions mm/vmscan.c
@@ -854,6 +854,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 {
 	unsigned long flags;
 	int refcount;
+	void *shadow = NULL;
 
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
@@ -896,13 +897,13 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	if (PageSwapCache(page)) {
 		swp_entry_t swap = { .val = page_private(page) };
 		mem_cgroup_swapout(page, swap);
-		__delete_from_swap_cache(page, swap, NULL);
+		if (reclaimed && !mapping_exiting(mapping))
+			shadow = workingset_eviction(page, target_memcg);
+		__delete_from_swap_cache(page, swap, shadow);
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 		put_swap_page(page, swap);
-		workingset_eviction(page, target_memcg);
 	} else {
 		void (*freepage)(struct page *);
-		void *shadow = NULL;
 
 		freepage = mapping->a_ops->freepage;
 		/*
15 changes: 11 additions & 4 deletions mm/workingset.c
@@ -353,15 +353,22 @@ void workingset_refault(struct page *page, void *shadow)
 	/*
 	 * Compare the distance to the existing workingset size. We
 	 * don't activate pages that couldn't stay resident even if
-	 * all the memory was available to the page cache. Whether
-	 * cache can compete with anon or not depends on having swap.
+	 * all the memory was available to the workingset. Whether
+	 * workingset competition needs to consider anon or not depends
+	 * on having swap.
 	 */
 	workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
-	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
+	if (!file) {
 		workingset_size += lruvec_page_state(eviction_lruvec,
-						     NR_INACTIVE_ANON);
+						     NR_INACTIVE_FILE);
+	}
+	if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
 		workingset_size += lruvec_page_state(eviction_lruvec,
 						     NR_ACTIVE_ANON);
+		if (file) {
+			workingset_size += lruvec_page_state(eviction_lruvec,
+							     NR_INACTIVE_ANON);
+		}
 	}
 	if (refault_distance > workingset_size)
 		goto out;
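To make the new sizing rule above concrete, the sketch below restates the post-patch check in workingset_refault(): a file refault competes with the active file set plus, when swap is available, the whole anon set; an anon refault competes with the whole file set plus, when swap is available, the active anon set. Names and the struct are illustrative; in the kernel the counts come from lruvec_page_state() and swap availability from mem_cgroup_get_nr_swap_pages().

#include <stdbool.h>

/* Which LRU lists a refaulting page has to compete against (sketch). */
struct lru_sizes {
	unsigned long active_file, inactive_file;
	unsigned long active_anon, inactive_anon;
};

static bool should_activate(unsigned long refault_distance,
			    const struct lru_sizes *lru,
			    bool file_refault, bool have_swap)
{
	unsigned long workingset_size = lru->active_file;

	if (!file_refault)
		workingset_size += lru->inactive_file;
	if (have_swap) {
		workingset_size += lru->active_anon;
		if (file_refault)
			workingset_size += lru->inactive_anon;
	}
	return refault_distance <= workingset_size;
}

Note that the kernel phrases the same test in the negative, "if (refault_distance > workingset_size) goto out;": the page stays inactive if it could not have stayed resident even with that much memory available to the workingset.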
