Skip to content

Commit

Permalink
vmscan: free swap space on swap-in/activation
Browse files Browse the repository at this point in the history
If vm_swap_full() (swap space more than 50% full), the system will free
swap space at swapin time.  With this patch, the system will also free the
swap space in the pageout code, when we decide that the page is not a
candidate for swapout (and just wasting swap space).

Signed-off-by: Rik van Riel <[email protected]>
Signed-off-by: Lee Schermerhorn <[email protected]>
Signed-off-by: MinChan Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
Rik van Riel authored and torvalds committed Oct 20, 2008
1 parent f04e9eb commit 68a2239
Show file tree
Hide file tree
Showing 5 changed files with 60 additions and 3 deletions.
1 change: 1 addition & 0 deletions include/linux/pagevec.h
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ void __pagevec_release_nonlru(struct pagevec *pvec);
void __pagevec_free(struct pagevec *pvec);
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
void pagevec_strip(struct pagevec *pvec);
void pagevec_swap_free(struct pagevec *pvec);
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
pgoff_t start, unsigned nr_pages);
unsigned pagevec_lookup_tag(struct pagevec *pvec,
Expand Down
6 changes: 6 additions & 0 deletions include/linux/swap.h
Original file line number Diff line number Diff line change
Expand Up @@ -265,6 +265,7 @@ extern sector_t swapdev_block(int, pgoff_t);
extern struct swap_info_struct *get_swap_info_struct(unsigned);
extern int can_share_swap_page(struct page *);
extern int remove_exclusive_swap_page(struct page *);
extern int remove_exclusive_swap_page_ref(struct page *);
struct backing_dev_info;

/* linux/mm/thrash.c */
Expand Down Expand Up @@ -353,6 +354,11 @@ static inline int remove_exclusive_swap_page(struct page *p)
return 0;
}

/*
 * No-op stub: with swap disabled there is never exclusive swap space
 * to reclaim, so report "nothing freed".
 * NOTE(review): presumably this sits in the !CONFIG_SWAP branch of
 * swap.h alongside the other stubs — confirm against the full header.
 */
static inline int remove_exclusive_swap_page_ref(struct page *page)
{
return 0;
}

static inline swp_entry_t get_swap_page(void)
{
swp_entry_t entry;
Expand Down
24 changes: 24 additions & 0 deletions mm/swap.c
Original file line number Diff line number Diff line change
Expand Up @@ -427,6 +427,30 @@ void pagevec_strip(struct pagevec *pvec)
}
}

/**
 * pagevec_swap_free - try to free swap space from the pages in a pagevec
 * @pvec: pagevec with swapcache pages to free the swap space of
 *
 * Callers must hold their own reference on every page and must NOT
 * hold the page lock on any of them.  Because only a trylock is used
 * on each page, some pages may be skipped and keep their swap space.
 */
void pagevec_swap_free(struct pagevec *pvec)
{
	int idx;
	int nr = pagevec_count(pvec);

	for (idx = 0; idx < nr; idx++) {
		struct page *page = pvec->pages[idx];

		/* Cheap unlocked check first; skip non-swapcache pages. */
		if (!PageSwapCache(page))
			continue;
		/* Never block here -- give up on contended pages. */
		if (!trylock_page(page))
			continue;
		/* Recheck under the lock: state may have changed meanwhile. */
		if (PageSwapCache(page))
			remove_exclusive_swap_page_ref(page);
		unlock_page(page);
	}
}

/**
* pagevec_lookup - gang pagecache lookup
* @pvec: Where the resulting pages are placed
Expand Down
25 changes: 22 additions & 3 deletions mm/swapfile.c
Original file line number Diff line number Diff line change
Expand Up @@ -344,7 +344,7 @@ int can_share_swap_page(struct page *page)
* Work out if there are any other processes sharing this
* swap cache page. Free it if you can. Return success.
*/
int remove_exclusive_swap_page(struct page *page)
static int remove_exclusive_swap_page_count(struct page *page, int count)
{
int retval;
struct swap_info_struct * p;
Expand All @@ -357,7 +357,7 @@ int remove_exclusive_swap_page(struct page *page)
return 0;
if (PageWriteback(page))
return 0;
if (page_count(page) != 2) /* 2: us + cache */
if (page_count(page) != count) /* us + cache + ptes */
return 0;

entry.val = page_private(page);
Expand All @@ -370,7 +370,7 @@ int remove_exclusive_swap_page(struct page *page)
if (p->swap_map[swp_offset(entry)] == 1) {
/* Recheck the page count with the swapcache lock held.. */
spin_lock_irq(&swapper_space.tree_lock);
if ((page_count(page) == 2) && !PageWriteback(page)) {
if ((page_count(page) == count) && !PageWriteback(page)) {
__delete_from_swap_cache(page);
SetPageDirty(page);
retval = 1;
Expand All @@ -387,6 +387,25 @@ int remove_exclusive_swap_page(struct page *page)
return retval;
}

/*
 * Free the swap entry when the page has exactly two references:
 * one held by the caller ("us") and one held by the swap cache.
 * Returns nonzero on success (swap entry freed), 0 otherwise.
 */
int remove_exclusive_swap_page(struct page *page)
{
return remove_exclusive_swap_page_count(page, 2);
}

/*
 * Variant for the pageout path, which holds an extra reference to the
 * page.  The expected reference count is therefore 2 (the caller's
 * extra reference plus the swap cache) plus one for each process that
 * has the page mapped, as given by page_mapcount().
 */
int remove_exclusive_swap_page_ref(struct page *page)
{
return remove_exclusive_swap_page_count(page, 2 + page_mapcount(page));
}

/*
* Free the swap entry like above, but also try to
* free the page cache entry if it is the last user.
Expand Down
7 changes: 7 additions & 0 deletions mm/vmscan.c
Original file line number Diff line number Diff line change
Expand Up @@ -647,6 +647,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
continue;

activate_locked:
/* Not a candidate for swapping, so reclaim swap space. */
if (PageSwapCache(page) && vm_swap_full())
remove_exclusive_swap_page_ref(page);
SetPageActive(page);
pgactivate++;
keep_locked:
Expand Down Expand Up @@ -1228,6 +1231,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
__mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
pgmoved = 0;
spin_unlock_irq(&zone->lru_lock);
if (vm_swap_full())
pagevec_swap_free(&pvec);
__pagevec_release(&pvec);
spin_lock_irq(&zone->lru_lock);
}
Expand All @@ -1237,6 +1242,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
__count_zone_vm_events(PGREFILL, zone, pgscanned);
__count_vm_events(PGDEACTIVATE, pgdeactivate);
spin_unlock_irq(&zone->lru_lock);
if (vm_swap_full())
pagevec_swap_free(&pvec);

pagevec_release(&pvec);
}
Expand Down

0 comments on commit 68a2239

Please sign in to comment.