mm: fold mlocked_vma_newpage() into its only call site
In a previous commit ("mm: use the light version __mod_zone_page_state in
mlocked_vma_newpage()"), an irq-unsafe __mod_zone_page_state is used.  As
suggested by Andrew, to reduce the risk that new call sites incorrectly
use mlocked_vma_newpage() without realizing they are adding a race, this
patch folds mlocked_vma_newpage() into its only call site,
page_add_new_anon_rmap, making it open-coded so that readers can see what
is going on.

[[email protected]: coding-style fixes]
Signed-off-by: Jianyu Zhan <[email protected]>
Suggested-by: Andrew Morton <[email protected]>
Suggested-by: Hugh Dickins <[email protected]>
Acked-by: Hugh Dickins <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
JianyuZhan authored and torvalds committed Jun 4, 2014
1 parent bea04b0 commit 7ee07a4
Showing 2 changed files with 17 additions and 32 deletions.
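For context, a minimal sketch of the relationship the commit message relies on (not part of the patch; the function name below is hypothetical and the body is illustrative only): the irq-safe zone counter update brackets the light __mod_zone_page_state() with local_irq_save()/local_irq_restore(), while the light version assumes the caller already excludes concurrent updates; here that exclusion comes from the pte lock (a spinlock, so preemption is disabled) and from NR_MLOCK never being modified from interrupt context.

#include <linux/mmzone.h>	/* struct zone, enum zone_stat_item */
#include <linux/vmstat.h>	/* __mod_zone_page_state() */
#include <linux/irqflags.h>	/* local_irq_save()/local_irq_restore() */

/*
 * Hypothetical illustration only: an irq-safe counter update is the
 * light update bracketed by interrupt disabling.  A caller such as
 * page_add_new_anon_rmap(), which runs with the pte spinlock held and
 * whose counter is never touched from interrupt context, can call
 * __mod_zone_page_state() directly and skip this overhead.
 */
static void irq_safe_mod_zone_page_state_sketch(struct zone *zone,
						enum zone_stat_item item,
						int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}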
29 changes: 0 additions & 29 deletions mm/internal.h
@@ -188,31 +188,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 }
 
-/*
- * Called only in fault path, to determine if a new page is being
- * mapped into a LOCKED vma. If it is, mark page as mlocked.
- */
-static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
-				    struct page *page)
-{
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
-		return 0;
-
-	if (!TestSetPageMlocked(page)) {
-		/*
-		 * We use the irq-unsafe __mod_zone_page_stat because this
-		 * counter is not modified from interrupt context, and the pte
-		 * lock is held(spinlock), which implies preemption disabled.
-		 */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
-	}
-	return 1;
-}
-
 /*
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
@@ -255,10 +230,6 @@ extern unsigned long vma_address(struct page *page,
 				 struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
-{
-	return 0;
-}
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
20 changes: 17 additions & 3 deletions mm/rmap.c
@@ -1032,11 +1032,25 @@ void page_add_new_anon_rmap(struct page *page,
 	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
 			hpage_nr_pages(page));
 	__page_set_anon_rmap(page, vma, address, 1);
-	if (!mlocked_vma_newpage(vma, page)) {
+
+	VM_BUG_ON_PAGE(PageLRU(page), page);
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
 		SetPageActive(page);
 		lru_cache_add(page);
-	} else
-		add_page_to_unevictable_list(page);
+		return;
+	}
+
+	if (!TestSetPageMlocked(page)) {
+		/*
+		 * We use the irq-unsafe __mod_zone_page_stat because this
+		 * counter is not modified from interrupt context, and the pte
+		 * lock is held(spinlock), which implies preemption disabled.
+		 */
+		__mod_zone_page_state(page_zone(page), NR_MLOCK,
+				hpage_nr_pages(page));
+		count_vm_event(UNEVICTABLE_PGMLOCKED);
+	}
+	add_page_to_unevictable_list(page);
 }
 
 /**
