thp: introduce khugepaged_prealloc_page and khugepaged_alloc_page
They are used to abstract the difference between NUMA-enabled and
NUMA-disabled builds and make the code more readable.

Signed-off-by: Xiao Guangrong <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Xiao Guangrong authored and torvalds committed Oct 9, 2012
1 parent 420256e commit 26234f3
Showing 1 changed file with 98 additions and 68 deletions.
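
Before the diff itself, here is a minimal, self-contained userspace sketch of the pattern this patch applies: hide a compile-time configuration difference behind a pair of helpers so the caller's scan loop needs no #ifdef. Everything below (SKETCH_NUMA, SKETCH_PAGE_SIZE, prealloc_page) is an illustrative stand-in, not the kernel API; only the shape of the loop mirrors the new khugepaged_do_scan().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a huge page; a plain buffer in this sketch. */
#define SKETCH_PAGE_SIZE 4096

#ifdef SKETCH_NUMA
/*
 * "NUMA" flavor: allocation is deferred until the target node is
 * known, so preallocation only recycles a page left over from the
 * previous iteration.
 */
static bool prealloc_page(void **page, bool *wait)
{
	(void)wait;		/* no backoff needed in this flavor */
	free(*page);		/* free(NULL) is a no-op */
	*page = NULL;
	return true;
}
#else
/*
 * "!NUMA" flavor: allocate up front; on failure, back off once if
 * allowed, otherwise give up.
 */
static bool prealloc_page(void **page, bool *wait)
{
	while (!*page) {
		*page = malloc(SKETCH_PAGE_SIZE);
		if (!*page) {
			if (!*wait)
				return false;
			*wait = false;	/* back off only once */
		}
	}
	return true;
}
#endif

int main(void)
{
	void *page = NULL;
	bool wait = true;

	/* The scan loop no longer needs its own #ifdef: */
	for (int progress = 0; progress < 3; progress++) {
		if (!prealloc_page(&page, &wait))
			break;
		printf("scan pass %d\n", progress);
	}
	free(page);
	return 0;
}

Compile with or without -DSKETCH_NUMA: the main() loop is identical either way, which is the readability gain the commit message describes.
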
mm/huge_memory.c: 166 changes (98 additions & 68 deletions)
@@ -1827,28 +1827,34 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
 	}
 }
 
-static void collapse_huge_page(struct mm_struct *mm,
-				   unsigned long address,
-				   struct page **hpage,
-				   struct vm_area_struct *vma,
-				   int node)
+static void khugepaged_alloc_sleep(void)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd, _pmd;
-	pte_t *pte;
-	pgtable_t pgtable;
-	struct page *new_page;
-	spinlock_t *ptl;
-	int isolated;
-	unsigned long hstart, hend;
+	wait_event_freezable_timeout(khugepaged_wait, false,
+			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
+}
 
-	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-#ifndef CONFIG_NUMA
-	up_read(&mm->mmap_sem);
-	VM_BUG_ON(!*hpage);
-	new_page = *hpage;
-#else
+#ifdef CONFIG_NUMA
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+	if (IS_ERR(*hpage)) {
+		if (!*wait)
+			return false;
+
+		*wait = false;
+		khugepaged_alloc_sleep();
+	} else if (*hpage) {
+		put_page(*hpage);
+		*hpage = NULL;
+	}
+
+	return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+		       struct vm_area_struct *vma, unsigned long address,
+		       int node)
+{
+	VM_BUG_ON(*hpage);
 	/*
 	 * Allocate the page while the vma is still valid and under

@@ -1860,23 +1866,89 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * mmap_sem in read mode is good idea also to allow greater
 	 * scalability.
 	 */
-	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
+	*hpage = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
 				      node, __GFP_OTHER_NODE);
 
 	/*
 	 * After allocating the hugepage, release the mmap_sem read lock in
 	 * preparation for taking it in write mode.
 	 */
 	up_read(&mm->mmap_sem);
-	if (unlikely(!new_page)) {
+	if (unlikely(!*hpage)) {
 		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
 		*hpage = ERR_PTR(-ENOMEM);
-		return;
+		return NULL;
 	}
-	*hpage = new_page;
+
 	count_vm_event(THP_COLLAPSE_ALLOC);
+	return *hpage;
+}
+#else
+static struct page *khugepaged_alloc_hugepage(bool *wait)
+{
+	struct page *hpage;
+
+	do {
+		hpage = alloc_hugepage(khugepaged_defrag());
+		if (!hpage) {
+			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+			if (!*wait)
+				return NULL;
+
+			*wait = false;
+			khugepaged_alloc_sleep();
+		} else
+			count_vm_event(THP_COLLAPSE_ALLOC);
+	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
+
+	return hpage;
+}
+
+static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+{
+	if (!*hpage)
+		*hpage = khugepaged_alloc_hugepage(wait);
+
+	if (unlikely(!*hpage))
+		return false;
+
+	return true;
+}
+
+static struct page
+*khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
+		       struct vm_area_struct *vma, unsigned long address,
+		       int node)
+{
+	up_read(&mm->mmap_sem);
+	VM_BUG_ON(!*hpage);
+	return *hpage;
+}
+#endif
+
+static void collapse_huge_page(struct mm_struct *mm,
+			       unsigned long address,
+			       struct page **hpage,
+			       struct vm_area_struct *vma,
+			       int node)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd, _pmd;
+	pte_t *pte;
+	pgtable_t pgtable;
+	struct page *new_page;
+	spinlock_t *ptl;
+	int isolated;
+	unsigned long hstart, hend;
+
+	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+
+	/* release the mmap_sem read lock. */
+	new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
+	if (!new_page)
+		return;
 
 	if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL)))
 		return;

@@ -2215,34 +2287,6 @@ static int khugepaged_wait_event(void)
 		kthread_should_stop();
 }
 
-static void khugepaged_alloc_sleep(void)
-{
-	wait_event_freezable_timeout(khugepaged_wait, false,
-			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
-}
-
-#ifndef CONFIG_NUMA
-static struct page *khugepaged_alloc_hugepage(bool *wait)
-{
-	struct page *hpage;
-
-	do {
-		hpage = alloc_hugepage(khugepaged_defrag());
-		if (!hpage) {
-			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
-			if (!*wait)
-				return NULL;
-
-			*wait = false;
-			khugepaged_alloc_sleep();
-		} else
-			count_vm_event(THP_COLLAPSE_ALLOC);
-	} while (unlikely(!hpage) && likely(khugepaged_enabled()));
-
-	return hpage;
-}
-#endif
-
 static void khugepaged_do_scan(void)
 {
 	struct page *hpage = NULL;

@@ -2253,23 +2297,9 @@ static void khugepaged_do_scan(void)
 	barrier(); /* write khugepaged_pages_to_scan to local stack */
 
 	while (progress < pages) {
-#ifndef CONFIG_NUMA
-		if (!hpage)
-			hpage = khugepaged_alloc_hugepage(&wait);
-
-		if (unlikely(!hpage))
+		if (!khugepaged_prealloc_page(&hpage, &wait))
 			break;
-#else
-		if (IS_ERR(hpage)) {
-			if (!wait)
-				break;
-			wait = false;
-			khugepaged_alloc_sleep();
-		} else if (hpage) {
-			put_page(hpage);
-			hpage = NULL;
-		}
-#endif
 
 		cond_resched();
 
 		if (unlikely(kthread_should_stop() || freezing(current)))
