mm: fix get_user_pages() vs device-dax pud mappings

A new unit test for the device-dax 1GB enabling currently fails with
this warning before hanging the test thread:

 WARNING: CPU: 0 PID: 21 at lib/percpu-refcount.c:155 percpu_ref_switch_to_atomic_rcu+0x1e3/0x1f0
 percpu ref (dax_pmem_percpu_release [dax_pmem]) <= 0 (0) after switching to atomic
 [..]
 CPU: 0 PID: 21 Comm: rcuos/1 Tainted: G           O    4.10.0-rc7-next-20170207+ #944
 [..]
 Call Trace:
  dump_stack+0x86/0xc3
  __warn+0xcb/0xf0
  warn_slowpath_fmt+0x5f/0x80
  ? rcu_nocb_kthread+0x27a/0x510
  ? dax_pmem_percpu_exit+0x50/0x50 [dax_pmem]
  percpu_ref_switch_to_atomic_rcu+0x1e3/0x1f0
  ? percpu_ref_exit+0x60/0x60
  rcu_nocb_kthread+0x339/0x510
  ? rcu_nocb_kthread+0x27a/0x510
  kthread+0x101/0x140

The get_user_pages() path needs to arrange for references to be taken
against the dev_pagemap instance backing the pud mapping.  Refactor the
existing __gup_device_huge_pmd() to also account for the pud case.
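
What the test exercises is, in essence, direct I/O into a device-dax
mapping that is large enough to be pud-mapped. A minimal userspace sketch
of that kind of access, assuming a hypothetical 1GB-aligned device-dax
node at /dev/dax0.0 and a hypothetical scratch file for the O_DIRECT read
(illustrative only, not the actual unit test):

/* Illustrative sketch: device path, file name and sizes are assumptions. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define PUD_SIZE (1UL << 30)    /* 1GB: big enough to be backed by a pud */

int main(void)
{
        int dax = open("/dev/dax0.0", O_RDWR);              /* hypothetical device-dax node */
        int src = open("scratch.bin", O_RDONLY | O_DIRECT); /* hypothetical source file */

        if (dax < 0 || src < 0) {
                perror("open");
                return 1;
        }

        /* MAP_SHARED mapping of the whole 1GB region */
        void *buf = mmap(NULL, PUD_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, dax, 0);
        if (buf == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /*
         * An O_DIRECT read into the device-dax mapping makes the kernel
         * get_user_pages() the destination buffer, i.e. the pud-mapped range.
         */
        if (pread(src, buf, 1UL << 20, 0) < 0)
                perror("pread");

        munmap(buf, PUD_SIZE);
        close(src);
        close(dax);
        return 0;
}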

Link: http://lkml.kernel.org/r/148653181153.38226.9605457830505509385.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <[email protected]>
Cc: Dave Jiang <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Ross Zwisler <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Nilesh Choudhury <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Authored by djbw, committed by torvalds on Feb 25, 2017
Commit 220ced1, parent c791ace

 arch/x86/mm/gup.c | 28 ++++++++++++++++++++++++----
 1 file changed, 24 insertions(+), 4 deletions(-)
@@ -154,14 +154,12 @@ static inline void get_head_page_multiple(struct page *page, int nr)
         SetPageReferenced(page);
 }
 
-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
+static int __gup_device_huge(unsigned long pfn, unsigned long addr,
                 unsigned long end, struct page **pages, int *nr)
 {
         int nr_start = *nr;
-        unsigned long pfn = pmd_pfn(pmd);
         struct dev_pagemap *pgmap = NULL;
 
-        pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
         do {
                 struct page *page = pfn_to_page(pfn);
 
@@ -180,6 +178,24 @@ static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
         return 1;
 }
 
+static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
+                unsigned long end, struct page **pages, int *nr)
+{
+        unsigned long fault_pfn;
+
+        fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+        return __gup_device_huge(fault_pfn, addr, end, pages, nr);
+}
+
+static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
+                unsigned long end, struct page **pages, int *nr)
+{
+        unsigned long fault_pfn;
+
+        fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+        return __gup_device_huge(fault_pfn, addr, end, pages, nr);
+}
+
 static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
                 unsigned long end, int write, struct page **pages, int *nr)
 {
@@ -251,9 +267,13 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
 
         if (!pte_allows_gup(pud_val(pud), write))
                 return 0;
+
+        VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
+        if (pud_devmap(pud))
+                return __gup_device_huge_pud(pud, addr, end, pages, nr);
+
         /* hugepages are never "special" */
         VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL);
-        VM_BUG_ON(!pfn_valid(pud_pfn(pud)));
 
         refs = 0;
         head = pud_page(pud);
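
For reference, the loop inside the shared __gup_device_huge() helper
(folded out of the hunks above) is where the dev_pagemap reference is
taken, once per pfn, before each page is pinned. A sketch of that pattern
using the 4.10-era helpers get_dev_pagemap(), put_dev_pagemap() and
undo_dev_pagemap(), not necessarily the verbatim upstream body:

        /* Sketch only: the per-pfn dev_pagemap reference pattern. */
        do {
                struct page *page = pfn_to_page(pfn);

                /* Hold a reference on the backing dev_pagemap while pinning. */
                pgmap = get_dev_pagemap(pfn, pgmap);
                if (unlikely(!pgmap)) {
                        /* Device went away: drop pages pinned so far and bail. */
                        undo_dev_pagemap(nr, nr_start, pages);
                        return 0;
                }
                SetPageReferenced(page);
                pages[*nr] = page;
                get_page(page);
                (*nr)++;
                pfn++;
        } while (addr += PAGE_SIZE, addr != end);
        put_dev_pagemap(pgmap);
        return 1;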
