Merge tag 'libnvdimm-for-4.19_dax-memory-failure' of gitolite.kernel.org:pub/scm/linux/kernel/git/nvdimm/nvdimm

Pull libnvdimm memory-failure update from Dave Jiang:
 "As it stands, memory_failure() gets thoroughly confused by dev_pagemap
  backed mappings. The recovery code has specific enabling for several
  possible page states and needs new enabling to handle poison in dax
  mappings.

  In order to support reliable reverse mapping of user space addresses:

   1/ Add new locking in the memory_failure() rmap path to prevent races
      that would typically be handled by the page lock.

   2/ Since dev_pagemap pages are hidden from the page allocator and the
      "compound page" accounting machinery, add a mechanism to determine
      the size of the mapping that encompasses a given poisoned pfn.

   3/ Given pmem errors can be repaired, change the speculatively
      accessed poison protection, mce_unmap_kpfn(), to be reversible and
      otherwise allow ongoing access from the kernel.

  A side effect of this enabling is that MADV_HWPOISON becomes usable
  for dax mappings, however the primary motivation is to allow the
  system to survive userspace consumption of hardware-poison via dax.
  Specifically the current behavior is:

     mce: Uncorrected hardware memory error in user-access at af34214200
     {1}[Hardware Error]: It has been corrected by h/w and requires no further action
     mce: [Hardware Error]: Machine check events logged
     {1}[Hardware Error]: event severity: corrected
     Memory failure: 0xaf34214: reserved kernel page still referenced by 1 users
     [..]
     Memory failure: 0xaf34214: recovery action for reserved kernel page: Failed
     mce: Memory error not recovered
     <reboot>

  ...and with these changes:

     Injecting memory failure for pfn 0x20cb00 at process virtual address 0x7f763dd00000
     Memory failure: 0x20cb00: Killing dax-pmd:5421 due to hardware memory corruption
     Memory failure: 0x20cb00: recovery action for dax page: Recovered

  Given all the cross dependencies I propose taking this through
  nvdimm.git with acks from Naoya, x86/core, x86/RAS, and of course dax
  folks"

* tag 'libnvdimm-for-4.19_dax-memory-failure' of gitolite.kernel.org:pub/scm/linux/kernel/git/nvdimm/nvdimm:
  libnvdimm, pmem: Restore page attributes when clearing errors
  x86/memory_failure: Introduce {set, clear}_mce_nospec()
  x86/mm/pat: Prepare {reserve, free}_memtype() for "decoy" addresses
  mm, memory_failure: Teach memory_failure() about dev_pagemap pages
  filesystem-dax: Introduce dax_lock_mapping_entry()
  mm, memory_failure: Collect mapping size in collect_procs()
  mm, madvise_inject_error: Let memory_failure() optionally take a page reference
  mm, dev_pagemap: Do not clear ->mapping on final put
  mm, madvise_inject_error: Disable MADV_SOFT_OFFLINE for ZONE_DEVICE pages
  filesystem-dax: Set page->index
  device-dax: Set page->index
  device-dax: Enable page_mapping()
  device-dax: Convert to vmf_insert_mixed and vm_fault_t
torvalds committed Aug 26, 2018
2 parents 828bf6e + c953cc9 commit 2923b27
Showing 17 changed files with 481 additions and 135 deletions.
42 changes: 42 additions & 0 deletions arch/x86/include/asm/set_memory.h
@@ -89,4 +89,46 @@ extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);

#ifdef CONFIG_X86_64
static inline int set_mce_nospec(unsigned long pfn)
{
	unsigned long decoy_addr;
	int rc;

	/*
	 * Mark the linear address as UC to make sure we don't log more
	 * errors because of speculative access to the page.
	 * We would like to just call:
	 *	set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1);
	 * but doing that would radically increase the odds of a
	 * speculative access to the poison page because we'd have
	 * the virtual address of the kernel 1:1 mapping sitting
	 * around in registers.
	 * Instead we get tricky. We create a non-canonical address
	 * that looks just like the one we want, but has bit 63 flipped.
	 * This relies on set_memory_uc() properly sanitizing any __pa()
	 * results with __PHYSICAL_MASK or PTE_PFN_MASK.
	 */
	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

	rc = set_memory_uc(decoy_addr, 1);
	if (rc)
		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
	return rc;
}
#define set_mce_nospec set_mce_nospec

/* Restore full speculative operation to the pfn. */
static inline int clear_mce_nospec(unsigned long pfn)
{
	return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1);
}
#define clear_mce_nospec clear_mce_nospec
#else
/*
 * Few people would run a 32-bit kernel on a machine that supports
 * recoverable errors because they have too much memory to boot 32-bit.
 */
#endif

#endif /* _ASM_X86_SET_MEMORY_H */
15 changes: 0 additions & 15 deletions arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -113,21 +113,6 @@ static inline void mce_register_injector_chain(struct notifier_block *nb) { }
static inline void mce_unregister_injector_chain(struct notifier_block *nb) { }
#endif

#ifndef CONFIG_X86_64
/*
 * On 32-bit systems it would be difficult to safely unmap a poison page
 * from the kernel 1:1 map because there are no non-canonical addresses that
 * we can use to refer to the address without risking a speculative access.
 * However, this isn't much of an issue because:
 * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which
 *    are only mapped into the kernel as needed
 * 2) Few people would run a 32-bit kernel on a machine that supports
 *    recoverable errors because they have too much memory to boot 32-bit.
 */
static inline void mce_unmap_kpfn(unsigned long pfn) {}
#define mce_unmap_kpfn mce_unmap_kpfn
#endif

struct mca_config {
bool dont_log_ce;
bool cmci_disabled;
38 changes: 3 additions & 35 deletions arch/x86/kernel/cpu/mcheck/mce.c
@@ -42,6 +42,7 @@
#include <linux/irq_work.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/set_memory.h>

#include <asm/intel-family.h>
#include <asm/processor.h>
@@ -50,7 +51,6 @@
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/reboot.h>
#include <asm/set_memory.h>

#include "mce-internal.h"

@@ -108,10 +108,6 @@ static struct irq_work mce_irq_work;

static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);

#ifndef mce_unmap_kpfn
static void mce_unmap_kpfn(unsigned long pfn);
#endif

/*
* CPU/chipset specific EDAC code can register a notifier call here to print
* MCE errors in a human-readable form.
@@ -602,7 +598,7 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val,
if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) {
pfn = mce->addr >> PAGE_SHIFT;
if (!memory_failure(pfn, 0))
mce_unmap_kpfn(pfn);
set_mce_nospec(pfn);
}

return NOTIFY_OK;
@@ -1072,38 +1068,10 @@ static int do_memory_failure(struct mce *m)
if (ret)
pr_err("Memory error not recovered");
else
mce_unmap_kpfn(m->addr >> PAGE_SHIFT);
set_mce_nospec(m->addr >> PAGE_SHIFT);
return ret;
}

#ifndef mce_unmap_kpfn
static void mce_unmap_kpfn(unsigned long pfn)
{
	unsigned long decoy_addr;

	/*
	 * Unmap this page from the kernel 1:1 mappings to make sure
	 * we don't log more errors because of speculative access to
	 * the page.
	 * We would like to just call:
	 *	set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1);
	 * but doing that would radically increase the odds of a
	 * speculative access to the poison page because we'd have
	 * the virtual address of the kernel 1:1 mapping sitting
	 * around in registers.
	 * Instead we get tricky. We create a non-canonical address
	 * that looks just like the one we want, but has bit 63 flipped.
	 * This relies on set_memory_np() not checking whether we passed
	 * a legal address.
	 */

	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

	if (set_memory_np(decoy_addr, 1))
		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
}
#endif


/*
* Cases where we avoid rendezvous handler timeout:
16 changes: 16 additions & 0 deletions arch/x86/mm/pat.c
@@ -512,6 +512,17 @@ static int free_ram_pages_type(u64 start, u64 end)
return 0;
}

static u64 sanitize_phys(u64 address)
{
	/*
	 * When changing the memtype for pages containing poison allow
	 * for a "decoy" virtual address (bit 63 clear) passed to
	 * set_memory_X(). __pa() on a "decoy" address results in a
	 * physical address with bit 63 set.
	 */
	return address & __PHYSICAL_MASK;
}

/*
* req_type typically has one of the:
* - _PAGE_CACHE_MODE_WB
@@ -533,6 +544,8 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
int is_range_ram;
int err = 0;

start = sanitize_phys(start);
end = sanitize_phys(end);
BUG_ON(start >= end); /* end is exclusive */

if (!pat_enabled()) {
@@ -609,6 +622,9 @@ int free_memtype(u64 start, u64 end)
if (!pat_enabled())
return 0;

start = sanitize_phys(start);
end = sanitize_phys(end);

/* Low ISA region is always mapped WB. No need to track */
if (x86_platform.is_untracked_pat_range(start, end))
return 0;
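The sanitize_phys() helper above exists because set_mce_nospec() hands set_memory_uc() a "decoy" linear address with bit 63 flipped. The standalone user-space sketch below (not kernel code; the PAGE_OFFSET and __PHYSICAL_MASK values are assumed typical x86_64 4-level-paging constants, and the pfn is taken from the example log) walks the arithmetic: the decoy differs from the real 1:1 address only in bit 63, and masking the derived physical address recovers the poisoned pfn.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xffff888000000000ULL	/* assumed direct-map base */
#define PHYSICAL_MASK	((1ULL << 52) - 1)	/* assumed __PHYSICAL_MASK */
#define BIT63		(1ULL << 63)

int main(void)
{
	uint64_t pfn = 0x20cb00;	/* pfn from the example log above */
	uint64_t kaddr = PAGE_OFFSET + (pfn << PAGE_SHIFT);
	uint64_t decoy = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT63);

	/* The decoy differs from the real 1:1 address only in bit 63,
	 * so it is non-canonical and never matches a kernel pointer
	 * that speculation might pick up from a register. */
	printf("kaddr %#llx decoy %#llx\n",
	       (unsigned long long)kaddr, (unsigned long long)decoy);

	/* Masking the derived physical address, as sanitize_phys()
	 * does, still recovers the poisoned pfn for the memtype
	 * bookkeeping. */
	printf("recovered pfn %#llx\n",
	       (unsigned long long)(((decoy - PAGE_OFFSET) & PHYSICAL_MASK)
				    >> PAGE_SHIFT));
	return 0;
}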
75 changes: 48 additions & 27 deletions drivers/dax/device.c
@@ -248,13 +248,12 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
return -1;
}

static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
struct vm_fault *vmf, pfn_t *pfn)
{
struct device *dev = &dev_dax->dev;
struct dax_region *dax_region;
int rc = VM_FAULT_SIGBUS;
phys_addr_t phys;
pfn_t pfn;
unsigned int fault_size = PAGE_SIZE;

if (check_vma(dev_dax, vmf->vma, __func__))
@@ -276,26 +275,19 @@ static int __dev_dax_pte_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}

pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

rc = vm_insert_mixed(vmf->vma, vmf->address, pfn);

if (rc == -ENOMEM)
return VM_FAULT_OOM;
if (rc < 0 && rc != -EBUSY)
return VM_FAULT_SIGBUS;
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

return VM_FAULT_NOPAGE;
return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}

static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
struct vm_fault *vmf, pfn_t *pfn)
{
unsigned long pmd_addr = vmf->address & PMD_MASK;
struct device *dev = &dev_dax->dev;
struct dax_region *dax_region;
phys_addr_t phys;
pgoff_t pgoff;
pfn_t pfn;
unsigned int fault_size = PMD_SIZE;

if (check_vma(dev_dax, vmf->vma, __func__))
@@ -331,21 +323,21 @@ static int __dev_dax_pmd_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}

pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn,
return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
vmf->flags & FAULT_FLAG_WRITE);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
struct vm_fault *vmf, pfn_t *pfn)
{
unsigned long pud_addr = vmf->address & PUD_MASK;
struct device *dev = &dev_dax->dev;
struct dax_region *dax_region;
phys_addr_t phys;
pgoff_t pgoff;
pfn_t pfn;
unsigned int fault_size = PUD_SIZE;


@@ -382,23 +374,26 @@ static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}

pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn,
return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
vmf->flags & FAULT_FLAG_WRITE);
}
#else
static int __dev_dax_pud_fault(struct dev_dax *dev_dax, struct vm_fault *vmf)
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
struct vm_fault *vmf, pfn_t *pfn)
{
return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static int dev_dax_huge_fault(struct vm_fault *vmf,
static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
enum page_entry_size pe_size)
{
int rc, id;
struct file *filp = vmf->vma->vm_file;
unsigned long fault_size;
int rc, id;
pfn_t pfn;
struct dev_dax *dev_dax = filp->private_data;

dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
@@ -408,23 +403,49 @@ static int dev_dax_huge_fault(struct vm_fault *vmf,
id = dax_read_lock();
switch (pe_size) {
case PE_SIZE_PTE:
rc = __dev_dax_pte_fault(dev_dax, vmf);
fault_size = PAGE_SIZE;
rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
break;
case PE_SIZE_PMD:
rc = __dev_dax_pmd_fault(dev_dax, vmf);
fault_size = PMD_SIZE;
rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
break;
case PE_SIZE_PUD:
rc = __dev_dax_pud_fault(dev_dax, vmf);
fault_size = PUD_SIZE;
rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
break;
default:
rc = VM_FAULT_SIGBUS;
}

	if (rc == VM_FAULT_NOPAGE) {
		unsigned long i;
		pgoff_t pgoff;

		/*
		 * In the device-dax case the only possibility for a
		 * VM_FAULT_NOPAGE result is when device-dax capacity is
		 * mapped. No need to consider the zero page, or racing
		 * conflicting mappings.
		 */
		pgoff = linear_page_index(vmf->vma, vmf->address
				& ~(fault_size - 1));
		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
			struct page *page;

			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
			if (page->mapping)
				continue;
			page->mapping = filp->f_mapping;
			page->index = pgoff + i;
		}
	}
	dax_read_unlock(id);

return rc;
}

static int dev_dax_fault(struct vm_fault *vmf)
static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}
26 changes: 26 additions & 0 deletions drivers/nvdimm/pmem.c
@@ -20,6 +20,7 @@
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
@@ -51,6 +52,30 @@ static struct nd_region *to_region(struct pmem_device *pmem)
return to_nd_region(to_dev(pmem)->parent);
}

static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
phys_addr_t offset, unsigned int len)
{
@@ -65,6 +90,7 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
if (cleared < len)
rc = BLK_STS_IOERR;
if (cleared > 0 && cleared / 512) {
hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
cleared /= 512;
dev_dbg(dev, "%#llx clear %ld sector%s\n",
(unsigned long long) sector, cleared,