Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "18 patches.

  Subsystems affected by this patch series: mm (hugetlb, compaction,
  vmalloc, shmem, memblock, pagecache, and kasan), mailmap, gcov,
  ubsan, and MAINTAINERS"

* emailed patches from Andrew Morton <[email protected]>:
  MAINTAINERS/.mailmap: use my @kernel.org address
  mm: hugetlb: fix missing put_page in gather_surplus_pages()
  ubsan: implement __ubsan_handle_alignment_assumption
  kasan: make addr_has_metadata() return true for valid addresses
  kasan: add explicit preconditions to kasan_report()
  mm/filemap: add missing mem_cgroup_uncharge() to __add_to_page_cache_locked()
  mailmap: add entries for Manivannan Sadhasivam
  mailmap: fix name/email for Viresh Kumar
  memblock: do not start bottom-up allocations with kernel_end
  mm: thp: fix MADV_REMOVE deadlock on shmem THP
  init/gcov: allow CONFIG_CONSTRUCTORS on UML to fix module gcov
  mm/vmalloc: separate put pages and flush VM flags
  mm, compaction: move high_pfn to the for loop scope
  mm: migrate: do not migrate HugeTLB page whose refcount is one
  mm: hugetlb: remove VM_BUG_ON_PAGE from page_huge_active
  mm: hugetlb: fix a race between isolating and freeing page
  mm: hugetlb: fix a race between freeing and dissolving the page
  mm: hugetlbfs: fix cannot migrate the fallocated HugeTLB page
torvalds committed Feb 5, 2021
2 parents 17fbcdf + 654eb3f commit 1e0d27f
Showing 18 changed files with 153 additions and 77 deletions.
5 changes: 5 additions & 0 deletions .mailmap
@@ -199,6 +199,8 @@ Li Yang <[email protected]> <[email protected]>
Li Yang <[email protected]> <[email protected]>
Lukasz Luba <[email protected]> <[email protected]>
Maciej W. Rozycki <[email protected]> <[email protected]>
Manivannan Sadhasivam <[email protected]> <[email protected]>
Manivannan Sadhasivam <[email protected]> <[email protected]>
Marcin Nowakowski <[email protected]> <[email protected]>
Marc Zyngier <[email protected]> <[email protected]>
Mark Brown <[email protected]>
@@ -244,6 +246,7 @@ Morten Welinder <[email protected]>
Morten Welinder <[email protected]>
Morten Welinder <[email protected]>
Mythri P K <[email protected]>
Nathan Chancellor <[email protected]> <[email protected]>
Nguyen Anh Quynh <[email protected]>
Nicolas Ferre <[email protected]> <[email protected]>
Nicolas Pitre <[email protected]> <[email protected]>
@@ -334,6 +337,8 @@ Vinod Koul <[email protected]> <[email protected]>
Viresh Kumar <[email protected]> <[email protected]>
Viresh Kumar <[email protected]> <[email protected]>
Viresh Kumar <[email protected]> <[email protected]>
Viresh Kumar <[email protected]> <[email protected]>
Viresh Kumar <[email protected]> <[email protected]>
Vivien Didelot <[email protected]> <[email protected]>
Vlad Dogaru <[email protected]> <[email protected]>
Vladimir Davydov <[email protected]> <[email protected]>
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -4304,7 +4304,7 @@ S: Maintained
F: .clang-format

CLANG/LLVM BUILD SUPPORT
M: Nathan Chancellor <[email protected]>
M: Nathan Chancellor <[email protected]>
M: Nick Desaulniers <[email protected]>
L: [email protected]
S: Supported
3 changes: 2 additions & 1 deletion fs/hugetlbfs/inode.c
@@ -735,9 +735,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,

mutex_unlock(&hugetlb_fault_mutex_table[hash]);

set_page_huge_active(page);
/*
* unlock_page because locked by add_to_page_cache()
* page_put due to reference from alloc_huge_page()
* put_page() due to reference from alloc_huge_page()
*/
unlock_page(page);
put_page(page);
2 changes: 2 additions & 0 deletions include/linux/hugetlb.h
@@ -770,6 +770,8 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
}
#endif

void set_page_huge_active(struct page *page);

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

7 changes: 7 additions & 0 deletions include/linux/kasan.h
@@ -333,6 +333,13 @@ static inline void *kasan_reset_tag(const void *addr)
return (void *)arch_kasan_reset_tag(addr);
}

/**
* kasan_report - print a report about a bad memory access detected by KASAN
* @addr: address of the bad access
* @size: size of the bad access
* @is_write: whether the bad access is a write or a read
* @ip: instruction pointer for the accessibility check or the bad access itself
*/
bool kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);

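As a rough illustration of the calling convention documented above (and nothing more), here is a self-contained userspace mock; kasan_report() below is a stub that only echoes its arguments, and the address is invented:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the kernel's kasan_report(); it only echoes its arguments. */
static bool kasan_report(unsigned long addr, size_t size, bool is_write,
			 unsigned long ip)
{
	printf("BUG: KASAN-style report: %s of size %zu at addr 0x%lx, ip 0x%lx\n",
	       is_write ? "write" : "read", size, addr, ip);
	return true;	/* a report was printed */
}

int main(void)
{
	/* A hypothetical failed check on an 8-byte write; callers pass their
	 * own return address so the report points at the offending access. */
	kasan_report(0xdead0000UL, 8, true,
		     (unsigned long)__builtin_return_address(0));
	return 0;
}
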
9 changes: 2 additions & 7 deletions include/linux/vmalloc.h
@@ -24,7 +24,8 @@ struct notifier_block; /* in notifier.h */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
#define VM_MAP_PUT_PAGES 0x00000100 /* put pages and free array in vfree */
#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */

/*
* VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC.
@@ -37,12 +38,6 @@ struct notifier_block; /* in notifier.h */
* determine which allocations need the module shadow freed.
*/

/*
* Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
* vfree_atomic().
*/
#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
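For context on why the VM_* values move: in the old header text above, both VM_MAP_PUT_PAGES and VM_FLUSH_RESET_PERMS were defined as 0x00000100, so setting one implied the other. A small standalone sketch of that collision (flag values copied from the old and new definitions shown in the hunk; the test is illustrative, not kernel code):

#include <stdio.h>

/* Old header: both flags shared bit 8, so they could not be told apart. */
#define OLD_VM_FLUSH_RESET_PERMS 0x00000100
#define OLD_VM_MAP_PUT_PAGES     0x00000100

/* New header: VM_MAP_PUT_PAGES gets its own bit. */
#define NEW_VM_FLUSH_RESET_PERMS 0x00000100
#define NEW_VM_MAP_PUT_PAGES     0x00000200

int main(void)
{
	unsigned long old_flags = OLD_VM_MAP_PUT_PAGES;
	unsigned long new_flags = NEW_VM_MAP_PUT_PAGES;

	/* With the old values this reports a direct-map reset + TLB flush
	 * request the caller never made; with the new values it does not. */
	printf("old: looks like FLUSH_RESET_PERMS? %s\n",
	       (old_flags & OLD_VM_FLUSH_RESET_PERMS) ? "yes" : "no");
	printf("new: looks like FLUSH_RESET_PERMS? %s\n",
	       (new_flags & NEW_VM_FLUSH_RESET_PERMS) ? "yes" : "no");
	return 0;
}
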
1 change: 0 additions & 1 deletion init/Kconfig
@@ -76,7 +76,6 @@ config CC_HAS_ASM_INLINE

config CONSTRUCTORS
bool
depends on !UML

config IRQ_WORK
bool
8 changes: 7 additions & 1 deletion init/main.c
@@ -1066,7 +1066,13 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
/* Call all constructor functions linked into the kernel. */
static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
/*
* For UML, the constructors have already been called by the
* normal setup code as it's just a normal ELF binary, so we
* cannot do it again - but we do need CONFIG_CONSTRUCTORS
* even on UML for modules.
*/
#if defined(CONFIG_CONSTRUCTORS) && !defined(CONFIG_UML)
ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;

for (; fn < (ctor_fn_t *) __ctors_end; fn++)
2 changes: 1 addition & 1 deletion kernel/gcov/Kconfig
@@ -4,7 +4,7 @@ menu "GCOV-based kernel profiling"
config GCOV_KERNEL
bool "Enable gcov-based kernel profiling"
depends on DEBUG_FS
select CONSTRUCTORS if !UML
select CONSTRUCTORS
default n
help
This option enables gcov-based code profiling (e.g. for code coverage
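The two hunks above (do_ctors() and GCOV_KERNEL) both revolve around constructor support. A userspace sketch of the underlying mechanism, not kernel code: functions placed in the constructor section run before main(), which is the same way gcov registers coverage data for each object file. On UML the kernel is itself an ELF binary, so the host loader has already run the built-in constructors, but CONFIG_CONSTRUCTORS still has to stay selectable so modules get theirs.

#include <stdio.h>

static int registered;

/* Runs before main(), exactly like a gcov per-object setup hook would. */
static void __attribute__((constructor)) gcov_like_setup(void)
{
	registered = 1;
}

int main(void)
{
	printf("constructor ran before main(): %s\n",
	       registered ? "yes" : "no");
	return 0;
}
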
31 changes: 31 additions & 0 deletions lib/ubsan.c
@@ -427,3 +427,34 @@ void __ubsan_handle_load_invalid_value(void *_data, void *val)
ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);

void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
unsigned long align,
unsigned long offset);
void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
unsigned long align,
unsigned long offset)
{
struct alignment_assumption_data *data = _data;
unsigned long real_ptr;

if (suppress_report(&data->location))
return;

ubsan_prologue(&data->location, "alignment-assumption");

if (offset)
pr_err("assumption of %lu byte alignment (with offset of %lu byte) for pointer of type %s failed",
align, offset, data->type->type_name);
else
pr_err("assumption of %lu byte alignment for pointer of type %s failed",
align, data->type->type_name);

real_ptr = ptr - offset;
pr_err("%saddress is %lu aligned, misalignment offset is %lu bytes",
offset ? "offset " : "", BIT(real_ptr ? __ffs(real_ptr) : 0),
real_ptr & (align - 1));

ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_alignment_assumption);
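The new handler fires when code built with UBSAN's alignment instrumentation asserts an alignment that does not hold at run time. A minimal userspace sketch of the construct involved, assuming a compiler whose alignment sanitizer checks __builtin_assume_aligned (e.g. recent Clang with -fsanitize=alignment); the userspace runtime prints its own equivalent of the message formatted above:

#include <stdio.h>

int main(void)
{
	char buf[64] __attribute__((aligned(16)));
	char *misaligned = buf + 1;	/* guaranteed not 16-byte aligned */

	/*
	 * Promise the compiler 16-byte alignment; the sanitizer inserts a
	 * run-time check of that assumption, and reporting the failure is
	 * what __ubsan_handle_alignment_assumption() does for kernel code.
	 */
	int *p = __builtin_assume_aligned(misaligned, 16);

	printf("%p\n", (void *)p);
	return 0;
}
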
6 changes: 6 additions & 0 deletions lib/ubsan.h
@@ -78,6 +78,12 @@ struct invalid_value_data {
struct type_descriptor *type;
};

struct alignment_assumption_data {
struct source_location location;
struct source_location assumption_location;
struct type_descriptor *type;
};

#if defined(CONFIG_ARCH_SUPPORTS_INT128)
typedef __int128 s_max;
typedef unsigned __int128 u_max;
3 changes: 2 additions & 1 deletion mm/compaction.c
@@ -1342,7 +1342,7 @@ fast_isolate_freepages(struct compact_control *cc)
{
unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
unsigned int nr_scanned = 0;
unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0;
unsigned long low_pfn, min_pfn, highest = 0;
unsigned long nr_isolated = 0;
unsigned long distance;
struct page *page = NULL;
@@ -1387,6 +1387,7 @@ fast_isolate_freepages(struct compact_control *cc)
struct page *freepage;
unsigned long flags;
unsigned int order_scanned = 0;
unsigned long high_pfn = 0;

if (!area->nr_free)
continue;
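The fix moves high_pfn from function scope into the body of the per-order loop, so each free-list iteration starts from a clean value instead of a candidate left over from an earlier one. A contrived standalone sketch of that class of bug (nothing below is kernel code):

#include <stdio.h>

int main(void)
{
	int lists[2][3] = { { 9, 8, 7 }, { 1, 1, 1 } };
	int best = 0;		/* function scope: leaks across lists */

	for (int l = 0; l < 2; l++) {
		/* int best = 0;    per-iteration scope would reset it */
		for (int i = 0; i < 3; i++)
			if (lists[l][i] > best)
				best = lists[l][i];
		/* For l == 1 this still prints 9, found in the previous list. */
		printf("list %d: best = %d\n", l, best);
	}
	return 0;
}
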
4 changes: 4 additions & 0 deletions mm/filemap.c
@@ -835,6 +835,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
XA_STATE(xas, &mapping->i_pages, offset);
int huge = PageHuge(page);
int error;
bool charged = false;

VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapBacked(page), page);
@@ -848,6 +849,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
error = mem_cgroup_charge(page, current->mm, gfp);
if (error)
goto error;
charged = true;
}

gfp &= GFP_RECLAIM_MASK;
@@ -896,6 +898,8 @@ noinline int __add_to_page_cache_locked(struct page *page,

if (xas_error(&xas)) {
error = xas_error(&xas);
if (charged)
mem_cgroup_uncharge(page);
goto error;
}

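The filemap fix follows a common cleanup-on-late-failure pattern: remember that an earlier step (the memcg charge) succeeded so a later failure path (xas_error()) can undo it. A generic standalone sketch of that pattern, with stand-in names rather than kernel APIs:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* add_entry() stands in for __add_to_page_cache_locked(); malloc()/free()
 * stand in for mem_cgroup_charge()/mem_cgroup_uncharge(). */
static int add_entry(bool fail_late)
{
	bool charged = false;
	void *charge = malloc(32);

	if (!charge)
		return -1;
	charged = true;

	if (fail_late) {		/* corresponds to the xas_error() path */
		if (charged)
			free(charge);	/* the previously missing "uncharge" */
		return -1;
	}

	free(charge);			/* normal teardown for this sketch only */
	return 0;
}

int main(void)
{
	printf("%d %d\n", add_entry(false), add_entry(true));
	return 0;
}
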
37 changes: 23 additions & 14 deletions mm/huge_memory.c
@@ -2202,7 +2202,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
{
spinlock_t *ptl;
struct mmu_notifier_range range;
bool was_locked = false;
bool do_unlock_page = false;
pmd_t _pmd;

mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
@@ -2218,7 +2218,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
VM_BUG_ON(freeze && !page);
if (page) {
VM_WARN_ON_ONCE(!PageLocked(page));
was_locked = true;
if (page != pmd_page(*pmd))
goto out;
}
@@ -2227,19 +2226,29 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
if (pmd_trans_huge(*pmd)) {
if (!page) {
page = pmd_page(*pmd);
if (unlikely(!trylock_page(page))) {
get_page(page);
_pmd = *pmd;
spin_unlock(ptl);
lock_page(page);
spin_lock(ptl);
if (unlikely(!pmd_same(*pmd, _pmd))) {
unlock_page(page);
/*
* An anonymous page must be locked, to ensure that a
* concurrent reuse_swap_page() sees stable mapcount;
* but reuse_swap_page() is not used on shmem or file,
* and page lock must not be taken when zap_pmd_range()
* calls __split_huge_pmd() while i_mmap_lock is held.
*/
if (PageAnon(page)) {
if (unlikely(!trylock_page(page))) {
get_page(page);
_pmd = *pmd;
spin_unlock(ptl);
lock_page(page);
spin_lock(ptl);
if (unlikely(!pmd_same(*pmd, _pmd))) {
unlock_page(page);
put_page(page);
page = NULL;
goto repeat;
}
put_page(page);
page = NULL;
goto repeat;
}
put_page(page);
do_unlock_page = true;
}
}
if (PageMlocked(page))
Expand All @@ -2249,7 +2258,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
__split_huge_pmd_locked(vma, pmd, range.start, freeze);
out:
spin_unlock(ptl);
if (!was_locked && page)
if (do_unlock_page)
unlock_page(page);
/*
* No need to double call mmu_notifier->invalidate_range() callback.