Skip to content

Commit

Permalink
Merge tag 'mm-hotfixes-stable-2024-05-10-13-14' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Browse files Browse the repository at this point in the history

Pull MM fixes from Andrew Morton:
 "18 hotfixes, 7 of which are cc:stable.

  More fixups for this cycle's page_owner updates. And a few userfaultfd
  fixes. Otherwise, random singletons - see the individual changelogs
  for details"

* tag 'mm-hotfixes-stable-2024-05-10-13-14' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mailmap: add entry for Barry Song
  selftests/mm: fix powerpc ARCH check
  mailmap: add entry for John Garry
  XArray: set the marks correctly when splitting an entry
  selftests/vDSO: fix runtime errors on LoongArch
  selftests/vDSO: fix building errors on LoongArch
  mm,page_owner: don't remove __GFP_NOLOCKDEP in add_stack_record_to_list
  fs/proc/task_mmu: fix uffd-wp confusion in pagemap_scan_pmd_entry()
  fs/proc/task_mmu: fix loss of young/dirty bits during pagemap scan
  mm/vmalloc: fix return value of vb_alloc if size is 0
  mm: use memalloc_nofs_save() in page_cache_ra_order()
  kmsan: compiler_types: declare __no_sanitize_or_inline
  lib/test_xarray.c: fix error assumptions on check_xa_multi_store_adv_add()
  tools: fix userspace compilation with new test_xarray changes
  MAINTAINERS: update URL's for KEYS/KEYRINGS_INTEGRITY and TPM DEVICE DRIVER
  mm: page_owner: fix wrong information in dump_page_owner
  maple_tree: fix mas_empty_area_rev() null pointer dereference
  mm/userfaultfd: reset ptes when close() for wr-protected ones
  • Loading branch information
torvalds committed May 10, 2024
2 parents cfb4be1 + 672614a commit c22c3e0
Show file tree
Hide file tree
Showing 16 changed files with 116 additions and 64 deletions.
6 changes: 6 additions & 0 deletions .mailmap
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,11 @@ Baolin Wang <[email protected]> <[email protected]>
Baolin Wang <[email protected]> <[email protected]>
Baolin Wang <[email protected]> <[email protected]>
Baolin Wang <[email protected]> <[email protected]>
Barry Song <[email protected]> <[email protected]>
Barry Song <[email protected]> <[email protected]>
Barry Song <[email protected]> <[email protected]>
Barry Song <[email protected]> <[email protected]>
Barry Song <[email protected]> <[email protected]>
Bart Van Assche <[email protected]> <[email protected]>
Bart Van Assche <[email protected]> <[email protected]>
Bartosz Golaszewski <[email protected]> <[email protected]>
Expand Down Expand Up @@ -304,6 +309,7 @@ Johan Hovold <[email protected]> <[email protected]>
Johan Hovold <[email protected]> <[email protected]>
John Crispin <[email protected]> <[email protected]>
John Fastabend <[email protected]> <[email protected]>
John Garry <[email protected]> <[email protected]>
John Keeping <[email protected]> <[email protected]>
John Moon <[email protected]> <[email protected]>
John Paul Adrian Glaubitz <[email protected]>
Expand Down
3 changes: 2 additions & 1 deletion MAINTAINERS
Original file line number Diff line number Diff line change
Expand Up @@ -12041,6 +12041,7 @@ M: Mimi Zohar <[email protected]>
L: [email protected]
L: [email protected]
S: Supported
W: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
F: security/integrity/platform_certs

KFENCE
Expand Down Expand Up @@ -22409,7 +22410,7 @@ M: Jarkko Sakkinen <[email protected]>
R: Jason Gunthorpe <[email protected]>
L: [email protected]
S: Maintained
W: https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
W: https://gitlab.com/jarkkojs/linux-tpmdd-test
Q: https://patchwork.kernel.org/project/linux-integrity/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
F: Documentation/devicetree/bindings/tpm/
Expand Down
24 changes: 14 additions & 10 deletions fs/proc/task_mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -1817,15 +1817,13 @@ static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
}

static void make_uffd_wp_pte(struct vm_area_struct *vma,
unsigned long addr, pte_t *pte)
unsigned long addr, pte_t *pte, pte_t ptent)
{
pte_t ptent = ptep_get(pte);

if (pte_present(ptent)) {
pte_t old_pte;

old_pte = ptep_modify_prot_start(vma, addr, pte);
ptent = pte_mkuffd_wp(ptent);
ptent = pte_mkuffd_wp(old_pte);
ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
} else if (is_swap_pte(ptent)) {
ptent = pte_swp_mkuffd_wp(ptent);
Expand Down Expand Up @@ -2175,9 +2173,12 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
/* Fast path for performing exclusive WP */
for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
if (pte_uffd_wp(ptep_get(pte)))
pte_t ptent = ptep_get(pte);

if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
pte_swp_uffd_wp_any(ptent))
continue;
make_uffd_wp_pte(vma, addr, pte);
make_uffd_wp_pte(vma, addr, pte, ptent);
if (!flush_end)
start = addr;
flush_end = addr + PAGE_SIZE;
Expand All @@ -2190,16 +2191,18 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
p->arg.return_mask == PAGE_IS_WRITTEN) {
for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
unsigned long next = addr + PAGE_SIZE;
pte_t ptent = ptep_get(pte);

if (pte_uffd_wp(ptep_get(pte)))
if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
pte_swp_uffd_wp_any(ptent))
continue;
ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
p, addr, &next);
if (next == addr)
break;
if (~p->arg.flags & PM_SCAN_WP_MATCHING)
continue;
make_uffd_wp_pte(vma, addr, pte);
make_uffd_wp_pte(vma, addr, pte, ptent);
if (!flush_end)
start = addr;
flush_end = next;
Expand All @@ -2208,8 +2211,9 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
}

for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
pte_t ptent = ptep_get(pte);
unsigned long categories = p->cur_vma_category |
pagemap_page_category(p, vma, addr, ptep_get(pte));
pagemap_page_category(p, vma, addr, ptent);
unsigned long next = addr + PAGE_SIZE;

if (!pagemap_scan_is_interesting_page(categories, p))
Expand All @@ -2224,7 +2228,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
if (~categories & PAGE_IS_WRITTEN)
continue;

make_uffd_wp_pte(vma, addr, pte);
make_uffd_wp_pte(vma, addr, pte, ptent);
if (!flush_end)
start = addr;
flush_end = next;
Expand Down
4 changes: 4 additions & 0 deletions fs/userfaultfd.c
Original file line number Diff line number Diff line change
Expand Up @@ -895,6 +895,10 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
prev = vma;
continue;
}
/* Reset ptes for the whole vma range if wr-protected */
if (userfaultfd_wp(vma))
uffd_wp_range(vma, vma->vm_start,
vma->vm_end - vma->vm_start, false);
new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
vma = vma_modify_flags_uffd(&vmi, prev, vma, vma->vm_start,
vma->vm_end, new_flags,
Expand Down
11 changes: 11 additions & 0 deletions include/linux/compiler_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,17 @@ struct ftrace_likely_data {
# define __no_kcsan
#endif

#ifdef __SANITIZE_MEMORY__
/*
* Similarly to KASAN and KCSAN, KMSAN loses function attributes of inlined
* functions, therefore disabling KMSAN checks also requires disabling inlining.
*
* __no_sanitize_or_inline effectively prevents KMSAN from reporting errors
* within the function and marks all its outputs as initialized.
*/
# define __no_sanitize_or_inline __no_kmsan_checks notrace __maybe_unused
#endif

#ifndef __no_sanitize_or_inline
#define __no_sanitize_or_inline __always_inline
#endif
Expand Down
16 changes: 8 additions & 8 deletions lib/maple_tree.c
Original file line number Diff line number Diff line change
Expand Up @@ -5109,18 +5109,18 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
if (size == 0 || max - min < size - 1)
return -EINVAL;

if (mas_is_start(mas)) {
if (mas_is_start(mas))
mas_start(mas);
mas->offset = mas_data_end(mas);
} else if (mas->offset >= 2) {
mas->offset -= 2;
} else if (!mas_rewind_node(mas)) {
else if ((mas->offset < 2) && (!mas_rewind_node(mas)))
return -EBUSY;
}

/* Empty set. */
if (mas_is_none(mas) || mas_is_ptr(mas))
if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
return mas_sparse_area(mas, min, max, size, false);
else if (mas->offset >= 2)
mas->offset -= 2;
else
mas->offset = mas_data_end(mas);


/* The start of the window can only be within these values. */
mas->index = min;
Expand Down
27 changes: 22 additions & 5 deletions lib/test_xarray.c
Original file line number Diff line number Diff line change
Expand Up @@ -744,15 +744,20 @@ static noinline void check_xa_multi_store_adv_add(struct xarray *xa,

do {
xas_lock_irq(&xas);

xas_store(&xas, p);
XA_BUG_ON(xa, xas_error(&xas));
XA_BUG_ON(xa, xa_load(xa, index) != p);

xas_unlock_irq(&xas);
/*
* In our selftest case the only failure we can expect is for
* there not to be enough memory as we're not mimicking the
* entire page cache, so verify that's the only error we can run
* into here. The xas_nomem() which follows will ensure to fix
 * that condition for us so we can chug on in the loop.
*/
XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
} while (xas_nomem(&xas, GFP_KERNEL));

XA_BUG_ON(xa, xas_error(&xas));
XA_BUG_ON(xa, xa_load(xa, index) != p);
}

/* mimics page_cache_delete() */
Expand Down Expand Up @@ -1783,9 +1788,11 @@ static void check_split_1(struct xarray *xa, unsigned long index,
unsigned int order, unsigned int new_order)
{
XA_STATE_ORDER(xas, xa, index, new_order);
unsigned int i;
unsigned int i, found;
void *entry;

xa_store_order(xa, index, order, xa, GFP_KERNEL);
xa_set_mark(xa, index, XA_MARK_1);

xas_split_alloc(&xas, xa, order, GFP_KERNEL);
xas_lock(&xas);
Expand All @@ -1802,6 +1809,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
xa_set_mark(xa, index, XA_MARK_0);
XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));

xas_set_order(&xas, index, 0);
found = 0;
rcu_read_lock();
xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
found++;
XA_BUG_ON(xa, xa_is_internal(entry));
}
rcu_read_unlock();
XA_BUG_ON(xa, found != 1 << (order - new_order));

xa_destroy(xa);
}

Expand Down
23 changes: 19 additions & 4 deletions lib/xarray.c
Original file line number Diff line number Diff line change
Expand Up @@ -969,16 +969,30 @@ static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
return marks;
}

/*
 * Set @mark on the first (canonical) slot of every entry in @node.
 * @sibs is the number of sibling slots per entry; an entry therefore
 * occupies @sibs + 1 consecutive slots.
 */
static inline void node_mark_slots(struct xa_node *node, unsigned int sibs,
		xa_mark_t mark)
{
	unsigned int slot;

	/* sibs == 0: every slot is its own entry, so mark them all. */
	if (!sibs) {
		node_mark_all(node, mark);
		return;
	}

	/* Stride over the chunk, marking only each entry's head slot. */
	for (slot = 0; slot < XA_CHUNK_SIZE; slot += sibs + 1)
		node_set_mark(node, slot, mark);
}

static void node_set_marks(struct xa_node *node, unsigned int offset,
struct xa_node *child, unsigned int marks)
struct xa_node *child, unsigned int sibs,
unsigned int marks)
{
xa_mark_t mark = XA_MARK_0;

for (;;) {
if (marks & (1 << (__force unsigned int)mark)) {
node_set_mark(node, offset, mark);
if (child)
node_mark_all(child, mark);
node_mark_slots(child, sibs, mark);
}
if (mark == XA_MARK_MAX)
break;
Expand Down Expand Up @@ -1077,7 +1091,8 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
child->nr_values = xa_is_value(entry) ?
XA_CHUNK_SIZE : 0;
RCU_INIT_POINTER(child->parent, node);
node_set_marks(node, offset, child, marks);
node_set_marks(node, offset, child, xas->xa_sibs,
marks);
rcu_assign_pointer(node->slots[offset],
xa_mk_node(child));
if (xa_is_value(curr))
Expand All @@ -1086,7 +1101,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
} else {
unsigned int canon = offset - xas->xa_sibs;

node_set_marks(node, canon, NULL, marks);
node_set_marks(node, canon, NULL, 0, marks);
rcu_assign_pointer(node->slots[canon], entry);
while (offset > canon)
rcu_assign_pointer(node->slots[offset--],
Expand Down
4 changes: 2 additions & 2 deletions mm/page_owner.c
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ static void add_stack_record_to_list(struct stack_record *stack_record,

/* Filter gfp_mask the same way stackdepot does, for consistency */
gfp_mask &= ~GFP_ZONEMASK;
gfp_mask &= (GFP_ATOMIC | GFP_KERNEL);
gfp_mask &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
gfp_mask |= __GFP_NOWARN;

set_current_in_page_owner();
Expand Down Expand Up @@ -328,7 +328,7 @@ noinline void __set_page_owner(struct page *page, unsigned short order,
if (unlikely(!page_ext))
return;
__update_page_owner_handle(page_ext, handle, order, gfp_mask, -1,
current->pid, current->tgid, ts_nsec,
ts_nsec, current->pid, current->tgid,
current->comm);
page_ext_put(page_ext);
inc_stack_record_count(handle, gfp_mask, 1 << order);
Expand Down
4 changes: 4 additions & 0 deletions mm/readahead.c
Original file line number Diff line number Diff line change
Expand Up @@ -490,6 +490,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
pgoff_t index = readahead_index(ractl);
pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT;
pgoff_t mark = index + ra->size - ra->async_size;
unsigned int nofs;
int err = 0;
gfp_t gfp = readahead_gfp_mask(mapping);

Expand All @@ -504,6 +505,8 @@ void page_cache_ra_order(struct readahead_control *ractl,
new_order = min_t(unsigned int, new_order, ilog2(ra->size));
}

/* See comment in page_cache_ra_unbounded() */
nofs = memalloc_nofs_save();
filemap_invalidate_lock_shared(mapping);
while (index <= limit) {
unsigned int order = new_order;
Expand All @@ -527,6 +530,7 @@ void page_cache_ra_order(struct readahead_control *ractl,

read_pages(ractl);
filemap_invalidate_unlock_shared(mapping);
memalloc_nofs_restore(nofs);

/*
* If there were already pages in the page cache, then we may have
Expand Down
2 changes: 1 addition & 1 deletion mm/vmalloc.c
Original file line number Diff line number Diff line change
Expand Up @@ -2710,7 +2710,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
* get_order(0) returns funny result. Just warn and terminate
* early.
*/
return NULL;
return ERR_PTR(-EINVAL);
}
order = get_order(size);

Expand Down
2 changes: 2 additions & 0 deletions tools/testing/radix-tree/linux/kernel.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,8 @@
#define pr_info printk
#define pr_debug printk
#define pr_cont printk
#define schedule()
#define PAGE_SHIFT 12

#define __acquires(x)
#define __releases(x)
Expand Down
6 changes: 3 additions & 3 deletions tools/testing/selftests/mm/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ uname_M := $(shell uname -m 2>/dev/null || echo not)
else
uname_M := $(shell echo $(CROSS_COMPILE) | grep -o '^[a-z0-9]\+')
endif
ARCH ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/ppc64/')
ARCH ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/powerpc/')
endif

# Without this, failed build products remain, with up-to-date timestamps,
Expand Down Expand Up @@ -98,13 +98,13 @@ TEST_GEN_FILES += $(BINARIES_64)
endif
else

ifneq (,$(findstring $(ARCH),ppc64))
ifneq (,$(findstring $(ARCH),powerpc))
TEST_GEN_FILES += protection_keys
endif

endif

ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sparc64 x86_64))
ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64))
TEST_GEN_FILES += va_high_addr_switch
TEST_GEN_FILES += virtual_address_range
TEST_GEN_FILES += write_to_hugetlbfs
Expand Down
6 changes: 5 additions & 1 deletion tools/testing/selftests/vDSO/vdso_config.h
Original file line number Diff line number Diff line change
Expand Up @@ -53,15 +53,19 @@
#if __riscv_xlen == 32
#define VDSO_32BIT 1
#endif
#elif defined(__loongarch__)
#define VDSO_VERSION 6
#define VDSO_NAMES 1
#endif

static const char *versions[6] = {
static const char *versions[7] = {
"LINUX_2.6",
"LINUX_2.6.15",
"LINUX_2.6.29",
"LINUX_2.6.39",
"LINUX_4",
"LINUX_4.15",
"LINUX_5.10"
};

static const char *names[2][6] = {
Expand Down
Loading

0 comments on commit c22c3e0

Please sign in to comment.