Merge tag 'mm-hotfixes-stable-2022-12-10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "Nine hotfixes.

  Six for MM, three for other areas. Four of these patches address
  post-6.0 issues"

* tag 'mm-hotfixes-stable-2022-12-10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  memcg: fix possible use-after-free in memcg_write_event_control()
  MAINTAINERS: update Muchun Song's email
  mm/gup: fix gup_pud_range() for dax
  mmap: fix do_brk_flags() modifying obviously incorrect VMAs
  mm/swap: fix SWP_PFN_BITS with CONFIG_PHYS_ADDR_T_64BIT on 32bit
  tmpfs: fix data loss from failed fallocate
  kselftests: cgroup: update kmem test precision tolerance
  mm: do not BUG_ON missing brk mapping, because userspace can unmap it
  mailmap: update Matti Vaittinen's email address
torvalds committed Dec 11, 2022
2 parents 296a7b7 + 4a7ba45 commit 4cee37b
Showing 7 changed files with 29 additions and 19 deletions.
3 changes: 3 additions & 0 deletions .mailmap
@@ -287,6 +287,7 @@ Matthew Wilcox <[email protected]> <[email protected]>
Matthew Wilcox <[email protected]> <[email protected]>
Matthias Fuchs <[email protected]> <[email protected]>
Matthieu CASTET <[email protected]>
+Matti Vaittinen <[email protected]> <[email protected]>
Matt Ranostay <[email protected]> <[email protected]>
Matt Ranostay <[email protected]> Matthew Ranostay <[email protected]>
Matt Ranostay <[email protected]> <[email protected]>
@@ -372,6 +373,8 @@ Ricardo Ribalda <[email protected]> <[email protected]>
Roman Gushchin <[email protected]> <[email protected]>
Roman Gushchin <[email protected]> <[email protected]>
Roman Gushchin <[email protected]> <[email protected]>
+Muchun Song <[email protected]> <[email protected]>
+Muchun Song <[email protected]> <[email protected]>
Ross Zwisler <[email protected]> <[email protected]>
Rudolf Marek <[email protected]>
Rui Saraiva <[email protected]>
4 changes: 2 additions & 2 deletions MAINTAINERS
@@ -5299,7 +5299,7 @@ M: Johannes Weiner <[email protected]>
M: Michal Hocko <[email protected]>
M: Roman Gushchin <[email protected]>
M: Shakeel Butt <[email protected]>
-R: Muchun Song <[email protected]>
+R: Muchun Song <[email protected]>
L: [email protected]
L: [email protected]
S: Maintained
@@ -9439,7 +9439,7 @@ F: drivers/net/ethernet/huawei/hinic/

HUGETLB SUBSYSTEM
M: Mike Kravetz <[email protected]>
-M: Muchun Song <[email protected]>
+M: Muchun Song <[email protected]>
L: [email protected]
S: Maintained
F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages
8 changes: 5 additions & 3 deletions include/linux/swapops.h
@@ -33,11 +33,13 @@
* can use the extra bits to store other information besides PFN.
*/
#ifdef MAX_PHYSMEM_BITS
-#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else /* MAX_PHYSMEM_BITS */
-#define SWP_PFN_BITS (BITS_PER_LONG - PAGE_SHIFT)
+#define SWP_PFN_BITS min_t(int, \
+sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
+SWP_TYPE_SHIFT)
#endif /* MAX_PHYSMEM_BITS */
-#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
+#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)

/**
* Migration swap entry specific bitfield definitions. Layout:
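
For reference, the effect of the new fallback can be checked with ordinary arithmetic. The sketch below mirrors the two formulas under assumed values (PAGE_SHIFT = 12, BITS_PER_LONG = 32, an 8-byte phys_addr_t as with CONFIG_PHYS_ADDR_T_64BIT, and an illustrative SWP_TYPE_SHIFT of 26); the real constants come from the architecture and swap headers, so treat the numbers purely as an example:

#include <stdio.h>

/* Assumed stand-ins for a 32-bit build with a 64-bit phys_addr_t; the real
 * definitions live in the architecture headers and in swapops.h. */
#define PAGE_SHIFT     12
#define BITS_PER_LONG  32
#define PHYS_ADDR_BITS 64   /* sizeof(phys_addr_t) * 8 */
#define SWP_TYPE_SHIFT 26   /* illustrative only */

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Old fallback: bounded by the word size, 32 - 12 = 20 PFN bits,
	 * i.e. physical addresses of at most 32 bits, even though
	 * phys_addr_t is 64-bit here. */
	int old_bits = BITS_PER_LONG - PAGE_SHIFT;

	/* New fallback: start from the physical address width and clamp
	 * to the bits a swap entry can actually carry. */
	int new_bits = MIN(PHYS_ADDR_BITS - PAGE_SHIFT, SWP_TYPE_SHIFT);

	printf("old SWP_PFN_BITS = %d (up to %d-bit physical addresses)\n",
	       old_bits, old_bits + PAGE_SHIFT);
	printf("new SWP_PFN_BITS = %d (up to %d-bit physical addresses)\n",
	       new_bits, new_bits + PAGE_SHIFT);
	return 0;
}
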
2 changes: 1 addition & 1 deletion mm/gup.c
@@ -2852,7 +2852,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
next = pud_addr_end(addr, end);
if (unlikely(!pud_present(pud)))
return 0;
-if (unlikely(pud_huge(pud))) {
+if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
if (!gup_huge_pud(pud, pudp, addr, next, flags,
pages, nr))
return 0;
14 changes: 4 additions & 10 deletions mm/mmap.c
@@ -226,8 +226,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
/* Search one past newbrk */
mas_set(&mas, newbrk);
brkvma = mas_find(&mas, oldbrk);
-BUG_ON(brkvma == NULL);
-if (brkvma->vm_start >= oldbrk)
+if (!brkvma || brkvma->vm_start >= oldbrk)
goto out; /* mapping intersects with an existing non-brk vma. */
/*
* mm->brk must be protected by write mmap_lock.
@@ -2946,9 +2945,9 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
* Expand the existing vma if possible; Note that singular lists do not
* occur after forking, so the expand will only happen on new VMAs.
*/
-if (vma &&
-(!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) &&
-((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
+if (vma && vma->vm_end == addr && !vma_policy(vma) &&
+can_vma_merge_after(vma, flags, NULL, NULL,
+addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
mas_set_range(mas, vma->vm_start, addr + len - 1);
if (mas_preallocate(mas, vma, GFP_KERNEL))
return -ENOMEM;
@@ -3035,11 +3034,6 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
goto munmap_failed;

vma = mas_prev(&mas, 0);
-if (!vma || vma->vm_end != addr || vma_policy(vma) ||
-!can_vma_merge_after(vma, flags, NULL, NULL,
-addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL))
-vma = NULL;
-
ret = do_brk_flags(&mas, vma, addr, len, flags);
populate = ((mm->def_flags & VM_LOCKED) != 0);
mmap_write_unlock(mm);
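
The first hunk matters because nothing stops userspace from munmap()ing the heap out from under the kernel and then calling brk() again, which is the situation the old BUG_ON tripped over ("mm: do not BUG_ON missing brk mapping, because userspace can unmap it"). Below is a rough userspace sketch of that sequence; sbrk/brk/munmap are the standard glibc wrappers, everything else is illustrative, and whether an unpatched kernel actually hits the BUG_ON depends on the exact heap layout:

#define _GNU_SOURCE
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

static void say(const char *msg)
{
	/* Avoid stdio/malloc once we start poking at the heap. */
	write(STDOUT_FILENO, msg, strlen(msg));
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	void *old_brk, *new_brk;
	uintptr_t start;

	old_brk = sbrk(0);                    /* current program break */
	if (sbrk(4 * page) == (void *)-1)
		return 1;
	new_brk = sbrk(0);

	/* Userspace may legally unmap the brk VMA it just grew. */
	start = ((uintptr_t)old_brk + page - 1) & ~((uintptr_t)page - 1);
	if (munmap((void *)start, (uintptr_t)new_brk - start) != 0)
		say("munmap failed\n");

	/* Shrinking the break afterwards walks the path the removed BUG_ON
	 * guarded; with the fix the kernel either refuses or succeeds
	 * without oopsing. */
	if (brk(old_brk) != 0)
		say("brk shrink rejected\n");
	else
		say("brk shrink returned success\n");
	return 0;
}
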
11 changes: 11 additions & 0 deletions mm/shmem.c
@@ -948,6 +948,15 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
index++;
}

+/*
+* When undoing a failed fallocate, we want none of the partial folio
+* zeroing and splitting below, but shall want to truncate the whole
+* folio when !uptodate indicates that it was added by this fallocate,
+* even when [lstart, lend] covers only a part of the folio.
+*/
+if (unfalloc)
+goto whole_folios;
+
same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
if (folio) {
@@ -973,6 +982,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
folio_put(folio);
}

+whole_folios:
+
index = start;
while (index < end) {
cond_resched();
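
The new comment describes the contract this change restores: when fallocate() fails partway, its undo must only drop pages that the fallocate itself added, never pre-existing data that happens to share a folio with the range. A rough userspace sketch of that contract follows; the mount point and sizes are illustrative (e.g. mount -t tmpfs -o size=1m none /mnt/smalltmp), and whether a pre-fix kernel actually loses the data depends on the folio layout:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative path on a deliberately small tmpfs. */
	const char *path = "/mnt/smalltmp/victim";
	char buf[6] = "";
	int fd = open(path, O_RDWR | O_CREAT, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Pre-existing data that must survive a failed fallocate. */
	if (pwrite(fd, "hello", 5, 0) != 5) {
		perror("pwrite");
		return 1;
	}

	/* Ask for far more than the tmpfs can hold so the fallocate fails
	 * (ENOSPC) after it has started; the range covers our data. */
	if (fallocate(fd, 0, 0, 64 << 20) == 0)
		fprintf(stderr, "fallocate unexpectedly succeeded\n");
	else
		fprintf(stderr, "fallocate failed as intended: %s\n",
			strerror(errno));

	if (pread(fd, buf, 5, 0) != 5 || memcmp(buf, "hello", 5) != 0) {
		fprintf(stderr, "pre-existing data was lost\n");
		return 1;
	}
	fprintf(stderr, "pre-existing data intact\n");
	return 0;
}
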
6 changes: 3 additions & 3 deletions tools/testing/selftests/cgroup/test_kmem.c
@@ -19,12 +19,12 @@


/*
-* Memory cgroup charging is performed using percpu batches 32 pages
+* Memory cgroup charging is performed using percpu batches 64 pages
* big (look at MEMCG_CHARGE_BATCH), whereas memory.stat is exact. So
* the maximum discrepancy between charge and vmstat entries is number
-* of cpus multiplied by 32 pages.
+* of cpus multiplied by 64 pages.
*/
-#define MAX_VMSTAT_ERROR (4096 * 32 * get_nprocs())
+#define MAX_VMSTAT_ERROR (4096 * 64 * get_nprocs())


static int alloc_dcache(const char *cgroup, void *arg)
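
For a feel for the new tolerance, the same formula can be evaluated standalone. Assuming 4 KiB pages (matching the hard-coded 4096 above), each online CPU may hold up to 64 pages of charges not yet reflected in memory.stat, i.e. 256 KiB of slack per CPU:

#include <stdio.h>
#include <sys/sysinfo.h>   /* get_nprocs() */

int main(void)
{
	/* Mirrors MAX_VMSTAT_ERROR: page size * per-CPU batch * online CPUs. */
	long slack = 4096L * 64 * get_nprocs();

	printf("%d CPUs -> worst-case charge/vmstat gap: %ld bytes (%ld KiB)\n",
	       get_nprocs(), slack, slack / 1024);
	return 0;
}
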
