Merge tag 'mm-hotfixes-stable-2022-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull more hotfixes from Andrew Morton:
 "Seventeen hotfixes.  Mostly memory management things.

  Ten patches are cc:stable, addressing pre-6.0 issues"

* tag 'mm-hotfixes-stable-2022-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  .mailmap: update Luca Ceresoli's e-mail address
  mm/mprotect: only reference swap pfn page if type match
  squashfs: don't call kmalloc in decompressors
  mm/damon/dbgfs: avoid duplicate context directory creation
  mailmap: update email address for Colin King
  asm-generic: sections: refactor memory_intersects
  bootmem: remove the vmemmap pages from kmemleak in put_page_bootmem
  ocfs2: fix freeing uninitialized resource on ocfs2_dlm_shutdown
  Revert "memcg: cleanup racy sum avoidance code"
  mm/zsmalloc: do not attempt to free IS_ERR handle
  binder_alloc: add missing mmap_lock calls when using the VMA
  mm: re-allow pinning of zero pfns (again)
  vmcoreinfo: add kallsyms_num_syms symbol
  mailmap: update Guilherme G. Piccoli's email addresses
  writeback: avoid use-after-free after removing device
  shmem: update folio if shmem_replace_page() updates the page
  mm/hugetlb: avoid corrupting page->mapping in hugetlb_mcopy_atomic_pte
torvalds committed Aug 28, 2022
2 parents 373eff5 + 0ebafe2 commit b467192
Showing 21 changed files with 108 additions and 60 deletions.
6 changes: 4 additions & 2 deletions .mailmap
@@ -98,8 +98,7 @@ Christian Brauner <[email protected]> <[email protected]>
Christian Marangi <[email protected]>
Christophe Ricard <[email protected]>
Christoph Hellwig <[email protected]>
Colin Ian King <[email protected]> <[email protected]>
Colin Ian King <[email protected]> <[email protected]>
Colin Ian King <[email protected]> <[email protected]>
Corey Minyard <[email protected]>
Damian Hobson-Garcia <[email protected]>
Daniel Borkmann <[email protected]> <[email protected]>
@@ -150,6 +149,8 @@ Greg Kroah-Hartman <[email protected]>
Greg Kroah-Hartman <[email protected]>
Greg Kurz <[email protected]> <[email protected]>
Gregory CLEMENT <[email protected]> <[email protected]>
Guilherme G. Piccoli <[email protected]> <[email protected]>
Guilherme G. Piccoli <[email protected]> <[email protected]>
Guo Ren <[email protected]> <[email protected]>
Guo Ren <[email protected]> <[email protected]>
Gustavo Padovan <[email protected]>
@@ -253,6 +254,7 @@ Linus Lüssing <[email protected]> <[email protected]>
Li Yang <[email protected]> <[email protected]>
Li Yang <[email protected]> <[email protected]>
Lorenzo Pieralisi <[email protected]> <[email protected]>
Luca Ceresoli <[email protected]> <[email protected]>
Lukasz Luba <[email protected]> <[email protected]>
Maciej W. Rozycki <[email protected]> <[email protected]>
Maciej W. Rozycki <[email protected]> <[email protected]>
31 changes: 21 additions & 10 deletions drivers/android/binder_alloc.c
@@ -402,12 +402,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;

mmap_read_lock(alloc->vma_vm_mm);
if (!binder_alloc_get_vma(alloc)) {
mmap_read_unlock(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
return ERR_PTR(-ESRCH);
}
mmap_read_unlock(alloc->vma_vm_mm);

data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
@@ -929,17 +932,25 @@ void binder_alloc_print_pages(struct seq_file *m,
* Make sure the binder_alloc is fully initialized, otherwise we might
* read inconsistent state.
*/
if (binder_alloc_get_vma(alloc) != NULL) {
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
page = &alloc->pages[i];
if (!page->page_ptr)
free++;
else if (list_empty(&page->lru))
active++;
else
lru++;
}

mmap_read_lock(alloc->vma_vm_mm);
if (binder_alloc_get_vma(alloc) == NULL) {
mmap_read_unlock(alloc->vma_vm_mm);
goto uninitialized;
}

mmap_read_unlock(alloc->vma_vm_mm);
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
page = &alloc->pages[i];
if (!page->page_ptr)
free++;
else if (list_empty(&page->lru))
active++;
else
lru++;
}

uninitialized:
mutex_unlock(&alloc->mutex);
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
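For context: the hunks above take mmap_read_lock() around the binder_alloc_get_vma() checks so the VMA cannot be torn down between the test and the decision to bail out. A loose userspace analogue of that rule follows; a pthread rwlock stands in for mmap_lock, and every name in it is illustrative rather than binder code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;
static void *cached_vma;        /* stands in for alloc->vma */

/* Writer side: an "unmap" clears the mapping under the write lock. */
static void unmap_vma(void)
{
        pthread_rwlock_wrlock(&mmap_lock);
        free(cached_vma);
        cached_vma = NULL;
        pthread_rwlock_unlock(&mmap_lock);
}

/* Reader side: mirrors the patched check-then-bail pattern. */
static int alloc_buf(void)
{
        pthread_rwlock_rdlock(&mmap_lock);
        if (!cached_vma) {
                pthread_rwlock_unlock(&mmap_lock);
                return -1;      /* no vma: give up, as the patch does */
        }
        pthread_rwlock_unlock(&mmap_lock);
        /* ... sizing/bookkeeping work that does not touch the vma ... */
        return 0;
}

int main(void)
{
        cached_vma = malloc(16);
        printf("mapped:   %d\n", alloc_buf());
        unmap_vma();
        printf("unmapped: %d\n", alloc_buf());
        return 0;
}

Build with cc demo.c -lpthread; the second call reports -1 because the mapping is gone by the time it checks.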
12 changes: 6 additions & 6 deletions fs/fs-writeback.c
@@ -134,10 +134,10 @@ static bool inode_io_list_move_locked(struct inode *inode,

static void wb_wakeup(struct bdi_writeback *wb)
{
spin_lock_bh(&wb->work_lock);
spin_lock_irq(&wb->work_lock);
if (test_bit(WB_registered, &wb->state))
mod_delayed_work(bdi_wq, &wb->dwork, 0);
spin_unlock_bh(&wb->work_lock);
spin_unlock_irq(&wb->work_lock);
}

static void finish_writeback_work(struct bdi_writeback *wb,
@@ -164,15 +164,15 @@ static void wb_queue_work(struct bdi_writeback *wb,
if (work->done)
atomic_inc(&work->done->cnt);

spin_lock_bh(&wb->work_lock);
spin_lock_irq(&wb->work_lock);

if (test_bit(WB_registered, &wb->state)) {
list_add_tail(&work->list, &wb->work_list);
mod_delayed_work(bdi_wq, &wb->dwork, 0);
} else
finish_writeback_work(wb, work);

spin_unlock_bh(&wb->work_lock);
spin_unlock_irq(&wb->work_lock);
}

/**
@@ -2082,13 +2082,13 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
{
struct wb_writeback_work *work = NULL;

spin_lock_bh(&wb->work_lock);
spin_lock_irq(&wb->work_lock);
if (!list_empty(&wb->work_list)) {
work = list_entry(wb->work_list.next,
struct wb_writeback_work, list);
list_del_init(&work->list);
}
spin_unlock_bh(&wb->work_lock);
spin_unlock_irq(&wb->work_lock);
return work;
}

8 changes: 5 additions & 3 deletions fs/ocfs2/dlmglue.c
@@ -3403,10 +3403,12 @@ void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);

ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
osb->cconn = NULL;
if (osb->cconn) {
ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
osb->cconn = NULL;

ocfs2_dlm_shutdown_debug(osb);
ocfs2_dlm_shutdown_debug(osb);
}
}

static int ocfs2_drop_lock(struct ocfs2_super *osb,
3 changes: 1 addition & 2 deletions fs/ocfs2/super.c
@@ -1914,8 +1914,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
!ocfs2_is_hard_readonly(osb))
hangup_needed = 1;

if (osb->cconn)
ocfs2_dlm_shutdown(osb, hangup_needed);
ocfs2_dlm_shutdown(osb, hangup_needed);

ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
debugfs_remove_recursive(osb->osb_debug_root);
2 changes: 1 addition & 1 deletion fs/squashfs/file.c
@@ -593,7 +593,7 @@ static void squashfs_readahead(struct readahead_control *ractl)

res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);

kfree(actor);
squashfs_page_actor_free(actor);

if (res == expected) {
int bytes;
2 changes: 1 addition & 1 deletion fs/squashfs/file_direct.c
@@ -74,7 +74,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
/* Decompress directly into the page cache buffers */
res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);

kfree(actor);
squashfs_page_actor_free(actor);

if (res < 0)
goto mark_errored;
34 changes: 15 additions & 19 deletions fs/squashfs/page_actor.c
@@ -52,6 +52,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
actor->buffer = buffer;
actor->pages = pages;
actor->next_page = 0;
actor->tmp_buffer = NULL;
actor->squashfs_first_page = cache_first_page;
actor->squashfs_next_page = cache_next_page;
actor->squashfs_finish_page = cache_finish_page;
@@ -68,20 +69,9 @@ static void *handle_next_page(struct squashfs_page_actor *actor)

if ((actor->next_page == actor->pages) ||
(actor->next_index != actor->page[actor->next_page]->index)) {
if (actor->alloc_buffer) {
void *tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);

if (tmp_buffer) {
actor->tmp_buffer = tmp_buffer;
actor->next_index++;
actor->returned_pages++;
return tmp_buffer;
}
}

actor->next_index++;
actor->returned_pages++;
return ERR_PTR(-ENOMEM);
return actor->alloc_buffer ? actor->tmp_buffer : ERR_PTR(-ENOMEM);
}

actor->next_index++;
@@ -96,11 +86,10 @@ static void *direct_first_page(struct squashfs_page_actor *actor)

static void *direct_next_page(struct squashfs_page_actor *actor)
{
if (actor->pageaddr)
if (actor->pageaddr) {
kunmap_local(actor->pageaddr);

kfree(actor->tmp_buffer);
actor->pageaddr = actor->tmp_buffer = NULL;
actor->pageaddr = NULL;
}

return handle_next_page(actor);
}
@@ -109,8 +98,6 @@ static void direct_finish_page(struct squashfs_page_actor *actor)
{
if (actor->pageaddr)
kunmap_local(actor->pageaddr);

kfree(actor->tmp_buffer);
}

struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
Expand All @@ -121,14 +108,23 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_
if (actor == NULL)
return NULL;

if (msblk->decompressor->alloc_buffer) {
actor->tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);

if (actor->tmp_buffer == NULL) {
kfree(actor);
return NULL;
}
} else
actor->tmp_buffer = NULL;

actor->length = length ? : pages * PAGE_SIZE;
actor->page = page;
actor->pages = pages;
actor->next_page = 0;
actor->returned_pages = 0;
actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
actor->pageaddr = NULL;
actor->tmp_buffer = NULL;
actor->alloc_buffer = msblk->decompressor->alloc_buffer;
actor->squashfs_first_page = direct_first_page;
actor->squashfs_next_page = direct_next_page;
5 changes: 5 additions & 0 deletions fs/squashfs/page_actor.h
@@ -29,6 +29,11 @@ extern struct squashfs_page_actor *squashfs_page_actor_init(void **buffer,
extern struct squashfs_page_actor *squashfs_page_actor_init_special(
struct squashfs_sb_info *msblk,
struct page **page, int pages, int length);
static inline void squashfs_page_actor_free(struct squashfs_page_actor *actor)
{
kfree(actor->tmp_buffer);
kfree(actor);
}
static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
{
return actor->squashfs_first_page(actor);
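For context: together, the page_actor changes allocate the scratch page once in squashfs_page_actor_init_special() and release it in the new squashfs_page_actor_free(), so the decompressor path never calls kmalloc() and callers cannot leak the buffer. Below is a simplified userspace sketch of that allocate-up-front, free-together pattern; the struct layout and helper names only loosely mirror the kernel code.

#include <stdlib.h>

#define PAGE_SIZE 4096

struct page_actor {
        void *tmp_buffer;       /* scratch page, present only if the decompressor needs one */
        int pages;
};

/* Allocate the scratch buffer at init time, not in the I/O hot path. */
static struct page_actor *page_actor_init(int pages, int alloc_buffer)
{
        struct page_actor *actor = calloc(1, sizeof(*actor));

        if (!actor)
                return NULL;
        if (alloc_buffer) {
                actor->tmp_buffer = malloc(PAGE_SIZE);
                if (!actor->tmp_buffer) {
                        free(actor);
                        return NULL;
                }
        }
        actor->pages = pages;
        return actor;
}

/* One teardown helper frees the buffer and the actor together. */
static void page_actor_free(struct page_actor *actor)
{
        free(actor->tmp_buffer);        /* free(NULL) is a no-op */
        free(actor);
}

int main(void)
{
        struct page_actor *actor = page_actor_init(4, 1);

        if (actor)
                page_actor_free(actor);
        return 0;
}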
7 changes: 5 additions & 2 deletions include/asm-generic/sections.h
@@ -97,7 +97,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
/**
* memory_intersects - checks if the region occupied by an object intersects
* with another memory region
* @begin: virtual address of the beginning of the memory regien
* @begin: virtual address of the beginning of the memory region
* @end: virtual address of the end of the memory region
* @virt: virtual address of the memory object
* @size: size of the memory object
@@ -110,7 +110,7 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
{
void *vend = virt + size;

return (virt >= begin && virt < end) || (vend >= begin && vend < end);
if (virt < end && vend > begin)
return true;

return false;
}

/**
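For context: the old expression only asked whether one of the object's endpoints fell inside [begin, end), so an object that completely covers the region was reported as non-intersecting; the replacement is the standard overlap test for half-open intervals. A small standalone check, illustrative only and not kernel code:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Old logic: is either endpoint of the object inside the region? */
static bool intersects_old(char *begin, char *end, char *virt, size_t size)
{
        char *vend = virt + size;

        return (virt >= begin && virt < end) || (vend >= begin && vend < end);
}

/* New logic: half-open interval overlap. */
static bool intersects_new(char *begin, char *end, char *virt, size_t size)
{
        char *vend = virt + size;

        return virt < end && vend > begin;
}

int main(void)
{
        char mem[100];

        /* Object [10, 90) completely covers region [40, 60): they intersect. */
        printf("old: %d\n", intersects_old(mem + 40, mem + 60, mem + 10, 80)); /* 0: missed */
        printf("new: %d\n", intersects_new(mem + 40, mem + 60, mem + 10, 80)); /* 1 */
        return 0;
}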
15 changes: 13 additions & 2 deletions include/linux/memcontrol.h
@@ -987,19 +987,30 @@ static inline void mod_memcg_page_state(struct page *page,

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
return READ_ONCE(memcg->vmstats.state[idx]);
long x = READ_ONCE(memcg->vmstats.state[idx]);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
#endif
return x;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
struct mem_cgroup_per_node *pn;
long x;

if (mem_cgroup_disabled())
return node_page_state(lruvec_pgdat(lruvec), idx);

pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
return READ_ONCE(pn->lruvec_stats.state[idx]);
x = READ_ONCE(pn->lruvec_stats.state[idx]);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
#endif
return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
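For context: this restores the CONFIG_SMP clamp removed by the reverted commit. The per-CPU deltas behind memcg_page_state() and lruvec_page_state() are summed without synchronisation, so a reader can observe an uncharge before the matching charge and compute a negative total; clamping at zero keeps the reported value sane. A rough single-threaded illustration with hypothetical helper names, not the memcg code itself:

#include <stdio.h>

#define NR_CPUS 2

static long pcpu[NR_CPUS];      /* per-CPU deltas for one statistic */

static long read_stat(long cpu0_snapshot)
{
        /* The reader sampled CPU 0 before the update pair below landed. */
        long sum = cpu0_snapshot + pcpu[1];

        /* Equivalent of the CONFIG_SMP clamp being restored. */
        if (sum < 0)
                sum = 0;
        return sum;
}

int main(void)
{
        long cpu0_snapshot = pcpu[0];   /* reader walks CPU 0 first: sees 0 */

        /* A charge on CPU 0 and the matching uncharge on CPU 1 race in. */
        pcpu[0] += 1;
        pcpu[1] -= 1;

        /* Without the clamp the reader would report -1 pages. */
        printf("reported: %ld\n", read_stat(cpu0_snapshot));
        return 0;
}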
13 changes: 10 additions & 3 deletions include/linux/mm.h
@@ -1544,9 +1544,16 @@ static inline bool is_longterm_pinnable_page(struct page *page)
if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
return false;
#endif
return !(is_device_coherent_page(page) ||
is_zone_movable_page(page) ||
is_zero_pfn(page_to_pfn(page)));
/* The zero page may always be pinned */
if (is_zero_pfn(page_to_pfn(page)))
return true;

/* Coherent device memory must always allow eviction. */
if (is_device_coherent_page(page))
return false;

/* Otherwise, non-movable zone pages can be pinned. */
return !is_zone_movable_page(page);
}
#else
static inline bool is_longterm_pinnable_page(struct page *page)
1 change: 1 addition & 0 deletions kernel/crash_core.c
@@ -494,6 +494,7 @@ static int __init crash_save_vmcoreinfo_init(void)

#ifdef CONFIG_KALLSYMS
VMCOREINFO_SYMBOL(kallsyms_names);
VMCOREINFO_SYMBOL(kallsyms_num_syms);
VMCOREINFO_SYMBOL(kallsyms_token_table);
VMCOREINFO_SYMBOL(kallsyms_token_index);
#ifdef CONFIG_KALLSYMS_BASE_RELATIVE
10 changes: 5 additions & 5 deletions mm/backing-dev.c
@@ -260,10 +260,10 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
unsigned long timeout;

timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
spin_lock_bh(&wb->work_lock);
spin_lock_irq(&wb->work_lock);
if (test_bit(WB_registered, &wb->state))
queue_delayed_work(bdi_wq, &wb->dwork, timeout);
spin_unlock_bh(&wb->work_lock);
spin_unlock_irq(&wb->work_lock);
}

static void wb_update_bandwidth_workfn(struct work_struct *work)
@@ -334,12 +334,12 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
static void wb_shutdown(struct bdi_writeback *wb)
{
/* Make sure nobody queues further work */
spin_lock_bh(&wb->work_lock);
spin_lock_irq(&wb->work_lock);
if (!test_and_clear_bit(WB_registered, &wb->state)) {
spin_unlock_bh(&wb->work_lock);
spin_unlock_irq(&wb->work_lock);
return;
}
spin_unlock_bh(&wb->work_lock);
spin_unlock_irq(&wb->work_lock);

cgwb_remove_from_bdi_list(wb);
/*
2 changes: 2 additions & 0 deletions mm/bootmem_info.c
@@ -12,6 +12,7 @@
#include <linux/memblock.h>
#include <linux/bootmem_info.h>
#include <linux/memory_hotplug.h>
#include <linux/kmemleak.h>

void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
{
@@ -33,6 +34,7 @@ void put_page_bootmem(struct page *page)
ClearPagePrivate(page);
set_page_private(page, 0);
INIT_LIST_HEAD(&page->lru);
kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
free_reserved_page(page);
}
}
3 changes: 3 additions & 0 deletions mm/damon/dbgfs.c
@@ -818,6 +818,9 @@ static int dbgfs_mk_context(char *name)
return -ENOENT;

new_dir = debugfs_create_dir(name, root);
/* Below check is required for a potential duplicated name case */
if (IS_ERR(new_dir))
return PTR_ERR(new_dir);
dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;

new_ctx = dbgfs_new_ctx();
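For context: debugfs_create_dir() signals failure (including the duplicate-name case this patch guards against) with an error-encoded pointer rather than NULL, which is why the added check uses IS_ERR()/PTR_ERR(). A tiny userspace re-creation of that encoding, purely illustrative; the real macros live in include/linux/err.h:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Userspace re-creation of the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for debugfs_create_dir(): fails with -EEXIST on a duplicate name. */
static void *create_dir(const char *name, int already_exists)
{
        if (already_exists)
                return ERR_PTR(-EEXIST);
        return (void *)name;    /* any non-error cookie */
}

int main(void)
{
        void *dir = create_dir("ctx", 1);

        if (IS_ERR(dir))        /* the check dbgfs_mk_context() now performs */
                printf("create failed: %ld\n", PTR_ERR(dir));
        return 0;
}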