Merge tag 'mm-nonmm-stable-2022-05-26' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc updates from Andrew Morton:
 "The non-MM patch queue for this merge window.

  Not a lot of material this cycle. Many singleton patches against
  various subsystems. Most notably some maintenance work in ocfs2
  and initramfs"

* tag 'mm-nonmm-stable-2022-05-26' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (65 commits)
  kcov: update pos before writing pc in trace function
  ocfs2: dlmfs: fix error handling of user_dlm_destroy_lock
  ocfs2: dlmfs: don't clear USER_LOCK_ATTACHED when destroying lock
  fs/ntfs: remove redundant variable idx
  fat: remove time truncations in vfat_create/vfat_mkdir
  fat: report creation time in statx
  fat: ignore ctime updates, and keep ctime identical to mtime in memory
  fat: split fat_truncate_time() into separate functions
  MAINTAINERS: add Muchun as a memcg reviewer
  proc/sysctl: make protected_* world readable
  ia64: mca: drop redundant spinlock initialization
  tty: fix deadlock caused by calling printk() under tty_port->lock
  relay: remove redundant assignment to pointer buf
  fs/ntfs3: validate BOOT sectors_per_clusters
  lib/string_helpers: fix not adding strarray to device's resource list
  kernel/crash_core.c: remove redundant check of ck_cmdline
  ELF, uapi: fixup ELF_ST_TYPE definition
  ipc/mqueue: use get_tree_nodev() in mqueue_get_tree()
  ipc: update semtimedop() to use hrtimer
  ipc/sem: remove redundant assignments
  ...
torvalds committed May 27, 2022
2 parents 8bdc2a1 + 3159d79 commit 6f66404
Showing 83 changed files with 1,181 additions and 706 deletions.
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -5056,6 +5056,7 @@ M: Johannes Weiner <[email protected]>
M: Michal Hocko <[email protected]>
M: Roman Gushchin <[email protected]>
M: Shakeel Butt <[email protected]>
R: Muchun Song <[email protected]>
L: [email protected]
L: [email protected]
S: Maintained
@@ -16097,7 +16098,6 @@ F: include/asm-generic/syscall.h
F: include/linux/ptrace.h
F: include/linux/regset.h
F: include/uapi/linux/ptrace.h
F: include/uapi/linux/ptrace.h
F: kernel/ptrace.c

PULSE8-CEC DRIVER
1 change: 0 additions & 1 deletion arch/alpha/lib/csum_partial_copy.c
@@ -353,7 +353,6 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len)
return 0;
return __csum_and_copy(src, dst, len);
}
EXPORT_SYMBOL(csum_and_copy_from_user);

__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len)
27 changes: 4 additions & 23 deletions arch/arm/kernel/crash_dump.c
@@ -14,22 +14,10 @@
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/uio.h>

/**
* copy_oldmem_page() - copy one page from old kernel memory
* @pfn: page frame number to be copied
* @buf: buffer where the copied page is placed
* @csize: number of bytes to copy
* @offset: offset in bytes into the page
* @userbuf: if set, @buf is int he user address space
*
* This function copies one page from old kernel memory into buffer pointed by
* @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
* copied or negative error in case of failure.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset,
int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
size_t csize, unsigned long offset)
{
void *vaddr;

@@ -40,14 +28,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!vaddr)
return -ENOMEM;

if (userbuf) {
if (copy_to_user(buf, vaddr + offset, csize)) {
iounmap(vaddr);
return -EFAULT;
}
} else {
memcpy(buf, vaddr + offset, csize);
}
csize = copy_to_iter(vaddr + offset, csize, iter);

iounmap(vaddr);
return csize;
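For context only (not part of this commit): a minimal, hypothetical sketch of how a kernel-side caller could drive the new iov_iter-based copy_oldmem_page() shown above with a plain kernel buffer. The helper name example_read_oldmem_kernel() is invented for illustration.

#include <linux/kernel.h>
#include <linux/uio.h>
#include <linux/crash_dump.h>

/* Wrap a kernel buffer in a kvec-backed iov_iter and copy part of one
 * old-kernel page through copy_oldmem_page(). */
static ssize_t example_read_oldmem_kernel(unsigned long pfn, void *dst,
					  size_t csize, unsigned long offset)
{
	struct kvec kvec = { .iov_base = dst, .iov_len = csize };
	struct iov_iter iter;

	/* READ: the iterator is the destination of the copy. */
	iov_iter_kvec(&iter, READ, &kvec, 1, csize);
	return copy_oldmem_page(&iter, pfn, csize, offset);
}

Because the destination is described by an iov_iter, the same copy_oldmem_page() body serves kernel and user buffers alike, which is why the per-architecture userbuf/copy_to_user branches above could be dropped.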
29 changes: 4 additions & 25 deletions arch/arm64/kernel/crash_dump.c
@@ -9,25 +9,11 @@
#include <linux/crash_dump.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <asm/memory.h>

/**
* copy_oldmem_page() - copy one page from old kernel memory
* @pfn: page frame number to be copied
* @buf: buffer where the copied page is placed
* @csize: number of bytes to copy
* @offset: offset in bytes into the page
* @userbuf: if set, @buf is in a user address space
*
* This function copies one page from old kernel memory into buffer pointed by
* @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
* copied or negative error in case of failure.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset,
int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
size_t csize, unsigned long offset)
{
void *vaddr;

@@ -38,14 +24,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!vaddr)
return -ENOMEM;

if (userbuf) {
if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
memunmap(vaddr);
return -EFAULT;
}
} else {
memcpy(buf, vaddr + offset, csize);
}
csize = copy_to_iter(vaddr + offset, csize, iter);

memunmap(vaddr);

32 changes: 4 additions & 28 deletions arch/ia64/kernel/crash_dump.c
@@ -10,42 +10,18 @@
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/crash_dump.h>

#include <linux/uio.h>
#include <asm/page.h>
#include <linux/uaccess.h>

/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
* @buf: target memory address for the copy; this can be in kernel address
* space or user address space (see @userbuf)
* @csize: number of bytes to copy
* @offset: offset in bytes into the page (based on pfn) to begin the copy
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
* otherwise @buf is in kernel address space, use memcpy().
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*
* Calling copy_to_user() in atomic context is not desirable. Hence first
* copying the data to a pre-allocated kernel page and then copying to user
* space in non-atomic context.
*/
ssize_t
copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
size_t csize, unsigned long offset)
{
void *vaddr;

if (!csize)
return 0;
vaddr = __va(pfn<<PAGE_SHIFT);
if (userbuf) {
if (copy_to_user(buf, (vaddr + offset), csize)) {
return -EFAULT;
}
} else
memcpy(buf, (vaddr + offset), csize);
csize = copy_to_iter(vaddr + offset, csize, iter);
return csize;
}

64 changes: 32 additions & 32 deletions arch/ia64/kernel/kprobes.c
@@ -29,38 +29,38 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
{ M, I, I }, /* 00 */
{ M, I, I }, /* 01 */
{ M, I, I }, /* 02 */
{ M, I, I }, /* 03 */
{ M, L, X }, /* 04 */
{ M, L, X }, /* 05 */
{ u, u, u }, /* 06 */
{ u, u, u }, /* 07 */
{ M, M, I }, /* 08 */
{ M, M, I }, /* 09 */
{ M, M, I }, /* 0A */
{ M, M, I }, /* 0B */
{ M, F, I }, /* 0C */
{ M, F, I }, /* 0D */
{ M, M, F }, /* 0E */
{ M, M, F }, /* 0F */
{ M, I, B }, /* 10 */
{ M, I, B }, /* 11 */
{ M, B, B }, /* 12 */
{ M, B, B }, /* 13 */
{ u, u, u }, /* 14 */
{ u, u, u }, /* 15 */
{ B, B, B }, /* 16 */
{ B, B, B }, /* 17 */
{ M, M, B }, /* 18 */
{ M, M, B }, /* 19 */
{ u, u, u }, /* 1A */
{ u, u, u }, /* 1B */
{ M, F, B }, /* 1C */
{ M, F, B }, /* 1D */
{ u, u, u }, /* 1E */
{ u, u, u }, /* 1F */
[0x00] = { M, I, I },
[0x01] = { M, I, I },
[0x02] = { M, I, I },
[0x03] = { M, I, I },
[0x04] = { M, L, X },
[0x05] = { M, L, X },
[0x06] = { u, u, u },
[0x07] = { u, u, u },
[0x08] = { M, M, I },
[0x09] = { M, M, I },
[0x0A] = { M, M, I },
[0x0B] = { M, M, I },
[0x0C] = { M, F, I },
[0x0D] = { M, F, I },
[0x0E] = { M, M, F },
[0x0F] = { M, M, F },
[0x10] = { M, I, B },
[0x11] = { M, I, B },
[0x12] = { M, B, B },
[0x13] = { M, B, B },
[0x14] = { u, u, u },
[0x15] = { u, u, u },
[0x16] = { B, B, B },
[0x17] = { B, B, B },
[0x18] = { M, M, B },
[0x19] = { M, M, B },
[0x1A] = { u, u, u },
[0x1B] = { u, u, u },
[0x1C] = { M, F, B },
[0x1D] = { M, F, B },
[0x1E] = { u, u, u },
[0x1F] = { u, u, u },
};

/* Insert a long branch code */
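As an aside, a small self-contained illustration (not taken from the kernel) of the C99 designated-initializer style the bundle_encoding table above was converted to; the enum and table here are invented for the example.

#include <stdio.h>

enum unit { M, I, B };

/* Designated initializers make each template index explicit, so a row
 * cannot silently shift if the table is reordered or a row is omitted. */
static const enum unit encoding[4][3] = {
	[0x0] = { M, I, I },
	[0x2] = { M, I, B },
	/* unnamed rows such as [0x1] and [0x3] are zero-initialized, i.e. all M */
};

int main(void)
{
	printf("%d\n", encoding[0x2][2]);	/* prints 2, the value of B */
	return 0;
}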
1 change: 0 additions & 1 deletion arch/ia64/kernel/mca.c
@@ -290,7 +290,6 @@ static void ia64_mlogbuf_finish(int wait)
{
BREAK_LOGLEVEL(console_loglevel);

spin_lock_init(&mlogbuf_rlock);
ia64_mlogbuf_dump();
printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
"MCA/INIT might be dodgy or fail.\n");
2 changes: 1 addition & 1 deletion arch/ia64/kernel/palinfo.c
@@ -120,7 +120,7 @@ static const char *mem_attrib[]={
* Input:
* - a pointer to a buffer to hold the string
* - a 64-bit vector
* Ouput:
* Output:
* - a pointer to the end of the buffer
*
*/
2 changes: 1 addition & 1 deletion arch/ia64/kernel/ptrace.c
@@ -2025,7 +2025,7 @@ static void syscall_get_args_cb(struct unw_frame_info *info, void *data)
* - epsinstruction: cfm is set by br.call
* locals don't exist.
*
* For both cases argguments are reachable in cfm.sof - cfm.sol.
* For both cases arguments are reachable in cfm.sof - cfm.sol.
* CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
*/
cfm = pt->cr_ifs;
2 changes: 1 addition & 1 deletion arch/ia64/kernel/traps.c
@@ -309,7 +309,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
/*
* Lower 4 bits are used as a count. Upper bits are a sequence
* number that is updated when count is reset. The cmpxchg will
* fail is seqno has changed. This minimizes mutiple cpus
* fail is seqno has changed. This minimizes multiple cpus
* resetting the count.
*/
if (current_jiffies > last.time)
2 changes: 1 addition & 1 deletion arch/ia64/mm/init.c
@@ -449,7 +449,7 @@ mem_init (void)
memblock_free_all();

/*
* For fsyscall entrpoints with no light-weight handler, use the ordinary
* For fsyscall entrypoints with no light-weight handler, use the ordinary
* (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
* code can tell them apart.
*/
4 changes: 2 additions & 2 deletions arch/ia64/mm/tlb.c
@@ -174,7 +174,7 @@ __setup("nptcg=", set_nptcg);
* override table (in which case we should ignore the value from
* PAL_VM_SUMMARY).
*
* Kernel parameter "nptcg=" overrides maximum number of simultanesous ptc.g
* Kernel parameter "nptcg=" overrides maximum number of simultaneous ptc.g
* purges defined in either PAL_VM_SUMMARY or PAL override table. In this case,
* we should ignore the value from either PAL_VM_SUMMARY or PAL override table.
*
@@ -516,7 +516,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
if (i >= per_cpu(ia64_tr_num, cpu))
return -EBUSY;

/*Record tr info for mca hander use!*/
/*Record tr info for mca handler use!*/
if (i > per_cpu(ia64_tr_used, cpu))
per_cpu(ia64_tr_used, cpu) = i;

2 changes: 0 additions & 2 deletions arch/m68k/lib/checksum.c
@@ -265,8 +265,6 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len)
return sum;
}

EXPORT_SYMBOL(csum_and_copy_from_user);


/*
* copy from kernel space while checksumming, otherwise like csum_partial
27 changes: 4 additions & 23 deletions arch/mips/kernel/crash_dump.c
@@ -1,37 +1,18 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/uio.h>

/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
* @buf: target memory address for the copy; this can be in kernel address
* space or user address space (see @userbuf)
* @csize: number of bytes to copy
* @offset: offset in bytes into the page (based on pfn) to begin the copy
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
* otherwise @buf is in kernel address space, use memcpy().
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
size_t csize, unsigned long offset)
{
void *vaddr;

if (!csize)
return 0;

vaddr = kmap_local_pfn(pfn);

if (!userbuf) {
memcpy(buf, vaddr + offset, csize);
} else {
if (copy_to_user(buf, vaddr + offset, csize))
csize = -EFAULT;
}

csize = copy_to_iter(vaddr + offset, csize, iter);
kunmap_local(vaddr);

return csize;
35 changes: 5 additions & 30 deletions arch/powerpc/kernel/crash_dump.c
@@ -16,7 +16,7 @@
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <asm/rtas.h>
#include <asm/inst.h>

@@ -68,33 +68,8 @@ void __init setup_kdump_trampoline(void)
}
#endif /* CONFIG_NONSTATIC_KERNEL */

static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
unsigned long offset, int userbuf)
{
if (userbuf) {
if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
return -EFAULT;
} else
memcpy(buf, (vaddr + offset), csize);

return csize;
}

/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
* @buf: target memory address for the copy; this can be in kernel address
* space or user address space (see @userbuf)
* @csize: number of bytes to copy
* @offset: offset in bytes into the page (based on pfn) to begin the copy
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
* otherwise @buf is in kernel address space, use memcpy().
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
size_t csize, unsigned long offset)
{
void *vaddr;
phys_addr_t paddr;
@@ -107,10 +82,10 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,

if (memblock_is_region_memory(paddr, csize)) {
vaddr = __va(paddr);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
csize = copy_to_iter(vaddr + offset, csize, iter);
} else {
vaddr = ioremap_cache(paddr, PAGE_SIZE);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
csize = copy_to_iter(vaddr + offset, csize, iter);
iounmap(vaddr);
}
