Sync mm-stable with mm-hotfixes-stable to pick up dependent patches
Merge branch 'mm-hotfixes-stable' into mm-stable
akpm00 committed Feb 1, 2023
2 parents 9a3f21f + ac86f54 commit 5ab0fc1
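
For reference, a sync merge of this kind is normally produced by checking out the destination branch and merging the hotfix branch into it. The commands below are only an illustration inferred from the branch names in the commit message, not something recorded in the commit itself:

    git checkout mm-stable
    git merge mm-hotfixes-stable    # records a two-parent merge commit (9a3f21f + ac86f54)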
Showing 28 changed files with 413 additions and 144 deletions.
1 change: 1 addition & 0 deletions .mailmap
@@ -130,6 +130,7 @@ Domen Puncer <[email protected]>
Douglas Gilbert <[email protected]>
Ed L. Cashin <[email protected]>
Erik Kaneda <[email protected]> <[email protected]>
Eugen Hristev <[email protected]> <[email protected]>
Evgeniy Polyakov <[email protected]>
Ezequiel Garcia <[email protected]> <[email protected]>
Felipe W Damasio <[email protected]>
15 changes: 6 additions & 9 deletions Documentation/admin-guide/cgroup-v2.rst
@@ -1245,13 +1245,17 @@ PAGE_SIZE multiple when read back.
This is a simple interface to trigger memory reclaim in the
target cgroup.

This file accepts a string which contains the number of bytes to
reclaim.
This file accepts a single key, the number of bytes to reclaim.
No nested keys are currently supported.

Example::

echo "1G" > memory.reclaim

The interface can be later extended with nested keys to
configure the reclaim behavior. For example, specify the
type of memory to reclaim from (anon, file, ..).

Please note that the kernel can over or under reclaim from
the target cgroup. If less bytes are reclaimed than the
specified amount, -EAGAIN is returned.
@@ -1263,13 +1267,6 @@ PAGE_SIZE multiple when read back.
This means that the networking layer will not adapt based on
reclaim induced by memory.reclaim.

This file also allows the user to specify the nodes to reclaim from,
via the 'nodes=' key, for example::

echo "1G nodes=0,1" > memory.reclaim

The above instructs the kernel to reclaim memory from nodes 0,1.

memory.peak
A read-only single value file which exists on non-root
cgroups.
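
As an aside: the hunk above removes the nodes= key from the memory.reclaim documentation, leaving the plain byte count as the only documented form ("No nested keys are currently supported"). A minimal invocation against a cgroup v2 hierarchy then looks like the line below; the cgroup path is purely illustrative:

    echo "512M" > /sys/fs/cgroup/<group>/memory.reclaim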
7 changes: 5 additions & 2 deletions arch/ia64/kernel/sys_ia64.c
@@ -170,6 +170,9 @@ ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, u
asmlinkage long
ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *tp)
{
struct timespec64 rtn_tp;
s64 tick_ns;

/*
* ia64's clock_gettime() syscall is implemented as a vdso call
* fsys_clock_gettime(). Currently it handles only
@@ -185,8 +188,8 @@ ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *
switch (which_clock) {
case CLOCK_REALTIME:
case CLOCK_MONOTONIC:
s64 tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
struct timespec64 rtn_tp = ns_to_timespec64(tick_ns);
tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
rtn_tp = ns_to_timespec64(tick_ns);
return put_timespec64(&rtn_tp, tp);
}

1 change: 1 addition & 0 deletions arch/sh/kernel/vmlinux.lds.S
@@ -4,6 +4,7 @@
* Written by Niibe Yutaka and Paul Mundt
*/
OUTPUT_ARCH(sh)
#define RUNTIME_DISCARD_EXIT
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>
6 changes: 1 addition & 5 deletions drivers/of/fdt.c
@@ -26,7 +26,6 @@
#include <linux/serial_core.h>
#include <linux/sysfs.h>
#include <linux/random.h>
#include <linux/kmemleak.h>

#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
@@ -525,12 +524,9 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
size = dt_mem_next_cell(dt_root_size_cells, &prop);

if (size &&
early_init_dt_reserve_memory(base, size, nomap) == 0) {
early_init_dt_reserve_memory(base, size, nomap) == 0)
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
if (!nomap)
kmemleak_alloc_phys(base, size, 0);
}
else
pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
2 changes: 1 addition & 1 deletion fs/freevxfs/Kconfig
@@ -8,7 +8,7 @@ config VXFS_FS
of SCO UnixWare (and possibly others) and optionally available
for Sunsoft Solaris, HP-UX and many other operating systems. However
these particular OS implementations of vxfs may differ in on-disk
data endianess and/or superblock offset. The vxfs module has been
data endianness and/or superblock offset. The vxfs module has been
tested with SCO UnixWare and HP-UX B.10.20 (pa-risc 1.1 arch.)
Currently only readonly access is supported and VxFX versions
2, 3 and 4. Tests were performed with HP-UX VxFS version 3.
4 changes: 1 addition & 3 deletions fs/proc/task_mmu.c
@@ -745,9 +745,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
page = pfn_swap_entry_to_page(swpent);
}
if (page) {
int mapcount = page_mapcount(page);

if (mapcount >= 2)
if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
else
mss->private_hugetlb += huge_page_size(hstate_vma(vma));
2 changes: 1 addition & 1 deletion fs/squashfs/squashfs_fs.h
@@ -183,7 +183,7 @@ static inline int squashfs_block_size(__le32 raw)
#define SQUASHFS_ID_BLOCK_BYTES(A) (SQUASHFS_ID_BLOCKS(A) *\
sizeof(u64))
/* xattr id lookup table defines */
#define SQUASHFS_XATTR_BYTES(A) ((A) * sizeof(struct squashfs_xattr_id))
#define SQUASHFS_XATTR_BYTES(A) (((u64) (A)) * sizeof(struct squashfs_xattr_id))

#define SQUASHFS_XATTR_BLOCK(A) (SQUASHFS_XATTR_BYTES(A) / \
SQUASHFS_METADATA_SIZE)
2 changes: 1 addition & 1 deletion fs/squashfs/squashfs_fs_sb.h
@@ -63,7 +63,7 @@ struct squashfs_sb_info {
long long bytes_used;
unsigned int inodes;
unsigned int fragments;
int xattr_ids;
unsigned int xattr_ids;
unsigned int ids;
bool panic_on_errors;
const struct squashfs_decompressor_thread_ops *thread_ops;
4 changes: 2 additions & 2 deletions fs/squashfs/xattr.h
@@ -10,12 +10,12 @@

#ifdef CONFIG_SQUASHFS_XATTR
extern __le64 *squashfs_read_xattr_id_table(struct super_block *, u64,
u64 *, int *);
u64 *, unsigned int *);
extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
unsigned int *, unsigned long long *);
#else
static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
u64 start, u64 *xattr_table_start, int *xattr_ids)
u64 start, u64 *xattr_table_start, unsigned int *xattr_ids)
{
struct squashfs_xattr_id_table *id_table;

4 changes: 2 additions & 2 deletions fs/squashfs/xattr_id.c
@@ -56,7 +56,7 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
* Read uncompressed xattr id lookup table indexes from disk into memory
*/
__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
u64 *xattr_table_start, int *xattr_ids)
u64 *xattr_table_start, unsigned int *xattr_ids)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
unsigned int len, indexes;
@@ -76,7 +76,7 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
/* Sanity check values */

/* there is always at least one xattr id */
if (*xattr_ids == 0)
if (*xattr_ids <= 0)
return ERR_PTR(-EINVAL);

len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
4 changes: 2 additions & 2 deletions include/linux/highmem-internal.h
@@ -200,7 +200,7 @@ static inline void *kmap_local_pfn(unsigned long pfn)
static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
kunmap_flush_on_unmap(addr);
kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}

@@ -227,7 +227,7 @@ static inline void *kmap_atomic_pfn(unsigned long pfn)
static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
kunmap_flush_on_unmap(addr);
kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
pagefault_enable();
if (IS_ENABLED(CONFIG_PREEMPT_RT))
13 changes: 13 additions & 0 deletions include/linux/hugetlb.h
@@ -8,6 +8,7 @@
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
@@ -1225,6 +1226,18 @@ static inline __init void hugetlb_cma_reserve(int order)
}
#endif

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
return false;
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
5 changes: 4 additions & 1 deletion include/linux/memcontrol.h
@@ -1688,10 +1688,13 @@ void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
struct bdi_writeback *wb)
{
struct mem_cgroup *memcg;

if (mem_cgroup_disabled())
return;

if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
memcg = folio_memcg(folio);
if (unlikely(memcg && &memcg->css != wb->memcg_css))
mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

3 changes: 1 addition & 2 deletions include/linux/swap.h
@@ -418,8 +418,7 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
unsigned int reclaim_options,
nodemask_t *nodemask);
unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
pg_data_t *pgdat,
3 changes: 2 additions & 1 deletion lib/Kconfig.debug
@@ -754,6 +754,7 @@ config DEBUG_KMEMLEAK
select KALLSYMS
select CRC32
select STACKDEPOT
select STACKDEPOT_ALWAYS_INIT if !DEBUG_KMEMLEAK_DEFAULT_OFF
help
Say Y here if you want to enable the memory leak
detector. The memory allocation/freeing is traced in a way
@@ -1207,7 +1208,7 @@ config SCHED_DEBUG
depends on DEBUG_KERNEL && PROC_FS
default y
help
If you say Y here, the /proc/sched_debug file will be provided
If you say Y here, the /sys/kernel/debug/sched file will be provided
that can help debug the scheduler. The runtime overhead of this
option is minimal.

22 changes: 11 additions & 11 deletions lib/maple_tree.c
@@ -667,12 +667,13 @@ static inline unsigned long mte_pivot(const struct maple_enode *mn,
unsigned char piv)
{
struct maple_node *node = mte_to_node(mn);
enum maple_type type = mte_node_type(mn);

if (piv >= mt_pivots[piv]) {
if (piv >= mt_pivots[type]) {
WARN_ON(1);
return 0;
}
switch (mte_node_type(mn)) {
switch (type) {
case maple_arange_64:
return node->ma64.pivot[piv];
case maple_range_64:
@@ -4876,7 +4877,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
unsigned long *pivots, *gaps;
void __rcu **slots;
unsigned long gap = 0;
unsigned long max, min, index;
unsigned long max, min;
unsigned char offset;

if (unlikely(mas_is_err(mas)))
@@ -4898,8 +4899,7 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
min = mas_safe_min(mas, pivots, --offset);

max = mas_safe_pivot(mas, pivots, offset, type);
index = mas->index;
while (index <= max) {
while (mas->index <= max) {
gap = 0;
if (gaps)
gap = gaps[offset];
@@ -4930,10 +4930,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
min = mas_safe_min(mas, pivots, offset);
}

if (unlikely(index > max)) {
mas_set_err(mas, -EBUSY);
return false;
}
if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
goto no_space;

if (unlikely(ma_is_leaf(type))) {
mas->offset = offset;
@@ -4950,9 +4948,11 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
return false;

ascend:
if (mte_is_root(mas->node))
mas_set_err(mas, -EBUSY);
if (!mte_is_root(mas->node))
return false;

no_space:
mas_set_err(mas, -EBUSY);
return false;
}

(Diffs for the remaining 11 changed files are not shown.)
