Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - a few hot fixes

 - ocfs2 updates

 - almost all of -mm (slab-generic, slab, slub, kmemleak, kasan,
   cleanups, debug, pagecache, memcg, gup, pagemap, memory-hotplug,
   sparsemem, vmalloc, initialization, z3fold, compaction, mempolicy,
   oom-kill, hugetlb, migration, thp, mmap, madvise, shmem, zswap,
   zsmalloc)

* emailed patches from Andrew Morton <[email protected]>: (132 commits)
  mm/zsmalloc.c: fix a -Wunused-function warning
  zswap: do not map same object twice
  zswap: use movable memory if zpool support allocate movable memory
  zpool: add malloc_support_movable to zpool_driver
  shmem: fix obsolete comment in shmem_getpage_gfp()
  mm/madvise: reduce code duplication in error handling paths
  mm: mmap: increase sockets maximum memory size pgoff for 32bits
  mm/mmap.c: refine find_vma_prev() with rb_last()
  riscv: make mmap allocation top-down by default
  mips: use generic mmap top-down layout and brk randomization
  mips: replace arch specific way to determine 32bit task with generic version
  mips: adjust brk randomization offset to fit generic version
  mips: use STACK_TOP when computing mmap base address
  mips: properly account for stack randomization and stack guard gap
  arm: use generic mmap top-down layout and brk randomization
  arm: use STACK_TOP when computing mmap base address
  arm: properly account for stack randomization and stack guard gap
  arm64, mm: make randomization selected by generic topdown mmap layout
  arm64, mm: move generic mmap layout functions to mm
  arm64: consider stack randomization for mmap base only when necessary
  ...
torvalds committed Sep 24, 2019
2 parents 5184d44 + 2b38d01 commit 9c9fa97
Showing 204 changed files with 2,273 additions and 2,444 deletions.
13 changes: 9 additions & 4 deletions Documentation/ABI/testing/sysfs-kernel-slab
@@ -429,10 +429,15 @@ KernelVersion:	2.6.22
 Contact:	Pekka Enberg <[email protected]>,
 		Christoph Lameter <[email protected]>
 Description:
-		The shrink file is written when memory should be reclaimed from
-		a cache. Empty partial slabs are freed and the partial list is
-		sorted so the slabs with the fewest available objects are used
-		first.
+		The shrink file is used to reclaim unused slab cache
+		memory from a cache. Empty per-cpu or partial slabs
+		are freed and the partial list is sorted so the slabs
+		with the fewest available objects are used first.
+		It only accepts a value of "1" on write for shrinking
+		the cache. Other input values are considered invalid.
+		Shrinking slab caches might be expensive and can
+		adversely impact other running applications. So it
+		should be used with care.
 
 What:		/sys/kernel/slab/cache/slab_size
 Date:		May 2007
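The new text describes a write-only trigger that accepts only "1". As an illustration (not part of this commit), a user-space program could request a shrink like this; the cache name "dentry" is just an example of an entry under /sys/kernel/slab/:

    /*
     * Illustrative sketch: ask SLUB to shrink one slab cache from
     * user space. Per the ABI text above, only the value "1" is
     * accepted on write; other values are rejected as invalid.
     */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/kernel/slab/dentry/shrink", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fputs("1", f);
            return fclose(f) ? 1 : 0;
    }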
4 changes: 3 additions & 1 deletion Documentation/admin-guide/cgroup-v1/memory.rst
@@ -85,8 +85,10 @@ Brief summary of control files.
 memory.oom_control		     set/show oom controls.
 memory.numa_stat		     show the number of memory usage per numa
 				     node
-
 memory.kmem.limit_in_bytes	     set/show hard limit for kernel memory
+				     This knob is deprecated and shouldn't be
+				     used. It is planned that this be removed in
+				     the foreseeable future.
 memory.kmem.usage_in_bytes	     show current kernel memory allocation
 memory.kmem.failcnt		     show the number of kernel memory usage
 				     hits limits
2 changes: 2 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -809,6 +809,8 @@
 			enables the feature at boot time. By default, it is
 			disabled and the system will work mostly the same as a
 			kernel built without CONFIG_DEBUG_PAGEALLOC.
+			Note: to get most of debug_pagealloc error reports, it's
+			useful to also enable the page_owner functionality.
 			on: enable the feature
 
 	debugpat	[X86] Enable PAT debugging
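Taking the new note at face value, a boot command line enabling both features might look like the following; this assumes a kernel built with CONFIG_DEBUG_PAGEALLOC and CONFIG_PAGE_OWNER, and page_owner=on is the existing parameter for that feature:

    debug_pagealloc=on page_owner=on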
11 changes: 11 additions & 0 deletions arch/Kconfig
@@ -706,6 +706,17 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
 	  and vice-versa 32-bit applications to call 64-bit mmap().
 	  Required for applications doing different bitness syscalls.
 
+# This allows to use a set of generic functions to determine mmap base
+# address by giving priority to top-down scheme only if the process
+# is not in legacy mode (compat task, unlimited stack size or
+# sysctl_legacy_va_layout).
+# Architecture that selects this option can provide its own version of:
+# - STACK_RND_MASK
+config ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+	bool
+	depends on MMU
+	select ARCH_HAS_ELF_RANDOMIZE
+
 config HAVE_COPY_THREAD_TLS
 	bool
 	help
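For orientation, the generic functions this option switches an architecture over to live in mm/util.c, which is not part of this excerpt. A sketch of the selection logic, reconstructed from the per-arch copies removed later in this diff (a close approximation, not a quote of the new code):

    /*
     * Sketch only - pieced together from the arm/arm64 versions deleted
     * below; the authoritative generic code is in mm/util.c under
     * CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT.
     */
    void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
    {
            unsigned long random_factor = 0UL;

            if (current->flags & PF_RANDOMIZE)
                    random_factor = arch_mmap_rnd();

            /* legacy mode: compat layout, unlimited stack, or sysctl_legacy_va_layout */
            if (mmap_is_legacy(rlim_stack)) {
                    mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                    mm->get_unmapped_area = arch_get_unmapped_area;
            } else {
                    mm->mmap_base = mmap_base(random_factor, rlim_stack);
                    mm->get_unmapped_area = arch_get_unmapped_area_topdown;
            }
    }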
2 changes: 0 additions & 2 deletions arch/alpha/include/asm/pgalloc.h
@@ -53,6 +53,4 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	free_page((unsigned long)pmd);
 }
 
-#define check_pgt_cache()	do { } while (0)
-
 #endif /* _ALPHA_PGALLOC_H */
5 changes: 0 additions & 5 deletions arch/alpha/include/asm/pgtable.h
@@ -359,11 +359,6 @@ extern void paging_init(void);
 
 #include <asm-generic/pgtable.h>
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-
 /* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */
 #define HAVE_ARCH_UNMAPPED_AREA
 
1 change: 0 additions & 1 deletion arch/arc/include/asm/pgalloc.h
@@ -129,7 +129,6 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
 
 #define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
 
-#define check_pgt_cache()	do { } while (0)
 #define pmd_pgtable(pmd)	((pgtable_t) pmd_page_vaddr(pmd))
 
 #endif /* _ASM_ARC_PGALLOC_H */
5 changes: 0 additions & 5 deletions arch/arc/include/asm/pgtable.h
@@ -395,11 +395,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 /* to cope with aliasing VIPT cache */
 #define HAVE_ARCH_UNMAPPED_AREA
 
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()	do { } while (0)
-
 #endif /* __ASSEMBLY__ */
 
 #endif
1 change: 1 addition & 0 deletions arch/arm/Kconfig
@@ -34,6 +34,7 @@ config ARM
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BINFMT_FLAT_ARGVP_ENVP_ON_STACK
 	select BUILDTIME_EXTABLE_SORT if MMU
2 changes: 0 additions & 2 deletions arch/arm/include/asm/pgalloc.h
@@ -15,8 +15,6 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-#define check_pgt_cache()	do { } while (0)
-
 #ifdef CONFIG_MMU
 
 #define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
5 changes: 0 additions & 5 deletions arch/arm/include/asm/pgtable-nommu.h
@@ -70,11 +70,6 @@ typedef pte_t *pte_addr_t;
  */
 extern unsigned int kobjsize(const void *objp);
 
-/*
- * No page table caches to initialise.
- */
-#define pgtable_cache_init()	do { } while (0)
-
 /*
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
2 changes: 0 additions & 2 deletions arch/arm/include/asm/pgtable.h
@@ -368,8 +368,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-#define pgtable_cache_init() do { } while (0)
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* CONFIG_MMU */
2 changes: 0 additions & 2 deletions arch/arm/include/asm/processor.h
@@ -140,8 +140,6 @@ static inline void prefetchw(const void *ptr)
 #endif
 #endif
 
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
 #endif
 
 #endif /* __ASM_ARM_PROCESSOR_H */
5 changes: 0 additions & 5 deletions arch/arm/kernel/process.c
@@ -319,11 +319,6 @@ unsigned long get_wchan(struct task_struct *p)
 	return 0;
 }
 
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-	return randomize_page(mm->brk, 0x02000000);
-}
-
 #ifdef CONFIG_MMU
 #ifdef CONFIG_KUSER_HELPERS
 /*
7 changes: 3 additions & 4 deletions arch/arm/mm/flush.c
@@ -204,18 +204,17 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * coherent with the kernels mapping.
 	 */
 	if (!PageHighMem(page)) {
-		size_t page_size = PAGE_SIZE << compound_order(page);
-		__cpuc_flush_dcache_area(page_address(page), page_size);
+		__cpuc_flush_dcache_area(page_address(page), page_size(page));
 	} else {
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
-			for (i = 0; i < (1 << compound_order(page)); i++) {
+			for (i = 0; i < compound_nr(page); i++) {
 				void *addr = kmap_atomic(page + i);
 				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 				kunmap_atomic(addr);
 			}
 		} else {
-			for (i = 0; i < (1 << compound_order(page)); i++) {
+			for (i = 0; i < compound_nr(page); i++) {
 				void *addr = kmap_high_get(page + i);
 				if (addr) {
 					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
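The two helpers adopted here, page_size() and compound_nr(), are added to include/linux/mm.h elsewhere in this same series. Their definitions, sketched from memory rather than quoted from this diff:

    /* Number of pages in this compound page (1 for an order-0 page). */
    static inline unsigned long compound_nr(struct page *page)
    {
            return 1UL << compound_order(page);
    }

    /* Size in bytes of this (potentially compound) page. */
    static inline unsigned long page_size(struct page *page)
    {
            return PAGE_SIZE << compound_order(page);
    }

Note how the removed local variable named page_size would have shadowed the new helper, which is why call sites like this one were converted.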
52 changes: 0 additions & 52 deletions arch/arm/mm/mmap.c
@@ -17,33 +17,6 @@
 	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
 	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
 
-/* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((TASK_SIZE)/6*5)
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
-	if (current->personality & ADDR_COMPAT_LAYOUT)
-		return 1;
-
-	if (rlim_stack->rlim_cur == RLIM_INFINITY)
-		return 1;
-
-	return sysctl_legacy_va_layout;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
-	unsigned long gap = rlim_stack->rlim_cur;
-
-	if (gap < MIN_GAP)
-		gap = MIN_GAP;
-	else if (gap > MAX_GAP)
-		gap = MAX_GAP;
-
-	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
-}
-
 /*
  * We need to ensure that shared mappings are correctly aligned to
  * avoid aliasing issues with VIPT caches. We need to ensure that
@@ -171,31 +144,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	return addr;
 }
 
-unsigned long arch_mmap_rnd(void)
-{
-	unsigned long rnd;
-
-	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
-
-	return rnd << PAGE_SHIFT;
-}
-
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
-	unsigned long random_factor = 0UL;
-
-	if (current->flags & PF_RANDOMIZE)
-		random_factor = arch_mmap_rnd();
-
-	if (mmap_is_legacy(rlim_stack)) {
-		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-		mm->get_unmapped_area = arch_get_unmapped_area;
-	} else {
-		mm->mmap_base = mmap_base(random_factor, rlim_stack);
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-	}
-}
-
 /*
  * You really shouldn't be using read() or write() on /dev/mem. This
  * might go away in the future.
2 changes: 1 addition & 1 deletion arch/arm64/Kconfig
@@ -15,7 +15,6 @@ config ARM64
 	select ARCH_HAS_DMA_COHERENT_TO_PFN
 	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
-	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FAST_MULTIPLIER
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
@@ -71,6 +70,7 @@ config ARM64
 	select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000 || CC_IS_CLANG
 	select ARCH_SUPPORTS_NUMA_BALANCING
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
+	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
2 changes: 0 additions & 2 deletions arch/arm64/include/asm/pgalloc.h
@@ -15,8 +15,6 @@
 
 #include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */
 
-#define check_pgt_cache()	do { } while (0)
-
 #define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2
2 changes: 0 additions & 2 deletions arch/arm64/include/asm/pgtable.h
@@ -861,8 +861,6 @@ extern int kern_addr_valid(unsigned long addr);
 
 #include <asm-generic/pgtable.h>
 
-static inline void pgtable_cache_init(void) { }
-
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
2 changes: 0 additions & 2 deletions arch/arm64/include/asm/processor.h
@@ -280,8 +280,6 @@ static inline void spin_lock_prefetch(const void *ptr)
 		     "nop") : : "p" (ptr));
 }
 
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
 extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
 extern void __init minsigstksz_setup(void);
 
8 changes: 0 additions & 8 deletions arch/arm64/kernel/process.c
@@ -557,14 +557,6 @@ unsigned long arch_align_stack(unsigned long sp)
 	return sp & ~0xf;
 }
 
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-	if (is_compat_task())
-		return randomize_page(mm->brk, SZ_32M);
-	else
-		return randomize_page(mm->brk, SZ_1G);
-}
-
 /*
  * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
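With this and the matching arm removal above, brk randomization is handled by one shared helper. A sketch of the generic version (in mm/util.c, not shown in this excerpt), reconstructed from the two per-arch copies being deleted; note that the arm constant 0x02000000 is the same 32M used for compat tasks here:

    /* Sketch of the generic replacement, based on the removed arm/arm64 code. */
    unsigned long arch_randomize_brk(struct mm_struct *mm)
    {
            /* Is the current task 32bit ? */
            if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
                    return randomize_page(mm->brk, SZ_32M);

            return randomize_page(mm->brk, SZ_1G);
    }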
3 changes: 1 addition & 2 deletions arch/arm64/mm/flush.c
@@ -56,8 +56,7 @@ void __sync_icache_dcache(pte_t pte)
 	struct page *page = pte_page(pte);
 
 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		sync_icache_aliases(page_address(page),
-				    PAGE_SIZE << compound_order(page));
+		sync_icache_aliases(page_address(page), page_size(page));
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
72 changes: 0 additions & 72 deletions arch/arm64/mm/mmap.c
@@ -20,78 +20,6 @@
 
 #include <asm/cputype.h>
 
-/*
- * Leave enough space between the mmap area and the stack to honour ulimit in
- * the face of randomisation.
- */
-#define MIN_GAP (SZ_128M)
-#define MAX_GAP	(STACK_TOP/6*5)
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
-	if (current->personality & ADDR_COMPAT_LAYOUT)
-		return 1;
-
-	if (rlim_stack->rlim_cur == RLIM_INFINITY)
-		return 1;
-
-	return sysctl_legacy_va_layout;
-}
-
-unsigned long arch_mmap_rnd(void)
-{
-	unsigned long rnd;
-
-#ifdef CONFIG_COMPAT
-	if (test_thread_flag(TIF_32BIT))
-		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
-	else
-#endif
-		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
-	return rnd << PAGE_SHIFT;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
-	unsigned long gap = rlim_stack->rlim_cur;
-	unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
-
-	/* Values close to RLIM_INFINITY can overflow. */
-	if (gap + pad > gap)
-		gap += pad;
-
-	if (gap < MIN_GAP)
-		gap = MIN_GAP;
-	else if (gap > MAX_GAP)
-		gap = MAX_GAP;
-
-	return PAGE_ALIGN(STACK_TOP - gap - rnd);
-}
-
-/*
- * This function, called very early during the creation of a new process VM
- * image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
-	unsigned long random_factor = 0UL;
-
-	if (current->flags & PF_RANDOMIZE)
-		random_factor = arch_mmap_rnd();
-
-	/*
-	 * Fall back to the standard layout if the personality bit is set, or
-	 * if the expected stack growth is unlimited:
-	 */
-	if (mmap_is_legacy(rlim_stack)) {
-		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-		mm->get_unmapped_area = arch_get_unmapped_area;
-	} else {
-		mm->mmap_base = mmap_base(random_factor, rlim_stack);
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-	}
-}
-
 /*
  * You really shouldn't be using read() or write() on /dev/mem. This might go
  * away in the future.
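The mmap_base() deleted here differs from the arm copy in one important way: it pads the stack gap with STACK_RND_MASK and stack_guard_gap so randomization still fits under the rlimit, which is what the "properly account for stack randomization and stack guard gap" patches in this series brought to the other architectures. A sketch of the generic version that replaces it (mm/util.c, not shown in this excerpt; the PF_RANDOMIZE check reflects "arm64: consider stack randomization for mmap base only when necessary"):

    /* Sketch, reconstructed from the arm64 code removed above. */
    static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
    {
            unsigned long gap = rlim_stack->rlim_cur;
            unsigned long pad = stack_guard_gap;

            /* Account for stack randomization if necessary */
            if (current->flags & PF_RANDOMIZE)
                    pad += (STACK_RND_MASK << PAGE_SHIFT);

            /* Values close to RLIM_INFINITY can overflow. */
            if (gap + pad > gap)
                    gap += pad;

            if (gap < MIN_GAP)
                    gap = MIN_GAP;
            else if (gap > MAX_GAP)
                    gap = MAX_GAP;

            return PAGE_ALIGN(STACK_TOP - gap - rnd);
    }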