mm/execmem, arch: convert remaining overrides of module_alloc to execmem
Extend execmem parameters to accommodate more complex overrides of
module_alloc() by architectures.

This includes specification of a fallback range required by arm, arm64
and powerpc, an EXECMEM_MODULE_DATA type required by powerpc, support
for allocation of KASAN shadow required by s390 and x86, and support
for late initialization of execmem required by arm64.
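
For reference, a sketch of the extended interface that the per-arch diffs
below populate; the type, field, and flag names are taken from those diffs,
but the exact layout of include/linux/execmem.h in this series may differ:

	/* sketch: execmem types and range descriptors used by the arch code below */
	enum execmem_type {
		EXECMEM_DEFAULT,
		EXECMEM_MODULE_TEXT = EXECMEM_DEFAULT,
		EXECMEM_MODULE_DATA,		/* powerpc: data outside the module area */
		EXECMEM_TYPE_MAX,
	};

	struct execmem_range {
		unsigned long	start;
		unsigned long	end;
		unsigned long	fallback_start;	/* arm, arm64, powerpc */
		unsigned long	fallback_end;
		pgprot_t	pgprot;
		unsigned int	alignment;
		unsigned int	flags;		/* e.g. EXECMEM_KASAN_SHADOW */
	};

	struct execmem_info {
		struct execmem_range	ranges[EXECMEM_TYPE_MAX];
	};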

The core implementation of execmem_alloc() takes care of suppressing
warnings when the initial allocation fails but a fallback range is
defined.
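
A minimal sketch of that core behavior, assuming the range descriptor
sketched above (the authoritative implementation lives in mm/execmem.c and
may differ in detail):

	static void *__execmem_alloc(struct execmem_range *range, size_t size)
	{
		bool kasan = range->flags & EXECMEM_KASAN_SHADOW;
		unsigned long vm_flags = VM_FLUSH_RESET_PERMS;
		/* __GFP_NOWARN: a failure here may be recoverable via the fallback */
		gfp_t gfp_flags = GFP_KERNEL | __GFP_NOWARN;
		unsigned long start = range->start;
		unsigned long end = range->end;
		pgprot_t pgprot = range->pgprot;
		unsigned int align = range->alignment;
		void *p;

		if (kasan)
			vm_flags |= VM_DEFER_KMEMLEAK;

		p = __vmalloc_node_range(size, align, start, end, gfp_flags,
					 pgprot, vm_flags, NUMA_NO_NODE,
					 __builtin_return_address(0));
		if (!p && range->fallback_start) {
			/* retry in the fallback range before warning */
			start = range->fallback_start;
			end = range->fallback_end;
			p = __vmalloc_node_range(size, align, start, end, gfp_flags,
						 pgprot, vm_flags, NUMA_NO_NODE,
						 __builtin_return_address(0));
		}

		if (!p) {
			pr_warn_ratelimited("execmem: unable to allocate memory\n");
			return NULL;
		}

		if (kasan && kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0) {
			vfree(p);
			return NULL;
		}

		/* executable mapping: reset the KASAN pointer tag (cf. arm64 below) */
		return kasan_reset_tag(p);
	}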

Signed-off-by: Mike Rapoport (IBM) <[email protected]>
Acked-by: Will Deacon <[email protected]>
Acked-by: Song Liu <[email protected]>
Tested-by: Liviu Dudau <[email protected]>
Signed-off-by: Luis Chamberlain <[email protected]>
rppt authored and mcgrof committed May 14, 2024
1 parent f6bec26 commit 223b5e5
Showing 11 changed files with 246 additions and 185 deletions.
8 changes: 8 additions & 0 deletions arch/Kconfig

@@ -977,6 +977,14 @@ config ARCH_WANTS_MODULES_DATA_IN_VMALLOC
 	  For architectures like powerpc/32 which have constraints on module
 	  allocation and need to allocate module data outside of module area.
 
+config ARCH_WANTS_EXECMEM_LATE
+	bool
+	help
+	  For architectures that do not allocate executable memory early on
+	  boot, but rather require its initialization late when there is
+	  enough entropy for module space randomization, for instance
+	  arm64.
+
 config HAVE_IRQ_EXIT_ON_IRQ_STACK
 	bool
 	help
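
A sketch of how the core side can honor ARCH_WANTS_EXECMEM_LATE; the hook
is assumed to live in mm/execmem.c, and the initcall level shown here is an
assumption:

	#ifdef CONFIG_ARCH_WANTS_EXECMEM_LATE
	/*
	 * Defer execmem setup to an initcall, by which point the arch has
	 * gathered enough entropy for module space randomization (arm64).
	 */
	static int __init execmem_late_init(void)
	{
		execmem_init();	/* assumed to call execmem_arch_setup() internally */
		return 0;
	}
	core_initcall(execmem_late_init);
	#endif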
41 changes: 25 additions & 16 deletions arch/arm/kernel/module.c

@@ -16,6 +16,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/gfp.h>
+#include <linux/execmem.h>
 
 #include <asm/sections.h>
 #include <asm/smp_plat.h>
@@ -34,23 +35,31 @@
 #endif
 
 #ifdef CONFIG_MMU
-void *module_alloc(unsigned long size)
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
 {
-	gfp_t gfp_mask = GFP_KERNEL;
-	void *p;
-
-	/* Silence the initial allocation */
-	if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS))
-		gfp_mask |= __GFP_NOWARN;
-
-	p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
-				__builtin_return_address(0));
-	if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
-		return p;
-	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
-				__builtin_return_address(0));
+	unsigned long fallback_start = 0, fallback_end = 0;
+
+	if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS)) {
+		fallback_start = VMALLOC_START;
+		fallback_end = VMALLOC_END;
+	}
+
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start		= MODULES_VADDR,
+				.end		= MODULES_END,
+				.pgprot		= PAGE_KERNEL_EXEC,
+				.alignment	= 1,
+				.fallback_start	= fallback_start,
+				.fallback_end	= fallback_end,
+			},
+		},
+	};
+
+	return &execmem_info;
 }
 #endif
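
With the arm override removed, module text comes from the generic execmem
path. Earlier in the series the common module_alloc() presumably reduces to
something like the following sketch (EXECMEM_MODULE_TEXT aliasing
EXECMEM_DEFAULT is an assumption carried over from the interface sketch
above):

	void *module_alloc(unsigned long size)
	{
		return execmem_alloc(EXECMEM_MODULE_TEXT, size);
	}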
1 change: 1 addition & 0 deletions arch/arm64/Kconfig

@@ -105,6 +105,7 @@ config ARM64
 	select ARCH_WANT_FRAME_POINTERS
 	select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
 	select ARCH_WANT_LD_ORPHAN_WARN
+	select ARCH_WANTS_EXECMEM_LATE if EXECMEM
 	select ARCH_WANTS_NO_INSTR
 	select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
 	select ARCH_HAS_UBSAN
55 changes: 31 additions & 24 deletions arch/arm64/kernel/module.c

@@ -20,6 +20,7 @@
 #include <linux/random.h>
 #include <linux/scs.h>
 #include <linux/vmalloc.h>
+#include <linux/execmem.h>
 
 #include <asm/alternative.h>
 #include <asm/insn.h>
@@ -108,41 +109,47 @@ static int __init module_init_limits(void)
 
 	return 0;
 }
-subsys_initcall(module_init_limits);
 
-void *module_alloc(unsigned long size)
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
 {
-	void *p = NULL;
+	unsigned long fallback_start = 0, fallback_end = 0;
+	unsigned long start = 0, end = 0;
+
+	module_init_limits();
 
 	/*
	 * Where possible, prefer to allocate within direct branch range of the
	 * kernel such that no PLTs are necessary.
	 */
 	if (module_direct_base) {
-		p = __vmalloc_node_range(size, MODULE_ALIGN,
-					 module_direct_base,
-					 module_direct_base + SZ_128M,
-					 GFP_KERNEL | __GFP_NOWARN,
-					 PAGE_KERNEL, 0, NUMA_NO_NODE,
-					 __builtin_return_address(0));
-	}
+		start = module_direct_base;
+		end = module_direct_base + SZ_128M;
 
-	if (!p && module_plt_base) {
-		p = __vmalloc_node_range(size, MODULE_ALIGN,
-					 module_plt_base,
-					 module_plt_base + SZ_2G,
-					 GFP_KERNEL | __GFP_NOWARN,
-					 PAGE_KERNEL, 0, NUMA_NO_NODE,
-					 __builtin_return_address(0));
-	}
-
-	if (!p) {
-		pr_warn_ratelimited("%s: unable to allocate memory\n",
-				    __func__);
+		if (module_plt_base) {
+			fallback_start = module_plt_base;
+			fallback_end = module_plt_base + SZ_2G;
+		}
+	} else if (module_plt_base) {
+		start = module_plt_base;
+		end = module_plt_base + SZ_2G;
 	}
 
-	/* Memory is intended to be executable, reset the pointer tag. */
-	return kasan_reset_tag(p);
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start		= start,
+				.end		= end,
+				.pgprot		= PAGE_KERNEL,
+				.alignment	= 1,
+				.fallback_start	= fallback_start,
+				.fallback_end	= fallback_end,
+			},
+		},
+	};
+
+	return &execmem_info;
 }
 
 enum aarch64_reloc_op {
60 changes: 39 additions & 21 deletions arch/powerpc/kernel/module.c

@@ -10,6 +10,7 @@
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/bug.h>
+#include <linux/execmem.h>
 #include <asm/module.h>
 #include <linux/uaccess.h>
 #include <asm/firmware.h>
@@ -89,39 +90,56 @@ int module_finalize(const Elf_Ehdr *hdr,
 	return 0;
 }
 
-static __always_inline void *
-__module_alloc(unsigned long size, unsigned long start, unsigned long end, bool nowarn)
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
 {
 	pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
-	gfp_t gfp = GFP_KERNEL | (nowarn ? __GFP_NOWARN : 0);
+	unsigned long fallback_start = 0, fallback_end = 0;
+	unsigned long start, end;
 
 	/*
-	 * Don't do huge page allocations for modules yet until more testing
-	 * is done. STRICT_MODULE_RWX may require extra work to support this
-	 * too.
+	 * BOOK3S_32 and 8xx define MODULES_VADDR for text allocations and
+	 * allow allocating data in the entire vmalloc space
	 */
-	return __vmalloc_node_range(size, 1, start, end, gfp, prot,
-				    VM_FLUSH_RESET_PERMS,
-				    NUMA_NO_NODE, __builtin_return_address(0));
-}
-
-void *module_alloc(unsigned long size)
-{
 #ifdef MODULES_VADDR
 	unsigned long limit = (unsigned long)_etext - SZ_32M;
-	void *ptr = NULL;
 
 	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
 
 	/* First try within 32M limit from _etext to avoid branch trampolines */
-	if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit)
-		ptr = __module_alloc(size, limit, MODULES_END, true);
-
-	if (!ptr)
-		ptr = __module_alloc(size, MODULES_VADDR, MODULES_END, false);
+	if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) {
+		start = limit;
+		fallback_start = MODULES_VADDR;
+		fallback_end = MODULES_END;
+	} else {
+		start = MODULES_VADDR;
+	}
 
-	return ptr;
+	end = MODULES_END;
 #else
-	return __module_alloc(size, VMALLOC_START, VMALLOC_END, false);
+	start = VMALLOC_START;
+	end = VMALLOC_END;
 #endif
+
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.start		= start,
+				.end		= end,
+				.pgprot		= prot,
+				.alignment	= 1,
+				.fallback_start	= fallback_start,
+				.fallback_end	= fallback_end,
+			},
+			[EXECMEM_MODULE_DATA] = {
+				.start		= VMALLOC_START,
+				.end		= VMALLOC_END,
+				.pgprot		= PAGE_KERNEL,
+				.alignment	= 1,
+			},
+		},
+	};
+
+	return &execmem_info;
 }
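
The extra EXECMEM_MODULE_DATA range lets powerpc keep module data in the
full vmalloc space while text stays within branch range of _etext. On the
module-loader side, type selection might look like this sketch
(module_memory_alloc() and mod_mem_type_is_data() are assumed names, not
shown in this commit):

	/* sketch: pick the execmem type by module memory kind */
	static void *module_memory_alloc(unsigned int size, enum mod_mem_type type)
	{
		enum execmem_type execmem_type;

		if (mod_mem_type_is_data(type))
			execmem_type = EXECMEM_MODULE_DATA;
		else
			execmem_type = EXECMEM_MODULE_TEXT;

		return execmem_alloc(execmem_type, size);
	}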
54 changes: 22 additions & 32 deletions arch/s390/kernel/module.c

@@ -37,41 +37,31 @@
 
 #define PLT_ENTRY_SIZE 22
 
-static unsigned long get_module_load_offset(void)
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
 {
-	static DEFINE_MUTEX(module_kaslr_mutex);
-	static unsigned long module_load_offset;
-
-	if (!kaslr_enabled())
-		return 0;
-	/*
-	 * Calculate the module_load_offset the first time this code
-	 * is called. Once calculated it stays the same until reboot.
-	 */
-	mutex_lock(&module_kaslr_mutex);
-	if (!module_load_offset)
+	unsigned long module_load_offset = 0;
+	unsigned long start;
+
+	if (kaslr_enabled())
 		module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
-	mutex_unlock(&module_kaslr_mutex);
-	return module_load_offset;
-}
-
-void *module_alloc(unsigned long size)
-{
-	gfp_t gfp_mask = GFP_KERNEL;
-	void *p;
-
-	if (PAGE_ALIGN(size) > MODULES_LEN)
-		return NULL;
-	p = __vmalloc_node_range(size, MODULE_ALIGN,
-				 MODULES_VADDR + get_module_load_offset(),
-				 MODULES_END, gfp_mask, PAGE_KERNEL,
-				 VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
-				 NUMA_NO_NODE, __builtin_return_address(0));
-	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
-		vfree(p);
-		return NULL;
-	}
-	return p;
+
+	start = MODULES_VADDR + module_load_offset;
+
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.flags		= EXECMEM_KASAN_SHADOW,
+				.start		= start,
+				.end		= MODULES_END,
+				.pgprot		= PAGE_KERNEL,
+				.alignment	= MODULE_ALIGN,
+			},
+		},
+	};
+
+	return &execmem_info;
 }
 
 #ifdef CONFIG_FUNCTION_TRACER
70 changes: 23 additions & 47 deletions arch/x86/kernel/module.c

@@ -19,6 +19,7 @@
 #include <linux/jump_label.h>
 #include <linux/random.h>
 #include <linux/memory.h>
+#include <linux/execmem.h>
 
 #include <asm/text-patching.h>
 #include <asm/page.h>
@@ -36,55 +37,30 @@ do { \
 } while (0)
 #endif
 
-#ifdef CONFIG_RANDOMIZE_BASE
-static unsigned long module_load_offset;
+static struct execmem_info execmem_info __ro_after_init;
 
-/* Mutex protects the module_load_offset. */
-static DEFINE_MUTEX(module_kaslr_mutex);
-
-static unsigned long int get_module_load_offset(void)
-{
-	if (kaslr_enabled()) {
-		mutex_lock(&module_kaslr_mutex);
-		/*
-		 * Calculate the module_load_offset the first time this
-		 * code is called. Once calculated it stays the same until
-		 * reboot.
-		 */
-		if (module_load_offset == 0)
-			module_load_offset =
-				get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
-		mutex_unlock(&module_kaslr_mutex);
-	}
-	return module_load_offset;
-}
-#else
-static unsigned long int get_module_load_offset(void)
+struct execmem_info __init *execmem_arch_setup(void)
 {
-	return 0;
-}
-#endif
-
-void *module_alloc(unsigned long size)
-{
-	gfp_t gfp_mask = GFP_KERNEL;
-	void *p;
-
-	if (PAGE_ALIGN(size) > MODULES_LEN)
-		return NULL;
-
-	p = __vmalloc_node_range(size, MODULE_ALIGN,
-				 MODULES_VADDR + get_module_load_offset(),
-				 MODULES_END, gfp_mask, PAGE_KERNEL,
-				 VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK,
-				 NUMA_NO_NODE, __builtin_return_address(0));
-
-	if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) {
-		vfree(p);
-		return NULL;
-	}
-
-	return p;
+	unsigned long start, offset = 0;
+
+	if (kaslr_enabled())
+		offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;
+
+	start = MODULES_VADDR + offset;
+
+	execmem_info = (struct execmem_info){
+		.ranges = {
+			[EXECMEM_DEFAULT] = {
+				.flags		= EXECMEM_KASAN_SHADOW,
+				.start		= start,
+				.end		= MODULES_END,
+				.pgprot		= PAGE_KERNEL,
+				.alignment	= MODULE_ALIGN,
+			},
+		},
+	};
+
+	return &execmem_info;
 }
 
 #ifdef CONFIG_X86_32