module: use relative references for __ksymtab entries
An ordinary arm64 defconfig build has ~64 KB worth of __ksymtab entries,
each consisting of two 64-bit fields containing absolute references, to
the symbol itself and to a char array containing its name, respectively.

When we build the same configuration with KASLR enabled, we end up with an
additional ~192 KB of relocations in the .init section, i.e., one 24 byte
entry for each absolute reference, which all need to be processed at boot
time.

Given how the struct kernel_symbol that describes each entry is completely
local to module.c (except for the references emitted by EXPORT_SYMBOL()
itself), we can easily modify it to contain two 32-bit relative references
instead.  This reduces the size of the __ksymtab section by 50% for all
64-bit architectures, and gets rid of the runtime relocations entirely for
architectures implementing KASLR, either via standard PIE linking (arm64)
or using custom host tools (x86).
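
As an illustration (the struct and helper names below are ad hoc, not taken from the patch), the layout change and the decoding step amount to the following; the real helpers introduced here are offset_to_ptr() and kernel_symbol_value(), shown in the diff below.

/* Old layout: two absolute references, 2 x 64 bits on 64-bit architectures. */
struct kernel_symbol_abs {
	unsigned long value;	/* absolute address of the symbol */
	const char *name;	/* absolute address of the name string */
};

/* New layout: two place-relative references, 2 x 32 bits everywhere. */
struct kernel_symbol_rel {
	int value_offset;	/* symbol address minus address of this field */
	int name_offset;	/* name address minus address of this field */
};

/* Decoding: add the signed 32-bit offset to the address of the field itself. */
static unsigned long rel_symbol_value(const struct kernel_symbol_rel *sym)
{
	return (unsigned long)&sym->value_offset + sym->value_offset;
}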

Note that the binary search involving __ksymtab contents relies on each
section being sorted by symbol name.  This is implemented based on the
input section names, not the names in the ksymtab entries, so this patch
does not interfere with that.
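
For reference, the lookup that depends on this ordering is a plain binary search over the name-sorted table; a rough sketch of the shape it takes in kernel/module.c (the in-tree function is lookup_symbol(), using the cmp_name() comparator updated in the diff below):

#include <linux/bsearch.h>

/* Sketch only: binary-search a name-sorted __ksymtab range for 'name'. */
static const struct kernel_symbol *lookup_symbol(const char *name,
						 const struct kernel_symbol *start,
						 const struct kernel_symbol *stop)
{
	return bsearch(name, start, stop - start,
		       sizeof(struct kernel_symbol), cmp_name);
}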

Given that the use of place-relative relocations requires support both in
the toolchain and in the module loader, we cannot enable this feature for
all architectures.  So make it dependent on whether
CONFIG_HAVE_ARCH_PREL32_RELOCATIONS is defined.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ard Biesheuvel <[email protected]>
Acked-by: Jessica Yu <[email protected]>
Acked-by: Michael Ellerman <[email protected]>
Reviewed-by: Will Deacon <[email protected]>
Acked-by: Ingo Molnar <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Bjorn Helgaas <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: James Morris <[email protected]>
Cc: James Morris <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Nicolas Pitre <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Petr Mladek <[email protected]>
Cc: Russell King <[email protected]>
Cc: "Serge E. Hallyn" <[email protected]>
Cc: Sergey Senozhatsky <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Thomas Garnier <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Ard Biesheuvel authored and torvalds committed Aug 22, 2018
1 parent f922c4a commit 7290d58
Showing 6 changed files with 91 additions and 24 deletions.
1 change: 1 addition & 0 deletions arch/x86/include/asm/Kbuild
@@ -8,5 +8,6 @@ generated-y += xen-hypercalls.h

generic-y += dma-contiguous.h
generic-y += early_ioremap.h
generic-y += export.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
5 changes: 0 additions & 5 deletions arch/x86/include/asm/export.h

This file was deleted.

12 changes: 10 additions & 2 deletions include/asm-generic/export.h
@@ -5,12 +5,10 @@
#define KSYM_FUNC(x) x
#endif
#ifdef CONFIG_64BIT
#define __put .quad
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 8
#endif
#else
#define __put .long
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 4
#endif
@@ -19,6 +17,16 @@
#define KCRC_ALIGN 4
#endif

.macro __put, val, name
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	.long \val - ., \name - .
#elif defined(CONFIG_64BIT)
	.quad \val, \name
#else
	.long \val, \name
#endif
.endm

/*
* note on .section use: @progbits vs %progbits nastiness doesn't matter,
* since we immediately emit into those sections anyway.
19 changes: 19 additions & 0 deletions include/linux/compiler.h
@@ -280,6 +280,25 @@ unsigned long read_word_at_a_time(const void *addr)

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __attribute__((section(".discard.addressable"), used)) \
	__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off: the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}

#endif /* __ASSEMBLY__ */

#ifndef __optimize
46 changes: 35 additions & 11 deletions include/linux/export.h
@@ -18,12 +18,6 @@
#define VMLINUX_SYMBOL_STR(x) __VMLINUX_SYMBOL_STR(x)

#ifndef __ASSEMBLY__
struct kernel_symbol
{
	unsigned long value;
	const char *name;
};

#ifdef MODULE
extern struct module __this_module;
#define THIS_MODULE (&__this_module)
@@ -54,17 +48,47 @@ extern struct module __this_module;
#define __CRC_SYMBOL(sym, sec)
#endif

#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
#include <linux/compiler.h>
/*
 * Emit the ksymtab entry as a pair of relative references: this reduces
 * the size by half on 64-bit architectures, and eliminates the need for
 * absolute relocations that require runtime processing on relocatable
 * kernels.
 */
#define __KSYMTAB_ENTRY(sym, sec) \
	__ADDRESSABLE(sym) \
	asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
	    " .balign 8 \n" \
	    "__ksymtab_" #sym ": \n" \
	    " .long " #sym "- . \n" \
	    " .long __kstrtab_" #sym "- . \n" \
	    " .previous \n")

struct kernel_symbol {
	int value_offset;
	int name_offset;
};
#else
#define __KSYMTAB_ENTRY(sym, sec) \
	static const struct kernel_symbol __ksymtab_##sym \
	__attribute__((section("___ksymtab" sec "+" #sym), used)) \
	= { (unsigned long)&sym, __kstrtab_##sym }

struct kernel_symbol {
	unsigned long value;
	const char *name;
};
#endif

/* For every exported symbol, place a struct in the __ksymtab section */
#define ___EXPORT_SYMBOL(sym, sec) \
	extern typeof(sym) sym; \
	__CRC_SYMBOL(sym, sec) \
	static const char __kstrtab_##sym[] \
	__attribute__((section("__ksymtab_strings"), aligned(1))) \
	__attribute__((section("__ksymtab_strings"), used, aligned(1))) \
	= #sym; \
	static const struct kernel_symbol __ksymtab_##sym \
	__used \
	__attribute__((section("___ksymtab" sec "+" #sym), used)) \
	= { (unsigned long)&sym, __kstrtab_##sym }
	__KSYMTAB_ENTRY(sym, sec)

#if defined(__DISABLE_EXPORTS)

32 changes: 26 additions & 6 deletions kernel/module.c
@@ -529,12 +529,30 @@ static bool check_symbol(const struct symsearch *syms,
	return true;
}

static unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return (unsigned long)offset_to_ptr(&sym->value_offset);
#else
	return sym->value;
#endif
}

static const char *kernel_symbol_name(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	return offset_to_ptr(&sym->name_offset);
#else
	return sym->name;
#endif
}

static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
	return strcmp(a, kernel_symbol_name(b));
}

static bool find_symbol_in_section(const struct symsearch *syms,
@@ -2170,7 +2188,7 @@ void *__symbol_get(const char *symbol)
	sym = NULL;
	preempt_enable();

	return sym ? (void *)sym->value : NULL;
	return sym ? (void *)kernel_symbol_value(sym) : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);

@@ -2200,10 +2218,12 @@ static int verify_export_symbols(struct module *mod)

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			if (find_symbol(s->name, &owner, NULL, true, false)) {
			if (find_symbol(kernel_symbol_name(s), &owner, NULL,
					true, false)) {
				pr_err("%s: exports duplicate symbol %s"
				       " (owned by %s)\n",
				       mod->name, s->name, module_name(owner));
				       mod->name, kernel_symbol_name(s),
				       module_name(owner));
				return -ENOEXEC;
			}
		}
@@ -2252,7 +2272,7 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
			ksym = resolve_symbol_wait(mod, info, name);
			/* Ok if resolved. */
			if (ksym && !IS_ERR(ksym)) {
				sym[i].st_value = ksym->value;
				sym[i].st_value = kernel_symbol_value(ksym);
				break;
			}

@@ -2516,7 +2536,7 @@ static int is_exported(const char *name, unsigned long value,
		ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
	else
		ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
	return ks != NULL && ks->value == value;
	return ks != NULL && kernel_symbol_value(ks) == value;
}

/* As per nm */
