x86/kasan: support KASAN_VMALLOC
In the case where KASAN directly allocates memory to back vmalloc space,
don't map the early shadow page over it.

We prepopulate pgds/p4ds for the range that would otherwise be empty.
This is required to get it synced to hardware on boot, allowing the
lower levels of the page tables to be filled dynamically.
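[Editorial note: the boot-time sync the message refers to works roughly like the sketch below, modeled loosely on sync_global_pgds() in arch/x86/mm/init_64.c — simplified, locking omitted, and not the verbatim kernel code. It shows why a top-level entry that is still empty at sync time is a problem: there is nothing to copy into the other page tables, so lower levels filled in later would not become visible everywhere.]

/*
 * Simplified sketch (not verbatim kernel code) of the boot-time sync:
 * kernel pgd entries set in init_mm are copied into every other pgd on
 * the pgd_list. An entry that is still pgd_none() propagates nothing,
 * which is why the otherwise-empty range must be prepopulated first.
 */
static void sync_kernel_pgds_sketch(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		if (pgd_none(*pgd_ref))		/* nothing to copy */
			continue;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd = (pgd_t *)page_address(page) + pgd_index(addr);

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
		}
	}
}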

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Daniel Axtens <[email protected]>
Acked-by: Dmitry Vyukov <[email protected]>
Reviewed-by: Andrey Ryabinin <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Christophe Leroy <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Vasily Gorbik <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
daxtens authored and torvalds committed Dec 1, 2019
1 parent eafb149 commit 0609ae0
Showing 2 changed files with 62 additions and 0 deletions.
1 change: 1 addition & 0 deletions arch/x86/Kconfig
@@ -134,6 +134,7 @@ config X86
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN			if X86_64
+	select HAVE_ARCH_KASAN_VMALLOC		if X86_64
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS		if MMU
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS	if MMU && COMPAT
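[Editorial note: the generic KASAN_VMALLOC option introduced earlier in this series lives in lib/Kconfig.kasan and depends on HAVE_ARCH_KASAN_VMALLOC, so this one-line select is what makes the mode available on x86_64.]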
61 changes: 61 additions & 0 deletions arch/x86/mm/kasan_init_64.c
@@ -245,6 +245,49 @@ static void __init kasan_map_early_shadow(pgd_t *pgd)
 	} while (pgd++, addr = next, addr != end);
 }
 
+static void __init kasan_shallow_populate_p4ds(pgd_t *pgd,
+					       unsigned long addr,
+					       unsigned long end)
+{
+	p4d_t *p4d;
+	unsigned long next;
+	void *p;
+
+	p4d = p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+
+		if (p4d_none(*p4d)) {
+			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
+			p4d_populate(&init_mm, p4d, p);
+		}
+	} while (p4d++, addr = next, addr != end);
+}
+
+static void __init kasan_shallow_populate_pgds(void *start, void *end)
+{
+	unsigned long addr, next;
+	pgd_t *pgd;
+	void *p;
+
+	addr = (unsigned long)start;
+	pgd = pgd_offset_k(addr);
+	do {
+		next = pgd_addr_end(addr, (unsigned long)end);
+
+		if (pgd_none(*pgd)) {
+			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
+			pgd_populate(&init_mm, pgd, p);
+		}
+
+		/*
+		 * we need to populate p4ds to be synced when running in
+		 * four level mode - see sync_global_pgds_l4()
+		 */
+		kasan_shallow_populate_p4ds(pgd, addr, next);
+	} while (pgd++, addr = next, addr != (unsigned long)end);
+}
+
 #ifdef CONFIG_KASAN_INLINE
 static int kasan_die_handler(struct notifier_block *self,
 			     unsigned long val,
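[Editorial note: with the top levels guaranteed to exist, the generic half of this series (the parent commit, eafb149) can back vmalloc allocations with real shadow memory on demand. A simplified sketch of that per-PTE fill, closely modeled on the generic patch — details such as the marking of the region as accessible are trimmed:]

static int kasan_populate_vmalloc_pte_sketch(pte_t *ptep, unsigned long addr,
					     void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(*ptep)))	/* shadow already backed */
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* poison fresh shadow until the range is marked accessible */
	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(*ptep))) {	/* re-check under the lock */
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);	/* lost the race; drop our page */
	return 0;
}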
@@ -354,6 +397,24 @@ void __init kasan_init(void)
 
 	kasan_populate_early_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
 		kasan_mem_to_shadow((void *)VMALLOC_START));
+
+	/*
+	 * If we're in full vmalloc mode, don't back vmalloc space with early
+	 * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
+	 * the global table and we can populate the lower levels on demand.
+	 */
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+		kasan_shallow_populate_pgds(
+			kasan_mem_to_shadow((void *)VMALLOC_START),
+			kasan_mem_to_shadow((void *)VMALLOC_END));
+	else
+		kasan_populate_early_shadow(
+			kasan_mem_to_shadow((void *)VMALLOC_START),
+			kasan_mem_to_shadow((void *)VMALLOC_END));
+
+	kasan_populate_early_shadow(
+		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
+		shadow_cpu_entry_begin);
 
 	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
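[Editorial note: for reference, the kasan_mem_to_shadow() translation used throughout this hunk is the standard generic-KASAN mapping from include/linux/kasan.h of this era; each shadow byte covers 2^KASAN_SHADOW_SCALE_SHIFT = 8 bytes of address space, which is why a handful of top-level entries suffice to cover the whole vmalloc shadow range.]

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}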
