mm: make minimum slab alignment a runtime property
When CONFIG_KASAN_HW_TAGS is enabled we currently increase the minimum
slab alignment to 16.  This happens even if MTE is not supported in
hardware or disabled via kasan=off, which creates an unnecessary memory
overhead in those cases.  Eliminate this overhead by making the minimum
slab alignment a runtime property and only aligning to 16 if KASAN is
enabled at runtime.
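
To make the overhead concrete: with a 16-byte minimum, every object smaller than 16 bytes is padded out to a full 16-byte slot, so an 8-byte allocation doubles its footprint even though MTE never runs. A minimal userspace sketch of the arithmetic (illustrative sizes only, not kernel code; ALIGN_UP mirrors the kernel's ALIGN() macro):

#include <stdio.h>

/* Round x up to a power-of-two alignment a, like the kernel's ALIGN(). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* The same 8-byte object under a 16-byte vs. an 8-byte minimum. */
	printf("min 16: %lu bytes\n", ALIGN_UP(8UL, 16UL));	/* 16 */
	printf("min  8: %lu bytes\n", ALIGN_UP(8UL, 8UL));	/*  8 */
	return 0;
}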

On a DragonBoard 845c (non-MTE hardware) with a kernel built with
CONFIG_KASAN_HW_TAGS, waiting for quiescence after a full Android boot I
see the following Slab measurements in /proc/meminfo (median of 3
reboots):

Before: 169020 kB
After:  167304 kB

[[email protected]: make slab alignment type `unsigned int' to avoid casting]
Link: https://linux-review.googlesource.com/id/I752e725179b43b144153f4b6f584ceb646473ead
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Peter Collingbourne <[email protected]>
Reviewed-by: Andrey Konovalov <[email protected]>
Reviewed-by: Hyeonggon Yoo <[email protected]>
Tested-by: Hyeonggon Yoo <[email protected]>
Acked-by: David Rientjes <[email protected]>
Reviewed-by: Catalin Marinas <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: Roman Gushchin <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Herbert Xu <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Eric W. Biederman <[email protected]>
Cc: Kees Cook <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
pcc authored and akpm00 committed May 13, 2022
1 parent 534aa1d commit d949a81
Showing 5 changed files with 39 additions and 16 deletions.
17 changes: 12 additions & 5 deletions arch/arm64/include/asm/cache.h
@@ -6,6 +6,7 @@
 #define __ASM_CACHE_H
 
 #include <asm/cputype.h>
+#include <asm/mte-def.h>
 
 #define CTR_L1IP_SHIFT 14
 #define CTR_L1IP_MASK 3
@@ -49,16 +50,22 @@
  */
 #define ARCH_DMA_MINALIGN (128)
 
+#ifndef __ASSEMBLY__
+
+#include <linux/bitops.h>
+#include <linux/kasan-enabled.h>
+
 #ifdef CONFIG_KASAN_SW_TAGS
 #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
 #elif defined(CONFIG_KASAN_HW_TAGS)
-#define ARCH_SLAB_MINALIGN MTE_GRANULE_SIZE
+static inline unsigned int arch_slab_minalign(void)
+{
+	return kasan_hw_tags_enabled() ? MTE_GRANULE_SIZE :
+					 __alignof__(unsigned long long);
+}
+#define arch_slab_minalign() arch_slab_minalign()
 #endif
 
-#ifndef __ASSEMBLY__
-
-#include <linux/bitops.h>
-
 #define ICACHEF_ALIASING 0
 #define ICACHEF_VPIPT 1
 extern unsigned long __icache_flags;
12 changes: 12 additions & 0 deletions include/linux/slab.h
@@ -209,6 +209,18 @@ void kmem_dump_obj(void *object);
 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
 #endif
 
+/*
+ * Arches can define this function if they want to decide the minimum slab
+ * alignment at runtime. The value returned by the function must be a power
+ * of two and >= ARCH_SLAB_MINALIGN.
+ */
+#ifndef arch_slab_minalign
+static inline unsigned int arch_slab_minalign(void)
+{
+	return ARCH_SLAB_MINALIGN;
+}
+#endif
+
 /*
  * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
  * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
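
The #ifndef guard above is the other half of the arm64 change: an architecture opts in by defining both an inline function and a same-named macro, and it is the macro's existence that suppresses this generic fallback. A stripped-down sketch of the idiom (hypothetical constants, standalone C rather than the real headers):

#include <stdio.h>

/* "arch header": the function carries the logic; the self-referential
 * macro only marks the name as defined for the preprocessor. */
static inline unsigned int arch_slab_minalign(void)
{
	return 16;	/* stand-in for the kasan_hw_tags_enabled() check */
}
#define arch_slab_minalign() arch_slab_minalign()

/* "generic header": compiled out here because the macro exists. */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return 8;	/* stand-in for ARCH_SLAB_MINALIGN */
}
#endif

int main(void)
{
	printf("%u\n", arch_slab_minalign());	/* 16: the override won */
	return 0;
}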
7 changes: 3 additions & 4 deletions mm/slab.c
@@ -3009,10 +3009,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 	objp += obj_offset(cachep);
 	if (cachep->ctor && cachep->flags & SLAB_POISON)
 		cachep->ctor(objp);
-	if (ARCH_SLAB_MINALIGN &&
-	    ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
-		pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n",
-		       objp, (int)ARCH_SLAB_MINALIGN);
+	if ((unsigned long)objp & (arch_slab_minalign() - 1)) {
+		pr_err("0x%px: not aligned to arch_slab_minalign()=%u\n", objp,
+		       arch_slab_minalign());
 	}
 	return objp;
 }
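
The rewritten check uses the standard power-of-two test: when align is a power of two, addr & (align - 1) keeps exactly the low bits that must be zero for addr to be aligned. It also shows why the old ARCH_SLAB_MINALIGN && guard could be dropped: arch_slab_minalign() never returns zero, since even the generic fallback returns __alignof__(unsigned long long). A self-contained illustration (plain C, arbitrary example addresses):

#include <assert.h>
#include <stdint.h>

static int is_aligned(uintptr_t addr, unsigned int align)
{
	/* Only valid when align is a power of two. */
	return (addr & (align - 1)) == 0;
}

int main(void)
{
	assert(is_aligned(0x1000, 16));
	assert(!is_aligned(0x1008, 16));	/* low bits 0x8 survive the mask */
	return 0;
}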
3 changes: 1 addition & 2 deletions mm/slab_common.c
@@ -154,8 +154,7 @@ static unsigned int calculate_alignment(slab_flags_t flags,
 		align = max(align, ralign);
 	}
 
-	if (align < ARCH_SLAB_MINALIGN)
-		align = ARCH_SLAB_MINALIGN;
+	align = max(align, arch_slab_minalign());
 
 	return ALIGN(align, sizeof(void *));
 }
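
A worked example of the new max()-based line (hedged: the flags handling above it is elided, and 8-byte pointers are assumed): a cache requesting 8-byte alignment is raised to 16 while KASAN tags are enabled at runtime, and stays at 8 when they are off, with the result rounded up to pointer size either way.

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int requested = 8;

	/* Runtime minimum 16 (tags enabled) vs. 8 (tags disabled). */
	printf("%zu\n", (size_t)ALIGN(max_u(requested, 16), sizeof(void *)));	/* 16 */
	printf("%zu\n", (size_t)ALIGN(max_u(requested, 8), sizeof(void *)));	/* 8 */
	return 0;
}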
16 changes: 11 additions & 5 deletions mm/slob.c
@@ -478,9 +478,11 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
 	unsigned int *m;
-	int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	unsigned int minalign;
 	void *ret;
 
+	minalign = max_t(unsigned int, ARCH_KMALLOC_MINALIGN,
+			 arch_slab_minalign());
 	gfp &= gfp_allowed_mask;
 
 	might_alloc(gfp);
@@ -493,7 +495,7 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		 * kmalloc()'d objects.
 		 */
 		if (is_power_of_2(size))
-			align = max(minalign, (int) size);
+			align = max_t(unsigned int, minalign, size);
 
 		if (!size)
 			return ZERO_SIZE_PTR;
@@ -555,8 +557,11 @@

 	sp = virt_to_folio(block);
 	if (folio_test_slab(sp)) {
-		int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+		unsigned int align = max_t(unsigned int,
+					   ARCH_KMALLOC_MINALIGN,
+					   arch_slab_minalign());
 		unsigned int *m = (unsigned int *)(block - align);
+
 		slob_free(m, *m + align);
 	} else {
 		unsigned int order = folio_order(sp);
@@ -573,7 +578,7 @@ EXPORT_SYMBOL(kfree);
 size_t __ksize(const void *block)
 {
 	struct folio *folio;
-	int align;
+	unsigned int align;
 	unsigned int *m;
 
 	BUG_ON(!block);
@@ -584,7 +589,8 @@
 	if (unlikely(!folio_test_slab(folio)))
 		return folio_size(folio);
 
-	align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	align = max_t(unsigned int, ARCH_KMALLOC_MINALIGN,
+		      arch_slab_minalign());
 	m = (unsigned int *)(block - align);
 	return SLOB_UNITS(*m) * SLOB_UNIT;
 }
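
All three slob.c call sites must compute the same minalign because SLOB stores an allocation's size in a header exactly minalign bytes before the pointer it returns; kfree() and __ksize() walk back by that same distance to find it. A toy userspace model of the layout (hypothetical names, malloc standing in for SLOB's real page management):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for max(ARCH_KMALLOC_MINALIGN, arch_slab_minalign()). */
enum { MINALIGN = 16 };

static void *toy_kmalloc(unsigned int size)
{
	unsigned int *m = malloc(MINALIGN + size);

	if (!m)
		return NULL;
	*m = size;			/* size header */
	return (char *)m + MINALIGN;	/* caller sees only the payload */
}

static unsigned int toy_ksize(const void *block)
{
	/* Walk back by the same minalign to reach the header. */
	return *(const unsigned int *)((const char *)block - MINALIGN);
}

int main(void)
{
	void *p = toy_kmalloc(40);

	printf("%u\n", toy_ksize(p));	/* 40 */
	free((char *)p - MINALIGN);
	return 0;
}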
