Skip to content

Commit

Permalink
kasan: remove SLUB checks for page_alloc fallbacks in tests
Browse files Browse the repository at this point in the history
A number of KASAN tests rely on the fact that calling kmalloc with a size
larger than an order-1 page falls back onto page_alloc.

This fallback was originally only implemented for SLUB, but since commit
d6a7164 ("mm/slab: kmalloc: pass requests larger than order-1 page to
page allocator"), it is also implemented for SLAB.

Thus, drop the SLUB checks from the tests.

Link: https://lkml.kernel.org/r/c82099b6fb365b6f4c2c21b112d4abb4dfd83e53.1703188911.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Ryabinin <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Marco Elver <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
  • Loading branch information
xairy authored and akpm00 committed Dec 29, 2023
1 parent f2fffc0 commit 3ab9304
Showing 1 changed file with 2 additions and 24 deletions.
26 changes: 2 additions & 24 deletions mm/kasan/kasan_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ static void kmalloc_node_oob_right(struct kunit *test)

/*
* Check that KASAN detects an out-of-bounds access for a big object allocated
 * via kmalloc(). But not as big as to trigger the page_alloc fallback.
*/
static void kmalloc_big_oob_right(struct kunit *test)
{
Expand All @@ -233,17 +233,14 @@ static void kmalloc_big_oob_right(struct kunit *test)
/*
* The kmalloc_large_* tests below use kmalloc() to allocate a memory chunk
* that does not fit into the largest slab cache and therefore is allocated via
 * the page_alloc fallback.
*/

static void kmalloc_large_oob_right(struct kunit *test)
{
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

Expand All @@ -258,8 +255,6 @@ static void kmalloc_large_uaf(struct kunit *test)
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
Expand All @@ -272,8 +267,6 @@ static void kmalloc_large_invalid_free(struct kunit *test)
char *ptr;
size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

Expand Down Expand Up @@ -407,18 +400,12 @@ static void krealloc_less_oob(struct kunit *test)

/*
 * Check that KASAN detects an out-of-bounds access when krealloc() grows a
 * large allocation that is backed by the page allocator rather than a slab
 * cache (size > KMALLOC_MAX_CACHE_SIZE).
 *
 * No CONFIG_SLUB gate is needed: since commit d6a7164 ("mm/slab: kmalloc:
 * pass requests larger than order-1 page to page allocator"), the page_alloc
 * fallback is implemented for both SLUB and SLAB.
 */
static void krealloc_large_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
				 KMALLOC_MAX_CACHE_SIZE + 235);
}

/*
 * Check that KASAN detects an out-of-bounds access when krealloc() shrinks a
 * large allocation that is backed by the page allocator rather than a slab
 * cache (size > KMALLOC_MAX_CACHE_SIZE).
 *
 * No CONFIG_SLUB gate is needed: since commit d6a7164 ("mm/slab: kmalloc:
 * pass requests larger than order-1 page to page allocator"), the page_alloc
 * fallback is implemented for both SLUB and SLAB.
 */
static void krealloc_large_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
				 KMALLOC_MAX_CACHE_SIZE + 201);
}
Expand Down Expand Up @@ -1156,9 +1143,6 @@ static void mempool_kmalloc_large_uaf(struct kunit *test)
size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
void *extra_elem;

/* page_alloc fallback is only implemented for SLUB. */
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

extra_elem = mempool_prepare_kmalloc(test, &pool, size);

mempool_uaf_helper(test, &pool, false);
Expand Down Expand Up @@ -1227,9 +1211,6 @@ static void mempool_kmalloc_large_double_free(struct kunit *test)
size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
char *extra_elem;

/* page_alloc fallback is only implemented for SLUB. */
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

extra_elem = mempool_prepare_kmalloc(test, &pool, size);

mempool_double_free_helper(test, &pool);
Expand Down Expand Up @@ -1284,9 +1265,6 @@ static void mempool_kmalloc_large_invalid_free(struct kunit *test)
size_t size = KMALLOC_MAX_CACHE_SIZE + 1;
char *extra_elem;

/* page_alloc fallback is only implemented for SLUB. */
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

extra_elem = mempool_prepare_kmalloc(test, &pool, size);

mempool_kmalloc_invalid_free_helper(test, &pool);
Expand Down

0 comments on commit 3ab9304

Please sign in to comment.