mm/slab: refactor common ksize KASAN logic into slab_common.c
This refactors the ksize() code that is common to the various allocators
into slab_common.c: __ksize() is the allocator-specific implementation
without instrumentation, whereas ksize() includes the required KASAN logic.
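
To illustrate why ksize() needs that KASAN logic, here is a hedged usage
sketch (not code from this commit; the 60-byte request landing in a
kmalloc-64 cache is an assumption): KASAN initially unpoisons only the
requested size, so a caller that wants to use kmalloc's rounded-up slack
must go through ksize(), which unpoisons the whole object, while the
uninstrumented __ksize() reports the same size without unpoisoning anything.

    /* Sketch: assumes a 60-byte request served from a kmalloc-64 cache. */
    char *buf = kmalloc(60, GFP_KERNEL);    /* KASAN unpoisons 60 bytes */
    if (!buf)
            return -ENOMEM;

    /* ksize() returns 64 here and unpoisons the full allocation,
     * so writing into the slack bytes below is legitimate. */
    memset(buf, 0, ksize(buf));

    /* __ksize(buf) would also return 64, but without the unpoisoning
     * the same memset could trigger a KASAN report. */
    kfree(buf);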

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Marco Elver <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Reviewed-by: Andrey Ryabinin <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Andrey Konovalov <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Kees Cook <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
melver authored and torvalds committed Jul 12, 2019
1 parent bb104ed commit 10d1f8c
Showing 5 changed files with 36 additions and 31 deletions.
1 change: 1 addition & 0 deletions include/linux/slab.h
@@ -184,6 +184,7 @@ void * __must_check __krealloc(const void *, size_t, gfp_t);
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
 void kzfree(const void *);
+size_t __ksize(const void *);
 size_t ksize(const void *);
 
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
22 changes: 5 additions & 17 deletions mm/slab.c
@@ -4204,20 +4204,12 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 #endif /* CONFIG_HARDENED_USERCOPY */
 
 /**
- * ksize - get the actual amount of memory allocated for a given object
- * @objp: Pointer to the object
+ * __ksize -- Uninstrumented ksize.
  *
- * kmalloc may internally round up allocations and return more memory
- * than requested. ksize() can be used to determine the actual amount of
- * memory allocated. The caller may use this additional memory, even though
- * a smaller amount of memory was initially specified with the kmalloc call.
- * The caller must guarantee that objp points to a valid object previously
- * allocated with either kmalloc() or kmem_cache_alloc(). The object
- * must not be freed during the duration of the call.
- *
- * Return: size of the actual memory used by @objp in bytes
+ * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
+ * safety checks as ksize() with KASAN instrumentation enabled.
  */
-size_t ksize(const void *objp)
+size_t __ksize(const void *objp)
 {
         struct kmem_cache *c;
         size_t size;
@@ -4228,11 +4220,7 @@ size_t ksize(const void *objp)
 
         c = virt_to_cache(objp);
         size = c ? c->object_size : 0;
-        /* We assume that ksize callers could use the whole allocated area,
-         * so we need to unpoison this area.
-         */
-        kasan_unpoison_shadow(objp, size);
 
         return size;
 }
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
26 changes: 26 additions & 0 deletions mm/slab_common.c
@@ -1597,6 +1597,32 @@ void kzfree(const void *p)
 }
 EXPORT_SYMBOL(kzfree);
 
+/**
+ * ksize - get the actual amount of memory allocated for a given object
+ * @objp: Pointer to the object
+ *
+ * kmalloc may internally round up allocations and return more memory
+ * than requested. ksize() can be used to determine the actual amount of
+ * memory allocated. The caller may use this additional memory, even though
+ * a smaller amount of memory was initially specified with the kmalloc call.
+ * The caller must guarantee that objp points to a valid object previously
+ * allocated with either kmalloc() or kmem_cache_alloc(). The object
+ * must not be freed during the duration of the call.
+ *
+ * Return: size of the actual memory used by @objp in bytes
+ */
+size_t ksize(const void *objp)
+{
+        size_t size = __ksize(objp);
+        /*
+         * We assume that ksize callers could use whole allocated area,
+         * so we need to unpoison this area.
+         */
+        kasan_unpoison_shadow(objp, size);
+        return size;
+}
+EXPORT_SYMBOL(ksize);
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
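An aside on the resulting split (a sketch under the assumption that any
further allocator follows the same pattern; lookup_object_size() is a
hypothetical helper, not a kernel API): the KASAN handling now lives exactly
once in the generic ksize() above, so an allocator only has to supply the
uninstrumented size lookup.

    /* Hypothetical allocator backend: only the uninstrumented lookup is
     * needed; the generic ksize() in mm/slab_common.c layers the KASAN
     * unpoisoning on top. */
    size_t __ksize(const void *objp)
    {
            return lookup_object_size(objp);    /* hypothetical helper */
    }
    EXPORT_SYMBOL(__ksize);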
4 changes: 2 additions & 2 deletions mm/slob.c
@@ -527,7 +527,7 @@ void kfree(const void *block)
 EXPORT_SYMBOL(kfree);
 
 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
-size_t ksize(const void *block)
+size_t __ksize(const void *block)
 {
         struct page *sp;
         int align;
@@ -545,7 +545,7 @@ size_t ksize(const void *block)
         m = (unsigned int *)(block - align);
         return SLOB_UNITS(*m) * SLOB_UNIT;
 }
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
 
 int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags)
 {
14 changes: 2 additions & 12 deletions mm/slub.c
@@ -3895,7 +3895,7 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
 
-static size_t __ksize(const void *object)
+size_t __ksize(const void *object)
 {
         struct page *page;
 
@@ -3911,17 +3911,7 @@ static size_t __ksize(const void *object)
 
         return slab_ksize(page->slab_cache);
 }
-
-size_t ksize(const void *object)
-{
-        size_t size = __ksize(object);
-        /* We assume that ksize callers could use whole allocated area,
-         * so we need to unpoison this area.
-         */
-        kasan_unpoison_shadow(object, size);
-        return size;
-}
-EXPORT_SYMBOL(ksize);
+EXPORT_SYMBOL(__ksize);
 
 void kfree(const void *x)
 {