mm, slub: Rename slab_alloc() -> slab_alloc_node() to match SLAB
This patch does not fix anything; its only goal is to enable SLAB and
SLUB to share some common code.
Neither behavior nor generated code is affected.

Cc: Christoph Lameter <[email protected]>
Signed-off-by: Ezequiel Garcia <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>
ezequielgarcia authored and penberg committed Sep 25, 2012
Parent: 4835630 · Commit: 2b847c3
Showing 1 changed file with 15 additions and 9 deletions.
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2313,7 +2313,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static __always_inline void *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		gfp_t gfpflags, int node, unsigned long addr)
 {
 	void **object;
@@ -2383,9 +2383,15 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	return object;
 }
 
+static __always_inline void *slab_alloc(struct kmem_cache *s,
+		gfp_t gfpflags, unsigned long addr)
+{
+	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
+}
+
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
 
@@ -2396,7 +2402,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
 	return ret;
 }
@@ -2414,7 +2420,7 @@ EXPORT_SYMBOL(kmalloc_order_trace);
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
 
 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
 				    s->object_size, s->size, gfpflags, node);
@@ -2428,7 +2434,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 				    gfp_t gfpflags,
 				    int node, size_t size)
 {
-	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
 
 	trace_kmalloc_node(_RET_IP_, ret,
 			   size, s->size, gfpflags, node);
@@ -3366,7 +3372,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_);
+	ret = slab_alloc(s, flags, _RET_IP_);
 
 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
 
@@ -3409,7 +3415,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, flags, node, _RET_IP_);
+	ret = slab_alloc_node(s, flags, node, _RET_IP_);
 
 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
 
@@ -4037,7 +4043,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
+	ret = slab_alloc(s, gfpflags, caller);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
@@ -4067,7 +4073,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
 		return s;
 
-	ret = slab_alloc(s, gfpflags, node, caller);
+	ret = slab_alloc_node(s, gfpflags, node, caller);
 
 	/* Honor the call site pointer we received. */
 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
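
For readers outside the kernel tree, here is a minimal, standalone sketch of the pattern the hunks above introduce: a node-aware worker function plus a thin always-inline wrapper that supplies a "no preferred node" sentinel. The names (demo_alloc, demo_alloc_node, DEMO_NO_NODE) and the userspace setting are illustrative assumptions, not the kernel's API; the point is only that a forced-inline wrapper is why the commit can claim that neither behavior nor generated code changes.

/* Illustrative userspace sketch -- not kernel code.  demo_alloc_node()
 * does the work; demo_alloc() is the node-agnostic wrapper.  Because the
 * wrapper is forced inline, demo_alloc(sz, ip) compiles to the same code
 * as demo_alloc_node(sz, DEMO_NO_NODE, ip). */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_NO_NODE (-1)	/* stand-in for NUMA_NO_NODE */

static inline __attribute__((always_inline))
void *demo_alloc_node(size_t size, int node, unsigned long caller)
{
	/* The node-aware allocation work would live here. */
	printf("alloc %zu bytes, node %d, caller %#lx\n", size, node, caller);
	return malloc(size);
}

static inline __attribute__((always_inline))
void *demo_alloc(size_t size, unsigned long caller)
{
	/* Thin wrapper for callers with no NUMA preference. */
	return demo_alloc_node(size, DEMO_NO_NODE, caller);
}

int main(void)
{
	void *a = demo_alloc(64, 0UL);		/* no node preference */
	void *b = demo_alloc_node(64, 0, 0UL);	/* explicit node 0 */

	free(a);
	free(b);
	return 0;
}

With GCC or Clang, the always_inline attribute makes the wrapper disappear at compile time, which mirrors why adding slab_alloc() as a wrapper around the renamed slab_alloc_node() leaves the produced object code unchanged.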
