mm/sl[aou]b: Shrink __kmem_cache_create() parameter lists
Do the initial settings of the fields in common code. This will allow us
to push more processing into common code later and improve readability.

Signed-off-by: Christoph Lameter <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>
Authored by Christoph Lameter; committed by Pekka Enberg (penberg) on Sep 5, 2012.
Parent: 278b1bb. Commit: 8a13a4c.
Showing 5 changed files with 68 additions and 81 deletions.
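In short, the per-allocator entry point __kmem_cache_create() drops four of its six parameters; callers now store name, size, object_size, align, and ctor in the struct kmem_cache before the call, and only the flags still travel as an argument. A minimal before/after sketch of the calling convention, assembled from the hunks below (the local variables are illustrative, not one specific call site):

	/* Before: every property of the cache is passed as a parameter. */
	err = __kmem_cache_create(cachep, name, size, align, flags, ctor);

	/* After: the caller fills in the fields, then hands over only flags. */
	cachep->name = name;
	cachep->size = size;
	cachep->object_size = size;
	cachep->align = align;
	cachep->ctor = ctor;
	err = __kmem_cache_create(cachep, flags);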
73 changes: 33 additions & 40 deletions mm/slab.c
@@ -1677,20 +1677,20 @@ void __init kmem_cache_init(void)
 	 */
 
 	sizes[INDEX_AC].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, names[INDEX_AC].name,
-					sizes[INDEX_AC].cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
-
+	sizes[INDEX_AC].cs_cachep->name = names[INDEX_AC].name;
+	sizes[INDEX_AC].cs_cachep->size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->object_size = sizes[INDEX_AC].cs_size;
+	sizes[INDEX_AC].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+	__kmem_cache_create(sizes[INDEX_AC].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
 	list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
 
 	if (INDEX_AC != INDEX_L3) {
 		sizes[INDEX_L3].cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, names[INDEX_L3].name,
-			sizes[INDEX_L3].cs_size,
-			ARCH_KMALLOC_MINALIGN,
-			ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-			NULL);
+		sizes[INDEX_L3].cs_cachep->name = names[INDEX_L3].name;
+		sizes[INDEX_L3].cs_cachep->size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->object_size = sizes[INDEX_L3].cs_size;
+		sizes[INDEX_L3].cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+		__kmem_cache_create(sizes[INDEX_L3].cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
 		list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
 	}

@@ -1706,22 +1706,21 @@ void __init kmem_cache_init(void)
 		 */
 		if (!sizes->cs_cachep) {
 			sizes->cs_cachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
-			__kmem_cache_create(sizes->cs_cachep, names->name,
-					sizes->cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
-					NULL);
+			sizes->cs_cachep->name = names->name;
+			sizes->cs_cachep->size = sizes->cs_size;
+			sizes->cs_cachep->object_size = sizes->cs_size;
+			sizes->cs_cachep->align = ARCH_KMALLOC_MINALIGN;
+			__kmem_cache_create(sizes->cs_cachep, ARCH_KMALLOC_FLAGS|SLAB_PANIC);
 			list_add(&sizes->cs_cachep->list, &slab_caches);
 		}
 #ifdef CONFIG_ZONE_DMA
 		sizes->cs_dmacachep = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
+		sizes->cs_dmacachep->name = names->name_dma;
+		sizes->cs_dmacachep->size = sizes->cs_size;
+		sizes->cs_dmacachep->object_size = sizes->cs_size;
+		sizes->cs_dmacachep->align = ARCH_KMALLOC_MINALIGN;
 		__kmem_cache_create(sizes->cs_dmacachep,
-					names->name_dma,
-					sizes->cs_size,
-					ARCH_KMALLOC_MINALIGN,
-					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
-						SLAB_PANIC,
-					NULL);
+			       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA| SLAB_PANIC);
 		list_add(&sizes->cs_dmacachep->list, &slab_caches);
 #endif
 		sizes++;
@@ -2360,12 +2359,12 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * as davem.
  */
 int
-__kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
-	unsigned long flags, void (*ctor)(void *))
+__kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 {
 	size_t left_over, slab_size, ralign;
 	gfp_t gfp;
 	int err;
+	size_t size = cachep->size;
 
 #if DEBUG
 #if FORCED_DEBUG
@@ -2437,25 +2436,23 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 		ralign = ARCH_SLAB_MINALIGN;
 	}
 	/* 3) caller mandated alignment */
-	if (ralign < align) {
-		ralign = align;
+	if (ralign < cachep->align) {
+		ralign = cachep->align;
 	}
 	/* disable debug if necessary */
 	if (ralign > __alignof__(unsigned long long))
 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	/*
 	 * 4) Store it.
 	 */
-	align = ralign;
+	cachep->align = ralign;
 
 	if (slab_is_available())
 		gfp = GFP_KERNEL;
 	else
 		gfp = GFP_NOWAIT;
 
 	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
-	cachep->object_size = size;
-	cachep->align = align;
 #if DEBUG
 
 	/*
@@ -2500,17 +2497,15 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 	 */
 		flags |= CFLGS_OFF_SLAB;
 
-	size = ALIGN(size, align);
+	size = ALIGN(size, cachep->align);
 
-	left_over = calculate_slab_order(cachep, size, align, flags);
+	left_over = calculate_slab_order(cachep, size, cachep->align, flags);
 
-	if (!cachep->num) {
-		printk(KERN_ERR
-		       "kmem_cache_create: couldn't create cache %s.\n", name);
+	if (!cachep->num)
 		return -E2BIG;
-	}
+
 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
-			  + sizeof(struct slab), align);
+			  + sizeof(struct slab), cachep->align);
 
 	/*
 	 * If the slab has been placed off-slab, and we have enough space then
@@ -2538,8 +2533,8 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 
 	cachep->colour_off = cache_line_size();
 	/* Offset must be a multiple of the alignment. */
-	if (cachep->colour_off < align)
-		cachep->colour_off = align;
+	if (cachep->colour_off < cachep->align)
+		cachep->colour_off = cachep->align;
 	cachep->colour = left_over / cachep->colour_off;
 	cachep->slab_size = slab_size;
 	cachep->flags = flags;
@@ -2560,8 +2555,6 @@ __kmem_cache_create (struct kmem_cache *cachep, const char *name, size_t size, size_t align,
 	 */
 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
 	}
-	cachep->ctor = ctor;
-	cachep->name = name;
 	cachep->refcount = 1;
 
 	err = setup_cpu_cache(cachep, gfp);
3 changes: 1 addition & 2 deletions mm/slab.h
@@ -33,8 +33,7 @@ extern struct list_head slab_caches;
 extern struct kmem_cache *kmem_cache;
 
 /* Functions provided by the slab allocators */
-extern int __kmem_cache_create(struct kmem_cache *, const char *name,
-	size_t size, size_t align, unsigned long flags, void (*ctor)(void *));
+extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);
 
 #ifdef CONFIG_SLUB
 struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
26 changes: 13 additions & 13 deletions mm/slab_common.c
@@ -100,7 +100,6 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
 {
 	struct kmem_cache *s = NULL;
 	int err = 0;
-	char *n;
 
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
@@ -109,32 +108,33 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
 		goto out_locked;
 
 
-	n = kstrdup(name, GFP_KERNEL);
-	if (!n) {
-		err = -ENOMEM;
-		goto out_locked;
-	}
-
 	s = __kmem_cache_alias(name, size, align, flags, ctor);
 	if (s)
 		goto out_locked;
 
 	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
 	if (s) {
-		err = __kmem_cache_create(s, n, size, align, flags, ctor);
+		s->object_size = s->size = size;
+		s->align = align;
+		s->ctor = ctor;
+		s->name = kstrdup(name, GFP_KERNEL);
+		if (!s->name) {
+			kmem_cache_free(kmem_cache, s);
+			err = -ENOMEM;
+			goto out_locked;
+		}
+
+		err = __kmem_cache_create(s, flags);
 		if (!err)
 
 			list_add(&s->list, &slab_caches);
 
 		else {
-			kfree(n);
+			kfree(s->name);
 			kmem_cache_free(kmem_cache, s);
 		}
 
-	} else {
-		kfree(n);
+	} else
 		err = -ENOMEM;
-	}
 
 out_locked:
 	mutex_unlock(&slab_mutex);
8 changes: 3 additions & 5 deletions mm/slob.c
@@ -508,17 +508,15 @@ size_t ksize(const void *block)
 }
 EXPORT_SYMBOL(ksize);
 
-int __kmem_cache_create(struct kmem_cache *c, const char *name, size_t size,
-	size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
 {
-	c->name = name;
-	c->size = size;
+	size_t align = c->size;
+
 	if (flags & SLAB_DESTROY_BY_RCU) {
 		/* leave room for rcu footer at the end of object */
 		c->size += sizeof(struct slob_rcu);
 	}
 	c->flags = flags;
-	c->ctor = ctor;
 	/* ignore alignment unless it's forced */
 	c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 	if (c->align < ARCH_SLAB_MINALIGN)
39 changes: 18 additions & 21 deletions mm/slub.c
@@ -3029,16 +3029,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 
 }
 
-static int kmem_cache_open(struct kmem_cache *s,
-		const char *name, size_t size,
-		size_t align, unsigned long flags,
-		void (*ctor)(void *))
+static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 {
-	s->name = name;
-	s->ctor = ctor;
-	s->object_size = size;
-	s->align = align;
-	s->flags = kmem_cache_flags(size, flags, name, ctor);
+	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
 	s->reserved = 0;
 
 	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
@@ -3115,7 +3108,7 @@ static int kmem_cache_open(struct kmem_cache *s,
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)size, s->size, oo_order(s->oo),
+			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
 			s->offset, flags);
 	return -EINVAL;
 }
@@ -3261,12 +3254,15 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
 
 	s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
+	s->name = name;
+	s->size = s->object_size = size;
+	s->align = ARCH_KMALLOC_MINALIGN;
+
 	/*
 	 * This function is called with IRQs disabled during early-boot on
 	 * single CPU so there's no need to take slab_mutex here.
 	 */
-	if (kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
-								flags, NULL))
+	if (kmem_cache_open(s, flags))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -3719,18 +3715,21 @@ void __init kmem_cache_init(void)
 	 */
 	kmem_cache_node = (void *)kmem_cache + kmalloc_size;
 
-	kmem_cache_open(kmem_cache_node, "kmem_cache_node",
-		sizeof(struct kmem_cache_node),
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache_node->name = "kmem_cache_node";
+	kmem_cache_node->size = kmem_cache_node->object_size =
+		sizeof(struct kmem_cache_node);
+	kmem_cache_open(kmem_cache_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 
 	/* Able to allocate the per node structures */
 	slab_state = PARTIAL;
 
 	temp_kmem_cache = kmem_cache;
-	kmem_cache_open(kmem_cache, "kmem_cache", kmem_size,
-		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
+	kmem_cache->name = "kmem_cache";
+	kmem_cache->size = kmem_cache->object_size = kmem_size;
+	kmem_cache_open(kmem_cache, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 
 	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
 	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
@@ -3943,11 +3942,9 @@ struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 	return s;
 }
 
-int __kmem_cache_create(struct kmem_cache *s,
-		const char *name, size_t size,
-		size_t align, unsigned long flags, void (*ctor)(void *))
+int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
 {
-	return kmem_cache_open(s, name, size, align, flags, ctor);
+	return kmem_cache_open(s, flags);
 }
 
 #ifdef CONFIG_SMP

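The exported interface is untouched by this commit; only the handoff between mm/slab_common.c and the individual allocators changes. A hypothetical client of the public API (the widget cache is an invented example, not part of this commit) creates a cache exactly as before:

	#include <linux/slab.h>

	struct widget {
		int id;
		char payload[120];
	};

	static struct kmem_cache *widget_cache;

	static int widget_cache_init(void)
	{
		/* Same public signature before and after the refactoring:
		 * name, object size, alignment, flags, constructor. */
		widget_cache = kmem_cache_create("widget", sizeof(struct widget),
						 0, SLAB_HWCACHE_ALIGN, NULL);
		if (!widget_cache)
			return -ENOMEM;
		return 0;
	}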