slab: remove a useless lockdep annotation
There is no longer any code that takes two of these locks at the same
time, since we don't call slab_destroy() while holding any lock.  The
lockdep annotation is therefore useless; remove it.

v2: don't remove BAD_ALIEN_MAGIC in this patch. It will be removed
    in the following patch.

Signed-off-by: Joonsoo Kim <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
JoonsooKim authored and torvalds committed Aug 7, 2014
1 parent 833b706 commit 367f7f2
Showing 1 changed file with 0 additions and 153 deletions.

--- a/mm/slab.c
+++ b/mm/slab.c
@@ -472,139 +472,6 @@ static struct kmem_cache kmem_cache_boot = {

 #define BAD_ALIEN_MAGIC 0x01020304ul

-#ifdef CONFIG_LOCKDEP
-
-/*
- * Slab sometimes uses the kmalloc slabs to store the slab headers
- * for other slabs "off slab".
- * The locking for this is tricky in that it nests within the locks
- * of all other slabs in a few places; to deal with this special
- * locking we put on-slab caches into a separate lock-class.
- *
- * We set lock class for alien array caches which are up during init.
- * The lock annotation will be lost if all cpus of a node goes down and
- * then comes back up during hotplug
- */
-static struct lock_class_key on_slab_l3_key;
-static struct lock_class_key on_slab_alc_key;
-
-static struct lock_class_key debugobj_l3_key;
-static struct lock_class_key debugobj_alc_key;
-
-static void slab_set_lock_classes(struct kmem_cache *cachep,
-	struct lock_class_key *l3_key, struct lock_class_key *alc_key,
-	struct kmem_cache_node *n)
-{
-	struct alien_cache **alc;
-	int r;
-
-	lockdep_set_class(&n->list_lock, l3_key);
-	alc = n->alien;
-	/*
-	 * FIXME: This check for BAD_ALIEN_MAGIC
-	 * should go away when common slab code is taught to
-	 * work even without alien caches.
-	 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-	 * for alloc_alien_cache,
-	 */
-	if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-		return;
-	for_each_node(r) {
-		if (alc[r])
-			lockdep_set_class(&(alc[r]->lock), alc_key);
-	}
-}
-
-static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep,
-	struct kmem_cache_node *n)
-{
-	slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, n);
-}
-
-static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
-{
-	int node;
-	struct kmem_cache_node *n;
-
-	for_each_kmem_cache_node(cachep, node, n)
-		slab_set_debugobj_lock_classes_node(cachep, n);
-}
-
-static void init_node_lock_keys(int q)
-{
-	int i;
-
-	if (slab_state < UP)
-		return;
-
-	for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) {
-		struct kmem_cache_node *n;
-		struct kmem_cache *cache = kmalloc_caches[i];
-
-		if (!cache)
-			continue;
-
-		n = get_node(cache, q);
-		if (!n || OFF_SLAB(cache))
-			continue;
-
-		slab_set_lock_classes(cache, &on_slab_l3_key,
-				&on_slab_alc_key, n);
-	}
-}
-
-static void on_slab_lock_classes_node(struct kmem_cache *cachep,
-	struct kmem_cache_node *n)
-{
-	slab_set_lock_classes(cachep, &on_slab_l3_key,
-			&on_slab_alc_key, n);
-}
-
-static inline void on_slab_lock_classes(struct kmem_cache *cachep)
-{
-	int node;
-	struct kmem_cache_node *n;
-
-	VM_BUG_ON(OFF_SLAB(cachep));
-	for_each_kmem_cache_node(cachep, node, n)
-		on_slab_lock_classes_node(cachep, n);
-}
-
-static inline void __init init_lock_keys(void)
-{
-	int node;
-
-	for_each_node(node)
-		init_node_lock_keys(node);
-}
-#else
-static void __init init_node_lock_keys(int q)
-{
-}
-
-static inline void init_lock_keys(void)
-{
-}
-
-static inline void on_slab_lock_classes(struct kmem_cache *cachep)
-{
-}
-
-static inline void on_slab_lock_classes_node(struct kmem_cache *cachep,
-	struct kmem_cache_node *n)
-{
-}
-
-static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep,
-	struct kmem_cache_node *n)
-{
-}
-
-static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
-{
-}
-#endif
-
 static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
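
For context on what the deleted block did: locks that share a lock class are indistinguishable to lockdep, so acquiring one while holding another of the same class is reported as possible recursive locking. Giving the nested lock a class of its own, as the removed code did for the on-slab and debugobj cases, suppresses that false positive. A minimal sketch of the pattern, using hypothetical locks and keys rather than the slab ones:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static struct lock_class_key outer_key;
static struct lock_class_key inner_key;

static spinlock_t outer_lock;
static spinlock_t inner_lock;

static void example_init(void)
{
	spin_lock_init(&outer_lock);
	spin_lock_init(&inner_lock);
	/* Distinct keys tell lockdep the nesting below is intentional. */
	lockdep_set_class(&outer_lock, &outer_key);
	lockdep_set_class(&inner_lock, &inner_key);
}

static void example_nested(void)
{
	spin_lock(&outer_lock);
	/* With a shared class this would be flagged as recursive locking. */
	spin_lock(&inner_lock);
	spin_unlock(&inner_lock);
	spin_unlock(&outer_lock);
}

Since slab_destroy() is no longer reached with a list_lock or alien cache lock held, no such nesting remains and the separate classes buy nothing, which is why the whole block can go.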
@@ -1348,13 +1215,7 @@ static int cpuup_prepare(long cpu)
 		spin_unlock_irq(&n->list_lock);
 		kfree(shared);
 		free_alien_cache(alien);
-		if (cachep->flags & SLAB_DEBUG_OBJECTS)
-			slab_set_debugobj_lock_classes_node(cachep, n);
-		else if (!OFF_SLAB(cachep) &&
-			 !(cachep->flags & SLAB_DESTROY_BY_RCU))
-			on_slab_lock_classes_node(cachep, n);
 	}
-	init_node_lock_keys(node);

 	return 0;
 bad:
@@ -1663,9 +1524,6 @@ void __init kmem_cache_init_late(void)
 			BUG();
 	mutex_unlock(&slab_mutex);

-	/* Annotate slab for lockdep -- annotate the malloc caches */
-	init_lock_keys();
-
 	/* Done! */
 	slab_state = FULL;

@@ -2446,17 +2304,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		return err;
 	}

-	if (flags & SLAB_DEBUG_OBJECTS) {
-		/*
-		 * Would deadlock through slab_destroy()->call_rcu()->
-		 * debug_object_activate()->kmem_cache_alloc().
-		 */
-		WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
-
-		slab_set_debugobj_lock_classes(cachep);
-	} else if (!OFF_SLAB(cachep) && !(flags & SLAB_DESTROY_BY_RCU))
-		on_slab_lock_classes(cachep);
-
 	return 0;
 }

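The hunk above also removes the guard against combining SLAB_DEBUG_OBJECTS with SLAB_DESTROY_BY_RCU. The deadlock it warned about is re-entrancy: the RCU free path reaches debugobjects code that can allocate again and retake the cache locks, as the removed comment spells out. A condensed sketch of that style of flag check, with a hypothetical helper name (not code from this patch):

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical validation helper, not part of mm/slab.c: */
static int check_debugobj_flags(unsigned long flags)
{
	/*
	 * Freeing via RCU can recurse into the allocator:
	 * slab_destroy() -> call_rcu() -> debug_object_activate()
	 *   -> kmem_cache_alloc(), which retakes the cache locks.
	 */
	if (WARN_ON_ONCE((flags & SLAB_DEBUG_OBJECTS) &&
			 (flags & SLAB_DESTROY_BY_RCU)))
		return -EINVAL;
	return 0;
}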