Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux

Pull SLAB changes from Pekka Enberg:
 "Random bug fixes that have accumulated in my inbox over the past few
  months"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  mm: Fix warning on make htmldocs caused by slab.c
  mm: slub: work around unneeded lockdep warning
  mm: sl[uo]b: fix misleading comments
  slub: Fix possible format string bug.
  slub: use lockdep_assert_held
  slub: Fix calculation of cpu slabs
  slab.h: remove duplicate kmalloc declaration and fix kernel-doc warnings
torvalds committed Feb 2, 2014
2 parents 87af5e5 + cb8ee1a commit 7b383be
Showing 3 changed files with 39 additions and 27 deletions.
8 changes: 4 additions & 4 deletions include/linux/slab.h
@@ -205,8 +205,8 @@ struct kmem_cache {

#ifdef CONFIG_SLUB
/*
- * SLUB allocates up to order 2 pages directly and otherwise
- * passes the request to the page allocator.
+ * SLUB directly allocates requests fitting in to an order-1 page
+ * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
@@ -217,12 +217,12 @@ struct kmem_cache {

#ifdef CONFIG_SLOB
/*
- * SLOB passes all page size and larger requests to the page allocator.
+ * SLOB passes all requests larger than one page to the page allocator.
* No kmalloc array is necessary since objects of different sizes can
* be allocated from the same page.
*/
- #define KMALLOC_SHIFT_MAX 30
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
+ #define KMALLOC_SHIFT_MAX 30
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
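A note on the values these reworded comments describe (an aside, not part of the commit): KMALLOC_SHIFT_HIGH is the largest power-of-two size that still gets its own kmalloc slab cache; anything bigger goes straight to the page allocator. The standalone sketch below assumes 4 KiB pages (PAGE_SHIFT == 12) and is ordinary userspace C rather than kernel code; it just prints the resulting limits for SLUB and SLOB.

/*
 * Illustration only: userspace C, assuming PAGE_SHIFT == 12 (4 KiB pages).
 * Mirrors the constants discussed in the slab.h comments above.
 */
#include <stdio.h>

#define PAGE_SHIFT			12			/* assumption for this sketch */
#define KMALLOC_SHIFT_HIGH_SLUB		(PAGE_SHIFT + 1)	/* SLUB: up to an order-1 page */
#define KMALLOC_SHIFT_HIGH_SLOB		PAGE_SHIFT		/* SLOB: up to one page */

int main(void)
{
	/* Largest request served from kmalloc slab caches; bigger ones go to the page allocator. */
	printf("SLUB kmalloc caches cover up to %lu bytes\n", 1UL << KMALLOC_SHIFT_HIGH_SLUB);
	printf("SLOB kmalloc caches cover up to %lu bytes\n", 1UL << KMALLOC_SHIFT_HIGH_SLOB);
	return 0;
}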
2 changes: 1 addition & 1 deletion mm/slab.c
@@ -1946,7 +1946,7 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
/**
* slab_destroy - destroy and release all objects in a slab
* @cachep: cache pointer being destroyed
- * @slabp: slab pointer being destroyed
+ * @page: page pointer being destroyed
*
* Destroy all the objs in a slab, and release the mem back to the system.
* Before calling the slab must have been unlinked from the cache. The
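For context on why the @slabp to @page rename matters (again an aside, not part of the commit): kernel-doc comments are parsed by scripts/kernel-doc during "make htmldocs", and each @name tag must match an actual parameter of the function or the build emits warnings. A minimal, self-contained illustration of the convention, using a made-up function:

/* Toy example of the kernel-doc convention; example_sum() is invented for this sketch. */
#include <stdio.h>

/**
 * example_sum - add two values
 * @a: first addend
 * @b: second addend
 *
 * If the tag above said "@x:" instead of "@a:", "make htmldocs" would warn
 * about the mismatch, which is the class of warning the slab.c fix removes.
 */
static int example_sum(int a, int b)
{
	return a + b;
}

int main(void)
{
	printf("%d\n", example_sum(2, 3));
	return 0;
}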
56 changes: 34 additions & 22 deletions mm/slub.c
@@ -1000,23 +1000,22 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)

/*
* Tracking of fully allocated slabs for debugging purposes.
- *
- * list_lock must be held.
*/
static void add_full(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page)
{
+ lockdep_assert_held(&n->list_lock);
+
if (!(s->flags & SLAB_STORE_USER))
return;

list_add(&page->lru, &n->full);
}

- /*
- * list_lock must be held.
- */
- static void remove_full(struct kmem_cache *s, struct page *page)
+ static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
+ lockdep_assert_held(&n->list_lock);
+
if (!(s->flags & SLAB_STORE_USER))
return;

@@ -1265,7 +1264,8 @@ static inline int check_object(struct kmem_cache *s, struct page *page,
void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
struct page *page) {}
- static inline void remove_full(struct kmem_cache *s, struct page *page) {}
+ static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
+ 	struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long object_size,
unsigned long flags, const char *name,
void (*ctor)(void *))
@@ -1519,25 +1519,24 @@ static void discard_slab(struct kmem_cache *s, struct page *page)

/*
* Management of partially allocated slabs.
- *
- * list_lock must be held.
*/
static inline void add_partial(struct kmem_cache_node *n,
struct page *page, int tail)
{
+ lockdep_assert_held(&n->list_lock);
+
n->nr_partial++;
if (tail == DEACTIVATE_TO_TAIL)
list_add_tail(&page->lru, &n->partial);
else
list_add(&page->lru, &n->partial);
}

- /*
- * list_lock must be held.
- */
static inline void remove_partial(struct kmem_cache_node *n,
struct page *page)
{
+ lockdep_assert_held(&n->list_lock);
+
list_del(&page->lru);
n->nr_partial--;
}
@@ -1547,8 +1546,6 @@ static inline void remove_partial(struct kmem_cache_node *n,
* return the pointer to the freelist.
*
* Returns a list of objects or NULL if it fails.
- *
- * Must hold list_lock since we modify the partial list.
*/
static inline void *acquire_slab(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page,
@@ -1558,6 +1555,8 @@ static inline void *acquire_slab(struct kmem_cache *s,
unsigned long counters;
struct page new;

+ lockdep_assert_held(&n->list_lock);
+
/*
* Zap the freelist and set the frozen bit.
* The old freelist is the list of objects for the
@@ -1902,7 +1901,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,

else if (l == M_FULL)

- remove_full(s, page);
+ remove_full(s, n, page);

if (m == M_PARTIAL) {

@@ -2556,7 +2555,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
new.inuse--;
if ((!new.inuse || !prior) && !was_frozen) {

- if (kmem_cache_has_cpu_partial(s) && !prior)
+ if (kmem_cache_has_cpu_partial(s) && !prior) {

/*
* Slab was on no list before and will be
@@ -2566,7 +2565,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
*/
new.frozen = 1;

- else { /* Needs to be taken off a list */
+ } else { /* Needs to be taken off a list */

n = get_node(s, page_to_nid(page));
/*
@@ -2615,7 +2614,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
*/
if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
if (kmem_cache_debug(s))
- remove_full(s, page);
+ remove_full(s, n, page);
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -2629,9 +2628,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
*/
remove_partial(n, page);
stat(s, FREE_REMOVE_PARTIAL);
- } else
+ } else {
/* Slab must be on the full list */
- remove_full(s, page);
+ remove_full(s, n, page);
+ }

spin_unlock_irqrestore(&n->list_lock, flags);
stat(s, FREE_SLAB);
@@ -2905,7 +2905,13 @@ static void early_kmem_cache_node_alloc(int node)
init_kmem_cache_node(n);
inc_slabs_node(kmem_cache_node, node, page->objects);

+ /*
+ * the lock is for lockdep's sake, not for any actual
+ * race protection
+ */
+ spin_lock(&n->list_lock);
add_partial(n, page, DEACTIVATE_TO_HEAD);
+ spin_unlock(&n->list_lock);
}

static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -4314,7 +4320,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,

page = ACCESS_ONCE(c->partial);
if (page) {
- x = page->pobjects;
+ node = page_to_nid(page);
+ if (flags & SO_TOTAL)
+ 	WARN_ON_ONCE(1);
+ else if (flags & SO_OBJECTS)
+ 	WARN_ON_ONCE(1);
+ else
+ 	x = page->pages;
total += x;
nodes[node] += x;
}
@@ -5178,7 +5190,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
}

s->kobj.kset = slab_kset;
- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
if (err) {
kobject_put(&s->kobj);
return err;
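One aside on the last hunk: kobject_init_and_add() takes a printf-style format, so passing the cache name directly as the format would let any '%' in the name be interpreted as a conversion specifier; routing it through a fixed "%s" treats the name as plain data. The userspace sketch below uses printf() as a stand-in for the same pattern; the cache name is invented for illustration.

/*
 * Userspace illustration of the format-string pattern fixed in
 * sysfs_slab_add(); printf() stands in for the kobject API and the
 * name below is made up.
 */
#include <stdio.h>

int main(void)
{
	const char *name = "kmalloc-100%%";	/* a name that happens to contain '%' */

	/* Risky: the name itself is the format, so "%%" collapses to a single '%'. */
	printf(name);
	printf("\n");

	/* Safe: the name is ordinary data behind a fixed "%s" format, as in the fix. */
	printf("%s\n", name);
	return 0;
}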
