Skip to content

Commit

Permalink
slub: Commonize slab_cache field in struct page
Browse files Browse the repository at this point in the history
Right now, slab and slub have fields in struct page to derive which
cache a page belongs to, but they do it slightly differently.

slab uses a field called slab_cache, which lives in the third double
word. slub uses a field called "slab", living outside of the
doublewords area.

Ideally, we could use the same field for this. Since slub heavily makes
use of the doubleword region, there isn't really much room to move
slub's slab_cache field around. Since slab does not have such strict
placement restrictions, we can move it outside the doubleword area.

The naming used by slab, "slab_cache", is less confusing, and it is
preferred over slub's generic "slab".

Signed-off-by: Glauber Costa <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
CC: David Rientjes <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>
  • Loading branch information
Glauber Costa authored and penberg committed Oct 24, 2012
1 parent b4f591c commit 1b4f59e
Show file tree
Hide file tree
Showing 2 changed files with 14 additions and 17 deletions.
7 changes: 2 additions & 5 deletions include/linux/mm_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -128,10 +128,7 @@ struct page {
};

struct list_head list; /* slobs list of pages */
struct { /* slab fields */
struct kmem_cache *slab_cache;
struct slab *slab_page;
};
struct slab *slab_page; /* slab fields */
};

/* Remainder is not double word aligned */
Expand All @@ -146,7 +143,7 @@ struct page {
#if USE_SPLIT_PTLOCKS
spinlock_t ptl;
#endif
struct kmem_cache *slab; /* SLUB: Pointer to slab */
struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
struct page *first_page; /* Compound tail pages */
};

Expand Down
24 changes: 12 additions & 12 deletions mm/slub.c
Original file line number Diff line number Diff line change
Expand Up @@ -1092,11 +1092,11 @@ static noinline struct kmem_cache_node *free_debug_processing(
if (!check_object(s, page, object, SLUB_RED_ACTIVE))
goto out;

if (unlikely(s != page->slab)) {
if (unlikely(s != page->slab_cache)) {
if (!PageSlab(page)) {
slab_err(s, page, "Attempt to free object(0x%p) "
"outside of slab", object);
} else if (!page->slab) {
} else if (!page->slab_cache) {
printk(KERN_ERR
"SLUB <none>: no slab for object 0x%p.\n",
object);
Expand Down Expand Up @@ -1357,7 +1357,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
goto out;

inc_slabs_node(s, page_to_nid(page), page->objects);
page->slab = s;
page->slab_cache = s;
__SetPageSlab(page);
if (page->pfmemalloc)
SetPageSlabPfmemalloc(page);
Expand Down Expand Up @@ -1424,7 +1424,7 @@ static void rcu_free_slab(struct rcu_head *h)
else
page = container_of((struct list_head *)h, struct page, lru);

__free_slab(page->slab, page);
__free_slab(page->slab_cache, page);
}

static void free_slab(struct kmem_cache *s, struct page *page)
Expand Down Expand Up @@ -2617,9 +2617,9 @@ void kmem_cache_free(struct kmem_cache *s, void *x)

page = virt_to_head_page(x);

if (kmem_cache_debug(s) && page->slab != s) {
if (kmem_cache_debug(s) && page->slab_cache != s) {
pr_err("kmem_cache_free: Wrong slab cache. %s but object"
" is from %s\n", page->slab->name, s->name);
" is from %s\n", page->slab_cache->name, s->name);
WARN_ON_ONCE(1);
return;
}
Expand Down Expand Up @@ -3418,7 +3418,7 @@ size_t ksize(const void *object)
return PAGE_SIZE << compound_order(page);
}

return slab_ksize(page->slab);
return slab_ksize(page->slab_cache);
}
EXPORT_SYMBOL(ksize);

Expand All @@ -3443,8 +3443,8 @@ bool verify_mem_not_deleted(const void *x)
}

slab_lock(page);
if (on_freelist(page->slab, page, object)) {
object_err(page->slab, page, object, "Object is on free-list");
if (on_freelist(page->slab_cache, page, object)) {
object_err(page->slab_cache, page, object, "Object is on free-list");
rv = false;
} else {
rv = true;
Expand Down Expand Up @@ -3475,7 +3475,7 @@ void kfree(const void *x)
__free_pages(page, compound_order(page));
return;
}
slab_free(page->slab, page, object, _RET_IP_);
slab_free(page->slab_cache, page, object, _RET_IP_);
}
EXPORT_SYMBOL(kfree);

Expand Down Expand Up @@ -3686,11 +3686,11 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)

if (n) {
list_for_each_entry(p, &n->partial, lru)
p->slab = s;
p->slab_cache = s;

#ifdef CONFIG_SLUB_DEBUG
list_for_each_entry(p, &n->full, lru)
p->slab = s;
p->slab_cache = s;
#endif
}
}
Expand Down

0 comments on commit 1b4f59e

Please sign in to comment.