
Commit f99b788

Merge branch 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'slab-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  slab: fix DEBUG_SLAB warning
  slab: shrink sizeof(struct kmem_cache)
  slab: fix DEBUG_SLAB build
  SLUB: Fix missing <linux/stacktrace.h> include
  slub: reduce overhead of slub_debug
  slub: Add method to verify memory is not freed
  slub: Enable backtrace for create/delete points
  slab allocators: Provide generic description of alignment defines
  slab, slub, slob: Unify alignment definition
  slob/lockdep: Fix gfp flags passed to lockdep
torvalds committed Jul 22, 2011
2 parents 02f8c6a + 7ea466f commit f99b788
Showing 7 changed files with 164 additions and 69 deletions.
include/linux/slab.h: 20 additions, 0 deletions
@@ -133,6 +133,26 @@ unsigned int kmem_cache_size(struct kmem_cache *);
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)

/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
* Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
*/
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
* Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
* Intended for arches that get misalignment faults even for 64 bit integer
* aligned buffers.
*/
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
* Common kmalloc functions provided by all allocators
*/
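The block added to <linux/slab.h> above becomes the single, allocator-independent definition of ARCH_KMALLOC_MINALIGN and ARCH_SLAB_MINALIGN; the per-allocator copies are deleted from slab_def.h, slob_def.h and slub_def.h below. For illustration only (not part of this diff, and the exact header and value differ per architecture), a platform with non-coherent DMA would typically opt in from its arch headers roughly like this:

	/* arch/<arch>/include/asm/cache.h (illustrative sketch) */
	#define L1_CACHE_SHIFT	5
	#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)

	/*
	 * Buffers handed to non-coherent DMA must not share a cache line with
	 * unrelated data, so force kmalloc() buffers up to cache-line alignment.
	 */
	#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

With ARCH_DMA_MINALIGN defined, the common block resolves ARCH_KMALLOC_MINALIGN to the cache-line size; otherwise it falls back to __alignof__(unsigned long long).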
include/linux/slab_def.h: 13 additions, 39 deletions
@@ -17,54 +17,26 @@

#include <trace/events/kmem.h>

/*
* Enforce a minimum alignment for the kmalloc caches.
* Usually, the kmalloc caches are cache_line_size() aligned, except when
* DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
* ARCH_KMALLOC_MINALIGN allows that.
* Note that increasing this value may disable some debug features.
*/
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
* Enforce a minimum alignment for all caches.
* Intended for archs that get misalignment faults even for BYTES_PER_WORD
* aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
* If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
* some debug features.
*/
#define ARCH_SLAB_MINALIGN 0
#endif

/*
* struct kmem_cache
*
* manages a cache.
*/

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
/* 1) Cache tunables. Protected by cache_chain_mutex */
unsigned int batchcount;
unsigned int limit;
unsigned int shared;

unsigned int buffer_size;
u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */
/* 2) touched by every alloc & free from the backend */

unsigned int flags; /* constant flags */
unsigned int num; /* # of objs per slab */

/* 4) cache_grow/shrink */
/* 3) cache_grow/shrink */
/* order of pgs per slab (2^n) */
unsigned int gfporder;

@@ -80,11 +52,11 @@ struct kmem_cache {
/* constructor func */
void (*ctor)(void *obj);

/* 5) cache creation/removal */
/* 4) cache creation/removal */
const char *name;
struct list_head next;

/* 6) statistics */
/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
unsigned long num_active;
unsigned long num_allocations;
@@ -111,16 +83,18 @@ struct kmem_cache {
int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

/* 6) per-cpu/per-node data, touched during every alloc/free */
/*
* We put nodelists[] at the end of kmem_cache, because we want to size
* this array to nr_node_ids slots instead of MAX_NUMNODES
* We put array[] at the end of kmem_cache, because we want to size
* this array to nr_cpu_ids slots instead of NR_CPUS
* (see kmem_cache_init())
* We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
* is statically defined, so we reserve the max number of nodes.
* We still use [NR_CPUS] and not [1] or [0] because cache_cache
* is statically defined, so we reserve the max number of cpus.
*/
struct kmem_list3 *nodelists[MAX_NUMNODES];
struct kmem_list3 **nodelists;
struct array_cache *array[NR_CPUS];
/*
* Do not add fields after nodelists[]
* Do not add fields after array[]
*/
};

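This reshuffle is the "slab: shrink sizeof(struct kmem_cache)" change: nodelists becomes a plain pointer and the per-cpu array[] moves to the tail, so kmem_cache_init() and kmem_cache_create() (see the mm/slab.c hunks below) can size each cache to nr_cpu_ids array slots plus nr_node_ids node pointers instead of the full NR_CPUS and MAX_NUMNODES arrays. A minimal userspace sketch of the same trailing-array trick, using a hypothetical toy_cache rather than the real kernel types:

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct toy_cache {
		unsigned int batchcount;
		void **nodelists;	/* will point just past the sized tail */
		void *array[64];	/* declared at the compile-time maximum */
	};

	int main(void)
	{
		size_t nr_cpu_ids = 4, nr_node_ids = 2;	/* runtime-detected counts */

		/* allocate only what this machine needs; the kernel spells this
		 * offsetof(struct kmem_cache, array[nr_cpu_ids]) + node pointers */
		size_t sz = offsetof(struct toy_cache, array) +
			    nr_cpu_ids * sizeof(void *) +
			    nr_node_ids * sizeof(void *);
		struct toy_cache *c = calloc(1, sz);

		if (!c)
			return 1;
		c->nodelists = (void **)&c->array[nr_cpu_ids];
		printf("declared %zu bytes, allocated %zu bytes\n",
		       sizeof(struct toy_cache), sz);
		free(c);
		return 0;
	}

As the comment in the diff notes, the statically defined cache_cache still declares array[NR_CPUS] at the maximum size, which is why the struct keeps the full-sized tail member.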
include/linux/slob_def.h: 0 additions, 10 deletions
@@ -1,16 +1,6 @@
#ifndef __LINUX_SLOB_DEF_H
#define __LINUX_SLOB_DEF_H

#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
#endif

void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
include/linux/slub_def.h: 13 additions, 10 deletions
@@ -113,16 +113,6 @@ struct kmem_cache {

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
* Maximum kmalloc object size handled by SLUB. Larger object allocations
* are passed through to the page allocator. The page allocator "fastpath"
@@ -228,6 +218,19 @@ kmalloc_order(size_t size, gfp_t flags, unsigned int order)
return ret;
}

/**
* Calling this on allocated memory will check that the memory
* is expected to be in use, and print warnings if not.
*/
#ifdef CONFIG_SLUB_DEBUG
extern bool verify_mem_not_deleted(const void *x);
#else
static inline bool verify_mem_not_deleted(const void *x)
{
return true;
}
#endif

#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
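verify_mem_not_deleted() is the new hook from "slub: Add method to verify memory is not freed": with CONFIG_SLUB_DEBUG it checks that a pointer still refers to memory expected to be in use and prints warnings when it does not; without the option it compiles down to a stub that returns true. A sketch of the intended call pattern; the driver, struct and field names here are hypothetical:

	struct foo_dev {
		void *cached_buf;	/* allocated earlier with kmalloc() */
	};

	/* guard a long-lived cached pointer before trusting it again */
	static void foo_check_cached(struct foo_dev *dev)
	{
		if (!verify_mem_not_deleted(dev->cached_buf))
			pr_warn("foo: cached_buf appears to have been freed\n");
	}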
mm/slab.c: 9 additions, 8 deletions
@@ -574,7 +574,9 @@ static struct arraycache_init initarray_generic =
{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };

/* internal cache of cache description objs */
static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
static struct kmem_cache cache_cache = {
.nodelists = cache_cache_nodelists,
.batchcount = 1,
.limit = BOOT_CPUCACHE_ENTRIES,
.shared = 1,
@@ -1492,11 +1494,10 @@ void __init kmem_cache_init(void)
cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];

/*
* struct kmem_cache size depends on nr_node_ids, which
* can be less than MAX_NUMNODES.
* struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
*/
cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
nr_node_ids * sizeof(struct kmem_list3 *);
cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
nr_node_ids * sizeof(struct kmem_list3 *);
#if DEBUG
cache_cache.obj_size = cache_cache.buffer_size;
#endif
@@ -2308,6 +2309,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (!cachep)
goto oops;

cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
#if DEBUG
cachep->obj_size = size;

@@ -3153,12 +3155,11 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON)
cachep->ctor(objp);
#if ARCH_SLAB_MINALIGN
if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
if (ARCH_SLAB_MINALIGN &&
((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
objp, ARCH_SLAB_MINALIGN);
objp, (int)ARCH_SLAB_MINALIGN);
}
#endif
return objp;
}
#else
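The last mm/slab.c hunk is the "slab: fix DEBUG_SLAB build" fix. With the unified definitions, ARCH_SLAB_MINALIGN can expand to __alignof__(unsigned long long), which the preprocessor cannot evaluate, so the old "#if ARCH_SLAB_MINALIGN" guard no longer builds; the check becomes an ordinary C conditional that the compiler folds away when the value is zero, and the printk gains an (int) cast because __alignof__ does not have type int. A standalone sketch of the same idea (SLAB_MINALIGN here is a stand-in macro, not the kernel's ARCH_SLAB_MINALIGN):

	#include <stdio.h>

	/* "#if SLAB_MINALIGN" would fail: the preprocessor cannot evaluate
	 * __alignof__, but a plain C "if" can, and it constant-folds to
	 * nothing whenever the value is 0. */
	#define SLAB_MINALIGN __alignof__(unsigned long long)

	static void check_alignment(void *objp)
	{
		if (SLAB_MINALIGN && ((unsigned long)objp & (SLAB_MINALIGN - 1)))
			printf("%p: not aligned to %d\n", objp, (int)SLAB_MINALIGN);
	}

	int main(void)
	{
		char buf[32];

		check_alignment(buf);		/* typically aligned */
		check_alignment(buf + 1);	/* deliberately misaligned */
		return 0;
	}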
mm/slob.c: 6 additions, 0 deletions
@@ -482,6 +482,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
void *ret;

gfp &= gfp_allowed_mask;

lockdep_trace_alloc(gfp);

if (size < PAGE_SIZE - align) {
@@ -608,6 +610,10 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
void *b;

flags &= gfp_allowed_mask;

lockdep_trace_alloc(flags);

if (c->size < PAGE_SIZE) {
b = slob_alloc(c->size, flags, c->align, node);
trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
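The two mm/slob.c hunks are the "slob/lockdep: Fix gfp flags passed to lockdep" change: SLOB now masks the caller's flags with gfp_allowed_mask before calling lockdep_trace_alloc(), as SLAB and SLUB already do, so flag bits that are disallowed in the current context (for example __GFP_WAIT while gfp_allowed_mask is restricted during early boot) never reach lockdep and trigger spurious warnings. The shared idiom, shown as a sketch rather than further code from this commit:

	/* mask first, so lockdep (and the allocator) only ever see allowed bits */
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);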
