Merge branches 'slab/cleanups', 'slab/failslab', 'slab/fixes' and 'slub/percpu' into slab-for-linus
Pekka Enberg committed Mar 4, 2010
5 parents eaa5eec + f3186a9 + 4c13dd3 + 44b57f1 + 91efd77 commit e2b093f
Showing 7 changed files with 146 additions and 260 deletions.
1 change: 1 addition & 0 deletions Documentation/vm/slub.txt
@@ -41,6 +41,7 @@ Possible debug options are
 P		Poisoning (object and padding)
 U		User tracking (free and alloc)
 T		Trace (please only use on single slabs)
+A		Toggle failslab filter mark for the cache
 O		Switch debugging off for caches that would have
		caused higher minimum slab orders
 -		Switch all debugging off (useful if the kernel is
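The new 'A' letter is used like the other debug options, e.g. booting with slub_debug=A,kmalloc-64 would toggle the failslab mark on just that cache (the cache name is only an illustration); the mark itself is given meaning by the failslab changes below.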
5 changes: 3 additions & 2 deletions include/linux/fault-inject.h
@@ -82,9 +82,10 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
 #endif /* CONFIG_FAULT_INJECTION */

 #ifdef CONFIG_FAILSLAB
-extern bool should_failslab(size_t size, gfp_t gfpflags);
+extern bool should_failslab(size_t size, gfp_t gfpflags, unsigned long flags);
 #else
-static inline bool should_failslab(size_t size, gfp_t gfpflags)
+static inline bool should_failslab(size_t size, gfp_t gfpflags,
+					unsigned long flags)
 {
	return false;
 }
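The widened signature threads the cache's flag word through to the fault-injection core, so the cache filter added in mm/failslab.c below can test it for SLAB_FAILSLAB; the !CONFIG_FAILSLAB stub keeps callers compiling against the new three-argument form while never injecting a failure.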
5 changes: 5 additions & 0 deletions include/linux/slab.h
@@ -70,6 +70,11 @@
 #else
 # define SLAB_NOTRACK	0x00000000UL
 #endif
+#ifdef CONFIG_FAILSLAB
+# define SLAB_FAILSLAB	0x02000000UL	/* Fault injection mark */
+#else
+# define SLAB_FAILSLAB	0x00000000UL
+#endif

 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
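Since SLAB_FAILSLAB is an ordinary cache-creation flag, a cache can also be marked in code rather than from the boot command line. A minimal sketch, assuming a hypothetical module whose cache name and object size are made up for illustration:

	#include <linux/module.h>
	#include <linux/slab.h>

	/* Hypothetical cache opted in to failslab fault injection. */
	static struct kmem_cache *demo_cache;

	static int __init demo_init(void)
	{
		/* With failslab's cache_filter enabled, only caches created
		 * with SLAB_FAILSLAB are candidates for injected failures. */
		demo_cache = kmem_cache_create("demo_cache", 128, 0,
					       SLAB_FAILSLAB, NULL);
		return demo_cache ? 0 : -ENOMEM;
	}
	module_init(demo_init);

	static void __exit demo_exit(void)
	{
		kmem_cache_destroy(demo_cache);
	}
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");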
27 changes: 12 additions & 15 deletions include/linux/slub_def.h
@@ -38,8 +38,6 @@ struct kmem_cache_cpu {
	void **freelist;	/* Pointer to first free per cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
-	unsigned int offset;	/* Freepointer offset (in word units) */
-	unsigned int objsize;	/* Size of an object (from kmem_cache) */
 #ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
 #endif
@@ -69,6 +67,7 @@ struct kmem_cache_order_objects {
  * Slab cache management.
  */
 struct kmem_cache {
+	struct kmem_cache_cpu *cpu_slab;
	/* Used for retriving partial slabs etc */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
@@ -104,11 +103,6 @@ struct kmem_cache {
	int remote_node_defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
 #endif
-#ifdef CONFIG_SMP
-	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
-#else
-	struct kmem_cache_cpu cpu_slab;
-#endif
 };

 /*
@@ -135,11 +129,21 @@ struct kmem_cache {

 #define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)

+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA (__force gfp_t)0
+#define KMALLOC_CACHES SLUB_PAGE_SHIFT
+#endif
+
 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];

 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -207,13 +211,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
	return &kmalloc_caches[index];
 }

-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);

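Two notes on this file. The KMALLOC_CACHES arithmetic reserves spare array slots for DMA caches created on demand: with 4 KiB pages, SLUB_PAGE_SHIFT is 12 + 2 = 14, so the array grows to 2 * 14 - 6 = 22 entries, leaving 8 slots beyond the 14 regular kmalloc caches. And with cpu_slab now a single percpu pointer (from the 'slub/percpu' branch) instead of an NR_CPUS array or an embedded struct, per-CPU state is reached through the this_cpu accessors. A minimal sketch of the access pattern, not SLUB's actual fast path (which also handles stats and ordering), and assuming preemption is already disabled:

	#include <linux/percpu.h>
	#include <linux/slub_def.h>

	/* Sketch only: look at this CPU's slab state for a cache. */
	static void *peek_first_free(struct kmem_cache *s)
	{
		struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);

		return c->freelist;	/* first free object, or NULL */
	}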
18 changes: 15 additions & 3 deletions mm/failslab.c
@@ -1,25 +1,32 @@
 #include <linux/fault-inject.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>

 static struct {
	struct fault_attr attr;
	u32 ignore_gfp_wait;
+	int cache_filter;
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct dentry *ignore_gfp_wait_file;
+	struct dentry *cache_filter_file;
 #endif
 } failslab = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
+	.cache_filter = 0,
 };

-bool should_failslab(size_t size, gfp_t gfpflags)
+bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
 {
	if (gfpflags & __GFP_NOFAIL)
		return false;

	if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
		return false;

+	if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
+		return false;
+
	return should_fail(&failslab.attr, size);
 }

@@ -30,7 +37,6 @@ static int __init setup_failslab(char *str)
 __setup("failslab=", setup_failslab);

 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-
 static int __init failslab_debugfs_init(void)
 {
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
@@ -46,8 +52,14 @@ static int __init failslab_debugfs_init(void)
	debugfs_create_bool("ignore-gfp-wait", mode, dir,
			    &failslab.ignore_gfp_wait);

-	if (!failslab.ignore_gfp_wait_file) {
+	failslab.cache_filter_file =
+		debugfs_create_bool("cache-filter", mode, dir,
+				    &failslab.cache_filter);
+
+	if (!failslab.ignore_gfp_wait_file ||
+	    !failslab.cache_filter_file) {
		err = -ENOMEM;
+		debugfs_remove(failslab.cache_filter_file);
		debugfs_remove(failslab.ignore_gfp_wait_file);
		cleanup_fault_attr_dentries(&failslab.attr);
	}
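Runtime control follows the usual fault-injection conventions: the failslab= boot parameter (parsed by setup_failslab() above) arms the injector, and with CONFIG_FAULT_INJECTION_DEBUG_FS the same knobs appear in debugfs, now joined by cache-filter. Leaving cache_filter at 0 keeps the old behavior of making any slab allocation eligible to fail; setting it to 1 restricts injected failures to caches whose flags include SLAB_FAILSLAB.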
13 changes: 6 additions & 7 deletions mm/slab.c
@@ -935,7 +935,6 @@ static int transfer_objects(struct array_cache *to,

	from->avail -= nr;
	to->avail += nr;
-	to->touched = 1;
	return nr;
 }

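This hunk pairs with the cache_alloc_refill() change below: transfer_objects() no longer unconditionally marks the destination touched; instead the refill path marks the shared array, i.e. the source of a successful transfer, so the periodic reaper sees it as recently used and does not drain it prematurely.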
@@ -983,13 +982,11 @@ static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)

	if (limit > 1)
		limit = 12;
-	ac_ptr = kmalloc_node(memsize, gfp, node);
+	ac_ptr = kzalloc_node(memsize, gfp, node);
	if (ac_ptr) {
		for_each_node(i) {
-			if (i == node || !node_online(i)) {
-				ac_ptr[i] = NULL;
+			if (i == node || !node_online(i))
				continue;
-			}
			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
			if (!ac_ptr[i]) {
				for (i--; i >= 0; i--)
@@ -2963,8 +2960,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
	spin_lock(&l3->list_lock);

	/* See if we can refill from the shared array */
-	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
+	if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
+		l3->shared->touched = 1;
		goto alloc_done;
+	}

	while (batchcount > 0) {
		struct list_head *entry;
@@ -3101,7 +3100,7 @@ static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
	if (cachep == &cache_cache)
		return false;

-	return should_failslab(obj_size(cachep), flags);
+	return should_failslab(obj_size(cachep), flags, cachep->flags);
 }

 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
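To put slab_should_failslab() in context, a simplified sketch of the call-site pattern in the allocation path, not mm/slab.c verbatim: fault injection gets the first say, and only then does the normal allocation proceed.

	/* Simplified call-site pattern; the real entry path also handles
	 * lockdep, tracing and NUMA placement before allocating. */
	static void *demo_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
	{
		if (slab_should_failslab(cachep, flags))
			return NULL;	/* injected failure */

		return ____cache_alloc(cachep, flags);
	}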
