mm, slab: suppress out of memory warning unless debug is enabled
When the slab or slub allocators cannot allocate additional slab pages,
they emit diagnostic information to the kernel log such as the current
number of slabs, number of objects, active objects, etc.  This is always
coupled with a page allocation failure warning, since both are gated by
the same !__GFP_NOWARN check.

Suppress this out of memory warning when the allocator is configured
without debug support.  The page allocation failure warning will still
indicate that it is a failed slab allocation, the order, and the gfp
mask, so the extra slab diagnostics are only useful for debugging the
allocator itself.

Since CONFIG_SLUB_DEBUG is already enabled by default for the slub
allocator, there is no functional change with this patch.  If debug is
disabled, however, the warnings are now suppressed.
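
To illustrate the caller-side contract (a minimal sketch; buf and len
are hypothetical names, not from the patch):

	/*
	 * A speculative allocation the caller can recover from:
	 * __GFP_NOWARN suppresses the page allocation failure warning,
	 * and after this patch the SLAB/SLUB out-of-memory report
	 * honors the same flag inside slab_out_of_memory() itself.
	 */
	buf = kmalloc(len, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		return -ENOMEM;	/* fail quietly, no log noise */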

Signed-off-by: David Rientjes <[email protected]>
Cc: Pekka Enberg <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Authored by rientjes, committed by torvalds on Jun 4, 2014
Parent ecc42fb · commit 9a02d69
Showing 2 changed files with 25 additions and 14 deletions.
mm/slab.c — 10 changes: 8 additions & 2 deletions
@@ -1621,10 +1621,16 @@ __initcall(cpucache_init);
 static noinline void
 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 {
+#if DEBUG
 	struct kmem_cache_node *n;
 	struct page *page;
 	unsigned long flags;
 	int node;
+	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
+
+	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
+		return;
 
 	printk(KERN_WARNING
 		"SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
@@ -1662,6 +1668,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 		node, active_slabs, num_slabs, active_objs, num_objs,
 		free_objects);
 	}
+#endif
 }
 
 /*
@@ -1683,8 +1690,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 
 	page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 	if (!page) {
-		if (!(flags & __GFP_NOWARN) && printk_ratelimit())
-			slab_out_of_memory(cachep, flags, nodeid);
+		slab_out_of_memory(cachep, flags, nodeid);
 		return NULL;
 	}
 
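The guard added at the top of slab_out_of_memory() above replaces the
printk_ratelimit() test that used to sit at the call site.  Assuming the
stock definitions from include/linux/ratelimit.h (DEFAULT_RATELIMIT_INTERVAL
is 5 * HZ, DEFAULT_RATELIMIT_BURST is 10), the pattern reads:

	/*
	 * One static ratelimit state per message site: at most 10
	 * reports per 5-second window, rather than competing for the
	 * single global printk_ratelimit() budget with unrelated
	 * messages.
	 */
	static DEFINE_RATELIMIT_STATE(slab_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/* Bail out if the caller asked for silence or we are over budget. */
	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slab_oom_rs))
		return;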
mm/slub.c — 29 changes: 17 additions & 12 deletions
@@ -2119,11 +2119,19 @@ static inline int node_match(struct page *page, int node)
 	return 1;
 }
 
+#ifdef CONFIG_SLUB_DEBUG
 static int count_free(struct page *page)
 {
 	return page->objects - page->inuse;
 }
 
+static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
+{
+	return atomic_long_read(&n->total_objects);
+}
+#endif /* CONFIG_SLUB_DEBUG */
+
+#if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
 static unsigned long count_partial(struct kmem_cache_node *n,
 					int (*get_count)(struct page *))
 {
@@ -2137,21 +2145,19 @@ static unsigned long count_partial(struct kmem_cache_node *n,
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
-
-static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
-{
-#ifdef CONFIG_SLUB_DEBUG
-	return atomic_long_read(&n->total_objects);
-#else
-	return 0;
-#endif
-}
+#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
 
 static noinline void
 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 {
+#ifdef CONFIG_SLUB_DEBUG
+	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
 	int node;
 
+	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
+		return;
+
 	pr_warn("SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
 		nid, gfpflags);
 	pr_warn("  cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
@@ -2178,6 +2184,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 	pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
 		node, nr_slabs, nr_objs, nr_free);
 	}
+#endif
 }
 
 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
@@ -2356,9 +2363,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	freelist = new_slab_objects(s, gfpflags, node, &c);
 
 	if (unlikely(!freelist)) {
-		if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
-			slab_out_of_memory(s, gfpflags, node);
-
+		slab_out_of_memory(s, gfpflags, node);
 		local_irq_restore(flags);
 		return NULL;
 	}
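Because the report body is now bracketed by #if DEBUG (slab) and
#ifdef CONFIG_SLUB_DEBUG (slub), a kernel built without debug support
reduces slab_out_of_memory() to an empty stub:

	/* With CONFIG_SLUB_DEBUG=n the preprocessor strips the body: */
	static noinline void
	slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
	{
	}

The __GFP_NOWARN and ratelimit checks live inside the function, so the
old "if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())" wrappers at
both call sites become redundant and are removed.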
