slub: Make cpu partial slab support configurable
CPU partial support can introduce a level of indeterminism that is not
wanted in certain contexts (such as a realtime kernel). Make it
configurable.

This patch is based on Christoph Lameter's "slub: Make cpu partial slab
support configurable V2".

Acked-by: Christoph Lameter <[email protected]>
Signed-off-by: Joonsoo Kim <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>
JoonsooKim authored and penberg committed Jul 7, 2013
1 parent e7efa61 commit 345c905
Showing 2 changed files with 32 additions and 6 deletions.
init/Kconfig: 11 additions, 0 deletions
@@ -1511,6 +1511,17 @@ config SLOB

 endchoice
 
+config SLUB_CPU_PARTIAL
+	default y
+	depends on SLUB
+	bool "SLUB per cpu partial cache"
+	help
+	  Per cpu partial caches accelerate object allocation and freeing
+	  that is local to a processor at the price of more indeterminism
+	  in the latency of the free. On overflow these caches will be cleared
+	  which requires the taking of locks that may cause latency spikes.
+	  Typically one would choose no for a realtime system.
+
 config MMAP_ALLOW_UNINITIALIZED
 	bool "Allow mmapped anonymous memory to be uninitialized"
 	depends on EXPERT && !MMU
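As a minimal sketch of the choice described in the help text above (illustrative, not part of this commit), a realtime-leaning build that keeps SLUB but opts out of the per cpu partial cache would carry a .config fragment along these lines, with all surrounding options omitted:

CONFIG_SLUB=y
# CONFIG_SLUB_CPU_PARTIAL is not set

Leaving the option at its default y keeps the allocator's behaviour exactly as it was before this patch, since kmem_cache_has_cpu_partial() then reduces to !kmem_cache_debug().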
mm/slub.c: 21 additions, 6 deletions
Expand Up @@ -122,6 +122,15 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 #endif
 }
 
+static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_CPU_PARTIAL
+	return !kmem_cache_debug(s);
+#else
+	return false;
+#endif
+}
+
 /*
  * Issues still to be resolved:
  *
@@ -1572,7 +1581,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 			put_cpu_partial(s, page, 0);
 			stat(s, CPU_PARTIAL_NODE);
 		}
-		if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+		if (!kmem_cache_has_cpu_partial(s)
+			|| available > s->cpu_partial / 2)
 			break;
 
 	}
@@ -1883,6 +1893,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
 static void unfreeze_partials(struct kmem_cache *s,
 		struct kmem_cache_cpu *c)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct kmem_cache_node *n = NULL, *n2 = NULL;
 	struct page *page, *discard_page = NULL;
 
@@ -1937,6 +1948,7 @@ static void unfreeze_partials(struct kmem_cache *s,
 		discard_slab(s, page);
 		stat(s, FREE_SLAB);
 	}
+#endif
 }
 
 /*
@@ -1950,6 +1962,7 @@ static void unfreeze_partials(struct kmem_cache *s,
  */
 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 {
+#ifdef CONFIG_SLUB_CPU_PARTIAL
 	struct page *oldpage;
 	int pages;
 	int pobjects;
@@ -1989,6 +2002,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->next = oldpage;
 
 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+#endif
 }
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
@@ -2497,7 +2511,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		new.inuse--;
 		if ((!new.inuse || !prior) && !was_frozen) {
 
-			if (!kmem_cache_debug(s) && !prior)
+			if (kmem_cache_has_cpu_partial(s) && !prior)
 
 				/*
 				 * Slab was on no list before and will be partially empty
@@ -2552,8 +2566,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	 * Objects left in the slab. If it was not on the partial list before
 	 * then add it.
 	 */
-	if (kmem_cache_debug(s) && unlikely(!prior)) {
-		remove_full(s, page);
+	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
+		if (kmem_cache_debug(s))
+			remove_full(s, page);
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -3061,7 +3076,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	 * per node list when we run out of per cpu objects. We only fetch 50%
 	 * to keep some capacity around for frees.
 	 */
-	if (kmem_cache_debug(s))
+	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
 	else if (s->size >= PAGE_SIZE)
 		s->cpu_partial = 2;
@@ -4456,7 +4471,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
 	err = strict_strtoul(buf, 10, &objects);
 	if (err)
 		return err;
-	if (objects && kmem_cache_debug(s))
+	if (objects && !kmem_cache_has_cpu_partial(s))
 		return -EINVAL;
 
 	s->cpu_partial = objects;
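The cpu_partial_store() hunk above is the handler behind each cache's cpu_partial attribute under /sys/kernel/slab/, so the new check can be observed from userspace. A minimal sketch, assuming root, a SLUB kernel, and an existing cache named kmalloc-64 (the cache name is only an example, not part of this commit): with CONFIG_SLUB_CPU_PARTIAL disabled, a nonzero write is expected to fail with EINVAL, while writing 0 is still accepted.

/* Illustrative probe, not part of this commit. Run as root on a SLUB kernel;
 * "kmalloc-64" is just an example cache name. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/slab/kmalloc-64/cpu_partial";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		fprintf(stderr, "open %s: %s\n", path, strerror(errno));
		return 1;
	}
	/* A nonzero value is rejected with EINVAL when the cache has no per
	 * cpu partial support (CONFIG_SLUB_CPU_PARTIAL=n or a debug cache);
	 * writing 0 succeeds either way. */
	if (write(fd, "13", 2) < 0)
		fprintf(stderr, "write 13: %s (expected with the option off)\n",
			strerror(errno));
	close(fd);
	return 0;
}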
