Skip to content

Commit

Permalink
slob: Rework #ifdeffery in slab.h
Browse files Browse the repository at this point in the history
Make the SLOB-specific stuff harmonize more with the way the other allocators
do it. Create the typical kmalloc constants for that purpose. SLOB does not
support a kmalloc array, but the constants help us avoid #ifdefs.

Signed-off-by: Christoph Lameter <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>
  • Loading branch information
Christoph Lameter authored and penberg committed Jun 18, 2013
1 parent d0d04b7 commit 069e2b3
Showing 1 changed file with 28 additions and 11 deletions.
39 changes: 28 additions & 11 deletions include/linux/slab.h
Original file line number Diff line number Diff line change
Expand Up @@ -169,11 +169,7 @@ struct kmem_cache {
struct list_head list; /* List of all slab caches on the system */
};

#define KMALLOC_MAX_SIZE (1UL << 30)

#include <linux/slob_def.h>

#else /* CONFIG_SLOB */
#endif /* CONFIG_SLOB */

/*
* Kmalloc array related definitions
Expand All @@ -195,7 +191,9 @@ struct kmem_cache {
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
#else
#endif

#ifdef CONFIG_SLUB
/*
* SLUB allocates up to order 2 pages directly and otherwise
* passes the request to the page allocator.
Expand All @@ -207,6 +205,19 @@ struct kmem_cache {
#endif
#endif

#ifdef CONFIG_SLOB
/*
* SLOB passes all page size and larger requests to the page allocator.
* No kmalloc array is necessary since objects of different sizes can
* be allocated from the same page.
*/
#define KMALLOC_SHIFT_MAX 30
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
Expand All @@ -221,6 +232,7 @@ struct kmem_cache {
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
Expand Down Expand Up @@ -275,13 +287,18 @@ static __always_inline int kmalloc_index(size_t size)
/* Will never be reached. Needed because the compiler may complain */
return -1;
}
#endif /* !CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#elif defined(CONFIG_SLUB)
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#else
#error "Unknown slab allocator"
#endif

#ifdef CONFIG_SLOB
#include <linux/slob_def.h>
#endif

/*
Expand All @@ -291,6 +308,7 @@ static __always_inline int kmalloc_index(size_t size)
*/
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
if (n > 2)
return 1 << n;

Expand All @@ -299,10 +317,9 @@ static __always_inline int kmalloc_size(int n)

if (n == 2 && KMALLOC_MIN_SIZE <= 64)
return 192;

#endif
return 0;
}
#endif /* !CONFIG_SLOB */

/*
* Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
Expand Down

0 comments on commit 069e2b3

Please sign in to comment.