mm/slab_common.c: make calculate_alignment() static
The calculate_alignment() function is used only inside slab_common.c, so
make it static and let the compiler do more optimizations.

After this patch there is a small reduction in text and data size.

  $ gcc --version
    gcc (GCC) 7.2.1 20171128

Before:
  text	   data	    bss	    dec	     hex	filename
  9890457  3828702  1212364 14931523 e3d643	vmlinux

After:
  text	   data	    bss	    dec	     hex	filename
  9890437  3828670  1212364 14931471 e3d60f	vmlinux

Also I fixed a style problem reported by checkpatch.

  WARNING: Missing a blank line after declarations
  #53: FILE: mm/slab_common.c:286:
  +		unsigned long ralign = cache_line_size();
  +		while (size <= ralign / 2)

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Byongho Lee <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Byongho Lee authored and torvalds committed Feb 1, 2018
1 parent d984187 commit 692ae74
Showing 2 changed files with 29 additions and 30 deletions.
3 changes: 0 additions & 3 deletions mm/slab.h
@@ -78,9 +78,6 @@ extern const struct kmalloc_info_struct {
         unsigned long size;
 } kmalloc_info[];
 
-unsigned long calculate_alignment(slab_flags_t flags,
-                unsigned long align, unsigned long size);
-
 #ifndef CONFIG_SLOB
 /* Kmalloc array related functions */
 void setup_kmalloc_cache_index_table(void);
56 changes: 29 additions & 27 deletions mm/slab_common.c
@@ -267,6 +267,35 @@ static inline void memcg_unlink_cache(struct kmem_cache *s)
 }
 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 
+/*
+ * Figure out what the alignment of the objects will be given a set of
+ * flags, a user specified alignment and the size of the objects.
+ */
+static unsigned long calculate_alignment(unsigned long flags,
+                unsigned long align, unsigned long size)
+{
+        /*
+         * If the user wants hardware cache aligned objects then follow that
+         * suggestion if the object is sufficiently large.
+         *
+         * The hardware cache alignment cannot override the specified
+         * alignment though. If that is greater then use it.
+         */
+        if (flags & SLAB_HWCACHE_ALIGN) {
+                unsigned long ralign;
+
+                ralign = cache_line_size();
+                while (size <= ralign / 2)
+                        ralign /= 2;
+                align = max(align, ralign);
+        }
+
+        if (align < ARCH_SLAB_MINALIGN)
+                align = ARCH_SLAB_MINALIGN;
+
+        return ALIGN(align, sizeof(void *));
+}
+
 /*
  * Find a mergeable slab cache
  */
@@ -337,33 +366,6 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
         return NULL;
 }
 
-/*
- * Figure out what the alignment of the objects will be given a set of
- * flags, a user specified alignment and the size of the objects.
- */
-unsigned long calculate_alignment(slab_flags_t flags,
-                unsigned long align, unsigned long size)
-{
-        /*
-         * If the user wants hardware cache aligned objects then follow that
-         * suggestion if the object is sufficiently large.
-         *
-         * The hardware cache alignment cannot override the specified
-         * alignment though. If that is greater then use it.
-         */
-        if (flags & SLAB_HWCACHE_ALIGN) {
-                unsigned long ralign = cache_line_size();
-                while (size <= ralign / 2)
-                        ralign /= 2;
-                align = max(align, ralign);
-        }
-
-        if (align < ARCH_SLAB_MINALIGN)
-                align = ARCH_SLAB_MINALIGN;
-
-        return ALIGN(align, sizeof(void *));
-}
-
 static struct kmem_cache *create_cache(const char *name,
                 size_t object_size, size_t size, size_t align,
                 slab_flags_t flags, void (*ctor)(void *),
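
For reference, the rounding behaviour can be illustrated outside the kernel.
The sketch below is a minimal user-space approximation, not kernel code:
CACHE_LINE_SIZE, MINALIGN, and HWCACHE_ALIGN are local stand-ins for
cache_line_size(), ARCH_SLAB_MINALIGN, and SLAB_HWCACHE_ALIGN, and
calc_align() merely mirrors the logic of the now-static calculate_alignment().

#include <stdio.h>

#define CACHE_LINE_SIZE 64UL    /* stand-in for cache_line_size() */
#define MINALIGN        8UL     /* stand-in for ARCH_SLAB_MINALIGN */
#define HWCACHE_ALIGN   0x2000UL /* stand-in for SLAB_HWCACHE_ALIGN */

/* User-space mirror of the calculate_alignment() logic. */
static unsigned long calc_align(unsigned long flags,
                                unsigned long align, unsigned long size)
{
        if (flags & HWCACHE_ALIGN) {
                unsigned long ralign;

                ralign = CACHE_LINE_SIZE;
                /* Halve the alignment while the object fits in half of it. */
                while (size <= ralign / 2)
                        ralign /= 2;
                if (ralign > align)
                        align = ralign;
        }

        if (align < MINALIGN)
                align = MINALIGN;

        /* Round up to a multiple of the pointer size, like ALIGN(). */
        return (align + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
}

int main(void)
{
        /* 24-byte object, hw-cache aligned: 64 -> 32, stop (24 > 16) => 32. */
        printf("%lu\n", calc_align(HWCACHE_ALIGN, 0, 24));
        /* Same object without the flag: only the 8-byte minimum applies. */
        printf("%lu\n", calc_align(0, 0, 24));
        return 0;
}

Keeping calculate_alignment() static lets the compiler inline or specialise it
for its callers within slab_common.c, which is presumably where the small
text/data reduction quoted in the commit message comes from.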
