mm/slub: beautify code for 80 column limitation and tab alignment
Keep both code and comments within the 80-column limit.

Correct the tab alignment of an 'if-else' statement.

Acked-by: Christoph Lameter <[email protected]>
Signed-off-by: Chen Gang <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>
Chen Gang authored and penberg committed Jul 17, 2013
1 parent e35e1a9 commit d0e0ac9
92 changes: 56 additions & 36 deletions mm/slub.c
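
For readers unfamiliar with the convention this patch enforces, the sketch below is a hypothetical fragment (not taken from mm/slub.c; struct pair, check_pair and their fields are invented for illustration). It shows the same treatment the diff applies: a comparison that would run past 80 columns is broken after the "&&" and the continuation is indented past the opening "if (".

/*
 * Illustrative only: mirrors the wrapping style used in the patch below.
 * All identifiers here are made up for the example.
 */
#include <stdio.h>

struct pair {
	unsigned long first;
	unsigned long second;
};

static int check_pair(const struct pair *p, unsigned long expected_first,
			unsigned long expected_second)
{
	/* Wrapped the same way the patch wraps the freelist/counters test. */
	if (p->first == expected_first &&
			p->second == expected_second)
		return 1;
	return 0;
}

int main(void)
{
	struct pair p = { .first = 1, .second = 2 };

	printf("match: %d\n", check_pair(&p, 1, 2));
	return 0;
}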
@@ -373,7 +373,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
 #endif
 	{
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -411,7 +412,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
 
 		local_irq_save(flags);
 		slab_lock(page);
-		if (page->freelist == freelist_old && page->counters == counters_old) {
+		if (page->freelist == freelist_old &&
+					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
 			slab_unlock(page);
@@ -553,8 +555,9 @@ static void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_page_info(struct page *page)
 {
-	printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
-		page, page->objects, page->inuse, page->freelist, page->flags);
+	printk(KERN_ERR
+	       "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+	       page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -629,7 +632,8 @@ static void object_err(struct kmem_cache *s, struct page *page,
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page,
+			const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -788,7 +792,8 @@ static int check_object(struct kmem_cache *s, struct page *page,
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			check_bytes_and_report(s, page, p, "Alignment padding",
-				endobject, POISON_INUSE, s->inuse - s->object_size);
+				endobject, POISON_INUSE,
+				s->inuse - s->object_size);
 		}
 	}
 
@@ -918,7 +923,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 			page->freelist);
 
 		if (!alloc)
-			print_section("Object ", (void *)object, s->object_size);
+			print_section("Object ", (void *)object,
+					s->object_size);
 
 		dump_stack();
 	}
@@ -937,7 +943,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 	return should_failslab(s->object_size, flags, s->flags);
 }
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
 	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
@@ -1039,7 +1046,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 	init_tracking(s, object);
 }
 
-static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+static noinline int alloc_debug_processing(struct kmem_cache *s,
+					struct page *page,
 					void *object, unsigned long addr)
 {
 	if (!check_slab(s, page))
@@ -1743,7 +1751,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 /*
  * Remove the cpu slab
  */
-static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist)
+static void deactivate_slab(struct kmem_cache *s, struct page *page,
+				void *freelist)
 {
 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2002,7 +2011,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 		page->pobjects = pobjects;
 		page->next = oldpage;
 
-	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
+								!= oldpage);
 #endif
 }
 
@@ -2172,8 +2182,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
 }
 
 /*
- * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist
- * or deactivate the page.
+ * Check the page->freelist of a page and either transfer the freelist to the
+ * per cpu freelist or deactivate the page.
  *
  * The page is still frozen if the return value is not NULL.
 *
@@ -2317,7 +2327,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto load_freelist;
 
 	/* Only entered in the debug case */
-	if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr))
+	if (kmem_cache_debug(s) &&
+			!alloc_debug_processing(s, page, freelist, addr))
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
 	deactivate_slab(s, page, get_freepointer(s, freelist));
@@ -2385,13 +2396,15 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 		 * The cmpxchg will only match if there was no additional
 		 * operation and if we are on the right processor.
 		 *
-		 * The cmpxchg does the following atomically (without lock semantics!)
+		 * The cmpxchg does the following atomically (without lock
+		 * semantics!)
 		 * 1. Relocate first pointer to the current per cpu area.
 		 * 2. Verify that tid and freelist have not been changed
 		 * 3. If they were not changed replace tid and freelist
 		 *
-		 * Since this is without lock semantics the protection is only against
-		 * code executing on this cpu *not* from access by other cpus.
+		 * Since this is without lock semantics the protection is only
+		 * against code executing on this cpu *not* from access by
+		 * other cpus.
		 */
		if (unlikely(!this_cpu_cmpxchg_double(
				s->cpu_slab->freelist, s->cpu_slab->tid,
@@ -2423,7 +2436,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
+				s->size, gfpflags);
 
 	return ret;
 }
@@ -2515,8 +2529,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 			if (kmem_cache_has_cpu_partial(s) && !prior)
 
 				/*
-				 * Slab was on no list before and will be partially empty
-				 * We can defer the list move and instead freeze it.
+				 * Slab was on no list before and will be
+				 * partially empty
+				 * We can defer the list move and instead
+				 * freeze it.
 				 */
 				new.frozen = 1;
 
@@ -3074,8 +3090,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	 * A) The number of objects from per cpu partial slabs dumped to the
 	 *    per node list when we reach the limit.
 	 * B) The number of objects in cpu partial slabs to extract from the
-	 *    per node list when we run out of per cpu objects. We only fetch 50%
-	 *    to keep some capacity around for frees.
+	 *    per node list when we run out of per cpu objects. We only fetch
+	 *    50% to keep some capacity around for frees.
 	 */
 	if (!kmem_cache_has_cpu_partial(s))
 		s->cpu_partial = 0;
@@ -3102,8 +3118,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	if (flags & SLAB_PANIC)
 		panic("Cannot create slab %s size=%lu realsize=%u "
 			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)s->size, s->size, oo_order(s->oo),
-			s->offset, flags);
+			s->name, (unsigned long)s->size, s->size,
+			oo_order(s->oo), s->offset, flags);
 	return -EINVAL;
 }
 
@@ -3341,7 +3357,8 @@ bool verify_mem_not_deleted(const void *x)
 
 	slab_lock(page);
 	if (on_freelist(page->slab_cache, page, object)) {
-		object_err(page->slab_cache, page, object, "Object is on free-list");
+		object_err(page->slab_cache, page, object,
+				"Object is on free-list");
 		rv = false;
 	} else {
 		rv = true;
@@ -4165,15 +4182,17 @@ static int list_locations(struct kmem_cache *s, char *buf,
 				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
-			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
+			len += cpulist_scnprintf(buf + len,
+						 PAGE_SIZE - len - 50,
 						 to_cpumask(l->cpus));
 		}
 
 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " nodes=");
-			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					l->nodes);
+			len += nodelist_scnprintf(buf + len,
+						  PAGE_SIZE - len - 50,
+						  l->nodes);
 		}
 
 		len += sprintf(buf + len, "\n");
@@ -4280,7 +4299,8 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		int cpu;
 
 		for_each_possible_cpu(cpu) {
-			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
+							       cpu);
 			int node;
 			struct page *page;
 
@@ -4314,12 +4334,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_node_state(node, N_NORMAL_MEMORY) {
 			struct kmem_cache_node *n = get_node(s, node);
 
-		if (flags & SO_TOTAL)
-			x = atomic_long_read(&n->total_objects);
-		else if (flags & SO_OBJECTS)
-			x = atomic_long_read(&n->total_objects) -
-				count_partial(n, count_free);
-
+			if (flags & SO_TOTAL)
+				x = atomic_long_read(&n->total_objects);
+			else if (flags & SO_OBJECTS)
+				x = atomic_long_read(&n->total_objects) -
+					count_partial(n, count_free);
 			else
 				x = atomic_long_read(&n->nr_slabs);
 			total += x;
@@ -5135,7 +5154,8 @@ static char *create_unique_id(struct kmem_cache *s)
 
 #ifdef CONFIG_MEMCG_KMEM
 	if (!is_root_cache(s))
-		p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg));
+		p += sprintf(p, "-%08d",
+				memcg_cache_id(s->memcg_params->memcg));
 #endif
 
 	BUG_ON(p > name + ID_STR_LENGTH - 1);
