mm: coalesce split strings

Kernel style prefers a single string over split strings when the string is
'user-visible'.

Miscellanea:

 - Add a missing newline
 - Realign arguments

Signed-off-by: Joe Perches <[email protected]>
Acked-by: Tejun Heo <[email protected]>	[percpu]
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
JoePerches authored and torvalds committed Mar 17, 2016
1 parent 598d809 commit 756a025
Showing 20 changed files with 78 additions and 109 deletions.
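
For illustration, a minimal sketch of the transformation applied in every hunk below. The helper and message are hypothetical (they are not taken from this commit), but they show the same change: Documentation/CodingStyle asks that user-visible strings never be broken across source lines, so that a message seen in the log can be grepped for in the tree, and the arguments that follow are realigned as noted in the commit message above.

#include <linux/printk.h>

/* Hypothetical helper, for illustration only. */
static void report_double_free(const char *name, unsigned long long dma)
{
        /*
         * Before: the user-visible string is split across two source
         * lines, so grepping the tree for the logged text
         * ("... already free") cannot match it on one line.
         */
        pr_err("%s, dma %Lx "
               "already free\n", name, dma);

        /*
         * After: one string literal per message, even though the line
         * may exceed 80 columns, with the remaining arguments
         * realigned under the open parenthesis.
         */
        pr_err("%s, dma %Lx already free\n",
               name, dma);
}

checkpatch.pl warns about the first form ("quoted string split across lines"); coalescing the literal, as each hunk below does, quiets that warning while leaving the emitted message unchanged.
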
mm/dmapool.c (4 additions, 6 deletions)

@@ -452,13 +452,11 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
             }
             spin_unlock_irqrestore(&pool->lock, flags);
             if (pool->dev)
-                dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
-                    "already free\n", pool->name,
-                    (unsigned long long)dma);
+                dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
+                    pool->name, (unsigned long long)dma);
             else
-                printk(KERN_ERR "dma_pool_free %s, dma %Lx "
-                    "already free\n", pool->name,
-                    (unsigned long long)dma);
+                printk(KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
+                    pool->name, (unsigned long long)dma);
             return;
         }
     }

mm/huge_memory.c (1 addition, 2 deletions)

@@ -168,8 +168,7 @@ static void set_recommended_min_free_kbytes(void)

     if (recommended_min > min_free_kbytes) {
         if (user_min_free_kbytes >= 0)
-            pr_info("raising min_free_kbytes from %d to %lu "
-                "to help transparent hugepage allocations\n",
+            pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
                 min_free_kbytes, recommended_min);

         min_free_kbytes = recommended_min;

mm/kasan/report.c (2 additions, 4 deletions)

@@ -214,8 +214,7 @@ static void kasan_report_error(struct kasan_access_info *info)
      */
     kasan_disable_current();
     spin_lock_irqsave(&report_lock, flags);
-    pr_err("================================="
-        "=================================\n");
+    pr_err("==================================================================\n");
     if (info->access_addr <
             kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) {
         if ((unsigned long)info->access_addr < PAGE_SIZE)
@@ -236,8 +235,7 @@ static void kasan_report_error(struct kasan_access_info *info)
         print_address_description(info);
         print_shadow_for_address(info->first_bad_addr);
     }
-    pr_err("================================="
-        "=================================\n");
+    pr_err("==================================================================\n");
     add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
     spin_unlock_irqrestore(&report_lock, flags);
     kasan_enable_current();

mm/kmemcheck.c (1 addition, 2 deletions)

@@ -20,8 +20,7 @@ void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
     shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
     if (!shadow) {
         if (printk_ratelimit())
-            printk(KERN_ERR "kmemcheck: failed to allocate "
-                "shadow bitmap\n");
+            printk(KERN_ERR "kmemcheck: failed to allocate shadow bitmap\n");
         return;
     }

mm/kmemleak.c (8 additions, 10 deletions)

@@ -596,8 +595,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
         else if (parent->pointer + parent->size <= ptr)
             link = &parent->rb_node.rb_right;
         else {
-            kmemleak_stop("Cannot insert 0x%lx into the object "
-                "search tree (overlaps existing)\n",
+            kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
                 ptr);
             /*
              * No need for parent->lock here since "parent" cannot
@@ -670,8 +669,8 @@ static void delete_object_part(unsigned long ptr, size_t size)
     object = find_and_remove_object(ptr, 1);
     if (!object) {
 #ifdef DEBUG
-        kmemleak_warn("Partially freeing unknown object at 0x%08lx "
-            "(size %zu)\n", ptr, size);
+        kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
+            ptr, size);
 #endif
         return;
     }
@@ -717,8 +716,8 @@ static void paint_ptr(unsigned long ptr, int color)

     object = find_and_get_object(ptr, 0);
     if (!object) {
-        kmemleak_warn("Trying to color unknown object "
-            "at 0x%08lx as %s\n", ptr,
+        kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
+            ptr,
             (color == KMEMLEAK_GREY) ? "Grey" :
             (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
         return;
@@ -1463,8 +1462,8 @@ static void kmemleak_scan(void)
     if (new_leaks) {
         kmemleak_found_leaks = true;

-        pr_info("%d new suspected memory leaks (see "
-            "/sys/kernel/debug/kmemleak)\n", new_leaks);
+        pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
+            new_leaks);
     }

 }
@@ -1795,8 +1794,7 @@ static void kmemleak_do_cleanup(struct work_struct *work)
     if (!kmemleak_found_leaks)
         __kmemleak_do_cleanup();
     else
-        pr_info("Kmemleak disabled without freeing internal data. "
-            "Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n");
+        pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
 }

 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

mm/memblock.c (1 addition, 2 deletions)

@@ -238,8 +238,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
          * so we use WARN_ONCE() here to see the stack trace if
          * fail happens.
          */
-        WARN_ONCE(1, "memblock: bottom-up allocation failed, "
-            "memory hotunplug may be affected\n");
+        WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
     }

     return __memblock_find_range_top_down(start, end, size, align, nid,

mm/memory_hotplug.c (1 addition, 2 deletions)

@@ -1970,8 +1970,7 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)

         beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
         endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
-        pr_warn("removing memory fails, because memory "
-            "[%pa-%pa] is onlined\n",
+        pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
             &beginpa, &endpa);
     }

mm/mempolicy.c (1 addition, 3 deletions)

@@ -2559,9 +2559,7 @@ static void __init check_numabalancing_enable(void)
         set_numabalancing_state(numabalancing_override == 1);

     if (num_online_nodes() > 1 && !numabalancing_override) {
-        pr_info("%s automatic NUMA balancing. "
-            "Configure with numa_balancing= or the "
-            "kernel.numa_balancing sysctl",
+        pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
             numabalancing_default ? "Enabling" : "Disabling");
         set_numabalancing_state(numabalancing_default);
     }

mm/mmap.c (3 additions, 5 deletions)

@@ -2517,9 +2517,8 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
     unsigned long ret = -EINVAL;
     struct file *file;

-    pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
-            "See Documentation/vm/remap_file_pages.txt.\n",
-            current->comm, current->pid);
+    pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.txt.\n",
+        current->comm, current->pid);

     if (prot)
         return ret;
@@ -2885,8 +2884,7 @@ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
     if (is_data_mapping(flags) &&
         mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
         if (ignore_rlimit_data)
-            pr_warn_once("%s (%d): VmData %lu exceed data ulimit "
-                "%lu. Will be forbidden soon.\n",
+            pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Will be forbidden soon.\n",
                 current->comm, current->pid,
                 (mm->data_vm + npages) << PAGE_SHIFT,
                 rlimit(RLIMIT_DATA));

mm/oom_kill.c (1 addition, 2 deletions)

@@ -383,8 +383,7 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
 static void dump_header(struct oom_control *oc, struct task_struct *p,
             struct mem_cgroup *memcg)
 {
-    pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, "
-        "oom_score_adj=%hd\n",
+    pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
         current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
         current->signal->oom_score_adj);

mm/page_alloc.c (17 additions, 20 deletions)

@@ -4074,8 +4074,7 @@ static int __parse_numa_zonelist_order(char *s)
         user_zonelist_order = ZONELIST_ORDER_ZONE;
     } else {
         printk(KERN_WARNING
-            "Ignoring invalid numa_zonelist_order value: "
-            "%s\n", s);
+            "Ignoring invalid numa_zonelist_order value: %s\n", s);
         return -EINVAL;
     }
     return 0;
@@ -4539,12 +4538,11 @@ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
     else
         page_group_by_mobility_disabled = 0;

-    pr_info("Built %i zonelists in %s order, mobility grouping %s. "
-        "Total pages: %ld\n",
-            nr_online_nodes,
-            zonelist_order_name[current_zonelist_order],
-            page_group_by_mobility_disabled ? "off" : "on",
-            vm_total_pages);
+    pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n",
+        nr_online_nodes,
+        zonelist_order_name[current_zonelist_order],
+        page_group_by_mobility_disabled ? "off" : "on",
+        vm_total_pages);
 #ifdef CONFIG_NUMA
     pr_info("Policy zone: %s\n", zone_names[policy_zone]);
 #endif
@@ -6142,22 +6140,21 @@ void __init mem_init_print_info(const char *str)

 #undef adj_init_size

-    pr_info("Memory: %luK/%luK available "
-           "(%luK kernel code, %luK rwdata, %luK rodata, "
-           "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
+    pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
 #ifdef CONFIG_HIGHMEM
-           ", %luK highmem"
+        ", %luK highmem"
 #endif
-           "%s%s)\n",
-           nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
-           codesize >> 10, datasize >> 10, rosize >> 10,
-           (init_data_size + init_code_size) >> 10, bss_size >> 10,
-           (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
-           totalcma_pages << (PAGE_SHIFT-10),
+        "%s%s)\n",
+        nr_free_pages() << (PAGE_SHIFT - 10),
+        physpages << (PAGE_SHIFT - 10),
+        codesize >> 10, datasize >> 10, rosize >> 10,
+        (init_data_size + init_code_size) >> 10, bss_size >> 10,
+        (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
+        totalcma_pages << (PAGE_SHIFT - 10),
 #ifdef CONFIG_HIGHMEM
-           totalhigh_pages << (PAGE_SHIFT-10),
+        totalhigh_pages << (PAGE_SHIFT - 10),
 #endif
-           str ? ", " : "", str ? str : "");
+        str ? ", " : "", str ? str : "");
 }

 /**

mm/page_owner.c (2 additions, 3 deletions)

@@ -198,9 +198,8 @@ void __dump_page_owner(struct page *page)
         return;
     }

-    pr_alert("page allocated via order %u, migratetype %s, "
-        "gfp_mask %#x(%pGg)\n", page_ext->order,
-        migratetype_names[mt], gfp_mask, &gfp_mask);
+    pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
+        page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
     print_stack_trace(&trace, 0);

     if (page_ext->last_migrate_reason != -1)

mm/percpu.c (2 additions, 2 deletions)

@@ -888,8 +888,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
     size = ALIGN(size, 2);

     if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
-        WARN(true, "illegal size (%zu) or align (%zu) for "
-            "percpu allocation\n", size, align);
+        WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
+            size, align);
         return NULL;
     }

mm/slab.c (10 additions, 18 deletions)

@@ -1566,11 +1566,9 @@ static void dump_line(char *data, int offset, int limit)
     if (bad_count == 1) {
         error ^= POISON_FREE;
         if (!(error & (error - 1))) {
-            printk(KERN_ERR "Single bit error detected. Probably "
-                "bad RAM.\n");
+            printk(KERN_ERR "Single bit error detected. Probably bad RAM.\n");
 #ifdef CONFIG_X86
-            printk(KERN_ERR "Run memtest86+ or a similar memory "
-                "test tool.\n");
+            printk(KERN_ERR "Run memtest86+ or a similar memory test tool.\n");
 #else
             printk(KERN_ERR "Run a memory test tool.\n");
 #endif
@@ -1693,11 +1691,9 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
         }
         if (cachep->flags & SLAB_RED_ZONE) {
             if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
-                slab_error(cachep, "start of a freed object "
-                    "was overwritten");
+                slab_error(cachep, "start of a freed object was overwritten");
             if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
-                slab_error(cachep, "end of a freed object "
-                    "was overwritten");
+                slab_error(cachep, "end of a freed object was overwritten");
         }
     }
 }
@@ -2398,11 +2394,9 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)

         if (cachep->flags & SLAB_RED_ZONE) {
             if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
-                slab_error(cachep, "constructor overwrote the"
-                    " end of an object");
+                slab_error(cachep, "constructor overwrote the end of an object");
             if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
-                slab_error(cachep, "constructor overwrote the"
-                    " start of an object");
+                slab_error(cachep, "constructor overwrote the start of an object");
         }
         /* need to poison the objs? */
         if (cachep->flags & SLAB_POISON) {
@@ -2469,8 +2463,8 @@ static void slab_put_obj(struct kmem_cache *cachep,
     /* Verify double free bug */
     for (i = page->active; i < cachep->num; i++) {
         if (get_free_obj(page, i) == objnr) {
-            printk(KERN_ERR "slab: double free detected in cache "
-                "'%s', objp %p\n", cachep->name, objp);
+            printk(KERN_ERR "slab: double free detected in cache '%s', objp %p\n",
+                cachep->name, objp);
             BUG();
         }
     }
@@ -2901,8 +2895,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
     if (cachep->flags & SLAB_RED_ZONE) {
         if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
                 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
-            slab_error(cachep, "double free, or memory outside"
-                " object was overwritten");
+            slab_error(cachep, "double free, or memory outside object was overwritten");
             printk(KERN_ERR
                 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
                 objp, *dbg_redzone1(cachep, objp),
@@ -4028,8 +4021,7 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
         unsigned long node_frees = cachep->node_frees;
         unsigned long overflows = cachep->node_overflow;

-        seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
-            "%4lu %4lu %4lu %4lu %4lu",
+        seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
             allocs, high, grown,
             reaped, errors, max_freeable, node_allocs,
             node_frees, overflows);

mm/slab_common.c (4 additions, 6 deletions)

@@ -726,8 +726,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
     err = shutdown_cache(s, &release, &need_rcu_barrier);

     if (err) {
-        pr_err("kmem_cache_destroy %s: "
-            "Slab cache still has objects\n", s->name);
+        pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
+            s->name);
         dump_stack();
     }
 out_unlock:
@@ -1047,13 +1047,11 @@ static void print_slabinfo_header(struct seq_file *m)
 #else
     seq_puts(m, "slabinfo - version: 2.1\n");
 #endif
-    seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
-        "<objperslab> <pagesperslab>");
+    seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
     seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
     seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #ifdef CONFIG_DEBUG_SLAB
-    seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
-        "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
+    seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
     seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
     seq_putc(m, '\n');

[The remaining changed files in this commit are not shown above.]
