Commit 756a025f authored by Joe Perches, committed by Linus Torvalds

mm: coalesce split strings

Kernel style prefers a single string over split strings when the string is
'user-visible'.
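For illustration, a minimal before/after sketch of the pattern being coalesced (the message and variable names here are hypothetical, not taken from this commit):

	/* Split: grepping the tree for the message seen in dmesg finds nothing */
	pr_err("frobnicate of device %s failed: "
	       "error %d\n", dev_name, err);

	/* Coalesced: the whole user-visible string stays on one line, even past 80 columns */
	pr_err("frobnicate of device %s failed: error %d\n", dev_name, err);

Keeping the format string whole means a message copied from the kernel log can be found with a single grep.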

Miscellanea:

 - Add a missing newline
 - Realign arguments
Signed-off-by: Joe Perches <joe@perches.com>
Acked-by: Tejun Heo <tj@kernel.org>	[percpu]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 598d8091
@@ -452,13 +452,11 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 		}
 		spin_unlock_irqrestore(&pool->lock, flags);
 		if (pool->dev)
-			dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
-				"already free\n", pool->name,
-				(unsigned long long)dma);
+			dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
+				pool->name, (unsigned long long)dma);
 		else
-			printk(KERN_ERR "dma_pool_free %s, dma %Lx "
-				"already free\n", pool->name,
-				(unsigned long long)dma);
+			printk(KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
+			       pool->name, (unsigned long long)dma);
 		return;
 	}
 }
...
@@ -168,8 +168,7 @@ static void set_recommended_min_free_kbytes(void)
 	if (recommended_min > min_free_kbytes) {
 		if (user_min_free_kbytes >= 0)
-			pr_info("raising min_free_kbytes from %d to %lu "
-				"to help transparent hugepage allocations\n",
+			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
 				min_free_kbytes, recommended_min);
 		min_free_kbytes = recommended_min;
...
@@ -214,8 +214,7 @@ static void kasan_report_error(struct kasan_access_info *info)
 	 */
 	kasan_disable_current();
 	spin_lock_irqsave(&report_lock, flags);
-	pr_err("================================="
-		"=================================\n");
+	pr_err("==================================================================\n");
 	if (info->access_addr <
 			kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) {
 		if ((unsigned long)info->access_addr < PAGE_SIZE)
@@ -236,8 +235,7 @@ static void kasan_report_error(struct kasan_access_info *info)
 		print_address_description(info);
 		print_shadow_for_address(info->first_bad_addr);
 	}
-	pr_err("================================="
-		"=================================\n");
+	pr_err("==================================================================\n");
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 	spin_unlock_irqrestore(&report_lock, flags);
 	kasan_enable_current();
...
@@ -20,8 +20,7 @@ void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
 	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
 	if (!shadow) {
 		if (printk_ratelimit())
-			printk(KERN_ERR "kmemcheck: failed to allocate "
-				"shadow bitmap\n");
+			printk(KERN_ERR "kmemcheck: failed to allocate shadow bitmap\n");
 		return;
 	}
...
@@ -596,8 +596,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 		else if (parent->pointer + parent->size <= ptr)
 			link = &parent->rb_node.rb_right;
 		else {
-			kmemleak_stop("Cannot insert 0x%lx into the object "
-				      "search tree (overlaps existing)\n",
+			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
 				      ptr);
 			/*
 			 * No need for parent->lock here since "parent" cannot
@@ -670,8 +669,8 @@ static void delete_object_part(unsigned long ptr, size_t size)
 	object = find_and_remove_object(ptr, 1);
 	if (!object) {
 #ifdef DEBUG
-		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
-			      "(size %zu)\n", ptr, size);
+		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
+			      ptr, size);
 #endif
 		return;
 	}
@@ -717,8 +716,8 @@ static void paint_ptr(unsigned long ptr, int color)
 	object = find_and_get_object(ptr, 0);
 	if (!object) {
-		kmemleak_warn("Trying to color unknown object "
-			      "at 0x%08lx as %s\n", ptr,
+		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
+			      ptr,
 			      (color == KMEMLEAK_GREY) ? "Grey" :
 			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
 		return;
@@ -1463,8 +1462,8 @@ static void kmemleak_scan(void)
 	if (new_leaks) {
 		kmemleak_found_leaks = true;
-		pr_info("%d new suspected memory leaks (see "
-			"/sys/kernel/debug/kmemleak)\n", new_leaks);
+		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
+			new_leaks);
 	}
 }
@@ -1795,8 +1794,7 @@ static void kmemleak_do_cleanup(struct work_struct *work)
 	if (!kmemleak_found_leaks)
 		__kmemleak_do_cleanup();
 	else
-		pr_info("Kmemleak disabled without freeing internal data. "
-			"Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n");
+		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
 }
 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
...
@@ -238,8 +238,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 		 * so we use WARN_ONCE() here to see the stack trace if
 		 * fail happens.
 		 */
-		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
-			     "memory hotunplug may be affected\n");
+		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
 	}
 	return __memblock_find_range_top_down(start, end, size, align, nid,
...
@@ -1970,8 +1970,7 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
 		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
 		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
-		pr_warn("removing memory fails, because memory "
-			"[%pa-%pa] is onlined\n",
+		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
 			&beginpa, &endpa);
 	}
...
@@ -2559,9 +2559,7 @@ static void __init check_numabalancing_enable(void)
 		set_numabalancing_state(numabalancing_override == 1);
 	if (num_online_nodes() > 1 && !numabalancing_override) {
-		pr_info("%s automatic NUMA balancing. "
-			"Configure with numa_balancing= or the "
-			"kernel.numa_balancing sysctl",
+		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
 			numabalancing_default ? "Enabling" : "Disabling");
 		set_numabalancing_state(numabalancing_default);
 	}
...
@@ -2517,9 +2517,8 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	unsigned long ret = -EINVAL;
 	struct file *file;
-	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. "
-		     "See Documentation/vm/remap_file_pages.txt.\n",
-		     current->comm, current->pid);
+	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.txt.\n",
+		     current->comm, current->pid);
 	if (prot)
 		return ret;
@@ -2885,8 +2884,7 @@ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
 	if (is_data_mapping(flags) &&
 	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
 		if (ignore_rlimit_data)
-			pr_warn_once("%s (%d): VmData %lu exceed data ulimit "
-				     "%lu. Will be forbidden soon.\n",
+			pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Will be forbidden soon.\n",
 				     current->comm, current->pid,
 				     (mm->data_vm + npages) << PAGE_SHIFT,
 				     rlimit(RLIMIT_DATA));
...
@@ -383,8 +383,7 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
 static void dump_header(struct oom_control *oc, struct task_struct *p,
 			struct mem_cgroup *memcg)
 {
-	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, "
-		"oom_score_adj=%hd\n",
+	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
 		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
 		current->signal->oom_score_adj);
...
@@ -4074,8 +4074,7 @@ static int __parse_numa_zonelist_order(char *s)
 		user_zonelist_order = ZONELIST_ORDER_ZONE;
 	} else {
 		printk(KERN_WARNING
-		       "Ignoring invalid numa_zonelist_order value: "
-		       "%s\n", s);
+		       "Ignoring invalid numa_zonelist_order value: %s\n", s);
 		return -EINVAL;
 	}
 	return 0;
@@ -4539,12 +4538,11 @@ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
 	else
 		page_group_by_mobility_disabled = 0;
-	pr_info("Built %i zonelists in %s order, mobility grouping %s. "
-		"Total pages: %ld\n",
-		nr_online_nodes,
-		zonelist_order_name[current_zonelist_order],
-		page_group_by_mobility_disabled ? "off" : "on",
-		vm_total_pages);
+	pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n",
+		nr_online_nodes,
+		zonelist_order_name[current_zonelist_order],
+		page_group_by_mobility_disabled ? "off" : "on",
+		vm_total_pages);
 #ifdef CONFIG_NUMA
 	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
 #endif
@@ -6142,22 +6140,21 @@ void __init mem_init_print_info(const char *str)
 #undef adj_init_size
-	pr_info("Memory: %luK/%luK available "
-		"(%luK kernel code, %luK rwdata, %luK rodata, "
-		"%luK init, %luK bss, %luK reserved, %luK cma-reserved"
+	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
 #ifdef CONFIG_HIGHMEM
 		", %luK highmem"
 #endif
 		"%s%s)\n",
-		nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
-		codesize >> 10, datasize >> 10, rosize >> 10,
-		(init_data_size + init_code_size) >> 10, bss_size >> 10,
-		(physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
-		totalcma_pages << (PAGE_SHIFT-10),
+		nr_free_pages() << (PAGE_SHIFT - 10),
+		physpages << (PAGE_SHIFT - 10),
+		codesize >> 10, datasize >> 10, rosize >> 10,
+		(init_data_size + init_code_size) >> 10, bss_size >> 10,
+		(physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
+		totalcma_pages << (PAGE_SHIFT - 10),
 #ifdef CONFIG_HIGHMEM
-		totalhigh_pages << (PAGE_SHIFT-10),
+		totalhigh_pages << (PAGE_SHIFT - 10),
 #endif
 		str ? ", " : "", str ? str : "");
 }
 /**
...
@@ -198,9 +198,8 @@ void __dump_page_owner(struct page *page)
 		return;
 	}
-	pr_alert("page allocated via order %u, migratetype %s, "
-		 "gfp_mask %#x(%pGg)\n", page_ext->order,
-		 migratetype_names[mt], gfp_mask, &gfp_mask);
+	pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
+		 page_ext->order, migratetype_names[mt], gfp_mask, &gfp_mask);
 	print_stack_trace(&trace, 0);
 	if (page_ext->last_migrate_reason != -1)
...
@@ -888,8 +888,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 	size = ALIGN(size, 2);
 	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
-		WARN(true, "illegal size (%zu) or align (%zu) for "
-		     "percpu allocation\n", size, align);
+		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
+		     size, align);
 		return NULL;
 	}
...
@@ -1566,11 +1566,9 @@ static void dump_line(char *data, int offset, int limit)
 	if (bad_count == 1) {
 		error ^= POISON_FREE;
 		if (!(error & (error - 1))) {
-			printk(KERN_ERR "Single bit error detected. Probably "
-					"bad RAM.\n");
+			printk(KERN_ERR "Single bit error detected. Probably bad RAM.\n");
 #ifdef CONFIG_X86
-			printk(KERN_ERR "Run memtest86+ or a similar memory "
-					"test tool.\n");
+			printk(KERN_ERR "Run memtest86+ or a similar memory test tool.\n");
 #else
 			printk(KERN_ERR "Run a memory test tool.\n");
 #endif
@@ -1693,11 +1691,9 @@ static void slab_destroy_debugcheck(struct kmem_cache *cachep,
 	}
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
-			slab_error(cachep, "start of a freed object "
-				   "was overwritten");
+			slab_error(cachep, "start of a freed object was overwritten");
 		if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
-			slab_error(cachep, "end of a freed object "
-				   "was overwritten");
+			slab_error(cachep, "end of a freed object was overwritten");
 	}
 }
 }
@@ -2398,11 +2394,9 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct page *page)
 		if (cachep->flags & SLAB_RED_ZONE) {
 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
-				slab_error(cachep, "constructor overwrote the"
-					   " end of an object");
+				slab_error(cachep, "constructor overwrote the end of an object");
 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
-				slab_error(cachep, "constructor overwrote the"
-					   " start of an object");
+				slab_error(cachep, "constructor overwrote the start of an object");
 		}
 		/* need to poison the objs? */
 		if (cachep->flags & SLAB_POISON) {
@@ -2469,8 +2463,8 @@ static void slab_put_obj(struct kmem_cache *cachep,
 	/* Verify double free bug */
 	for (i = page->active; i < cachep->num; i++) {
 		if (get_free_obj(page, i) == objnr) {
-			printk(KERN_ERR "slab: double free detected in cache "
-					"'%s', objp %p\n", cachep->name, objp);
+			printk(KERN_ERR "slab: double free detected in cache '%s', objp %p\n",
+			       cachep->name, objp);
 			BUG();
 		}
 	}
@@ -2901,8 +2895,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 	if (cachep->flags & SLAB_RED_ZONE) {
 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
-			slab_error(cachep, "double free, or memory outside"
-						" object was overwritten");
+			slab_error(cachep, "double free, or memory outside object was overwritten");
 			printk(KERN_ERR
 			       "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
 			       objp, *dbg_redzone1(cachep, objp),
@@ -4028,8 +4021,7 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
 		unsigned long node_frees = cachep->node_frees;
 		unsigned long overflows = cachep->node_overflow;
-		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
-			   "%4lu %4lu %4lu %4lu %4lu",
+		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu %4lu %4lu %4lu %4lu %4lu",
 			   allocs, high, grown,
 			   reaped, errors, max_freeable, node_allocs,
 			   node_frees, overflows);
...
@@ -726,8 +726,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	err = shutdown_cache(s, &release, &need_rcu_barrier);
 	if (err) {
-		pr_err("kmem_cache_destroy %s: "
-		       "Slab cache still has objects\n", s->name);
+		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
+		       s->name);
 		dump_stack();
 	}
 out_unlock:
@@ -1047,13 +1047,11 @@ static void print_slabinfo_header(struct seq_file *m)
 #else
 	seq_puts(m, "slabinfo - version: 2.1\n");
 #endif
-	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
-		 "<objperslab> <pagesperslab>");
+	seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
 #ifdef CONFIG_DEBUG_SLAB
-	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
-		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
+	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
 #endif
 	seq_putc(m, '\n');
...
@@ -950,14 +950,14 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 		max_objects = MAX_OBJS_PER_PAGE;
 	if (page->objects != max_objects) {
-		slab_err(s, page, "Wrong number of objects. Found %d but "
-			"should be %d", page->objects, max_objects);
+		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
+			 page->objects, max_objects);
 		page->objects = max_objects;
 		slab_fix(s, "Number of objects adjusted.");
 	}
 	if (page->inuse != page->objects - nr) {
-		slab_err(s, page, "Wrong object count. Counter is %d but "
-			"counted were %d", page->inuse, page->objects - nr);
+		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
+			 page->inuse, page->objects - nr);
 		page->inuse = page->objects - nr;
 		slab_fix(s, "Object count adjusted.");
 	}
@@ -1117,8 +1117,8 @@ static inline int free_consistency_checks(struct kmem_cache *s,
 	if (unlikely(s != page->slab_cache)) {
 		if (!PageSlab(page)) {
-			slab_err(s, page, "Attempt to free object(0x%p) "
-				"outside of slab", object);
+			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
+				 object);
 		} else if (!page->slab_cache) {
 			pr_err("SLUB <none>: no slab for object 0x%p.\n",
 			       object);
@@ -3439,10 +3439,9 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
 	free_kmem_cache_nodes(s);
 error:
 	if (flags & SLAB_PANIC)
-		panic("Cannot create slab %s size=%lu realsize=%u "
-			"order=%u offset=%u flags=%lx\n",
-			s->name, (unsigned long)s->size, s->size,
-			oo_order(s->oo), s->offset, flags);
+		panic("Cannot create slab %s size=%lu realsize=%u order=%u offset=%u flags=%lx\n",
+		      s->name, (unsigned long)s->size, s->size,
+		      oo_order(s->oo), s->offset, flags);
 	return -EINVAL;
 }
...
@@ -166,8 +166,8 @@ void __meminit vmemmap_verify(pte_t *pte, int node,
 	int actual_node = early_pfn_to_nid(pfn);
 	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
-		printk(KERN_WARNING "[%lx-%lx] potential offnode "
-			"page_structs\n", start, end - 1);
+		printk(KERN_WARNING "[%lx-%lx] potential offnode page_structs\n",
+		       start, end - 1);
 }
 pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
@@ -292,8 +292,8 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		if (map_map[pnum])
 			continue;
 		ms = __nr_to_section(pnum);
-		printk(KERN_ERR "%s: sparsemem memory map backing failed "
-			"some memory will not be available.\n", __func__);
+		printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n",
+		       __func__);
 		ms->section_mem_map = 0;
 	}
...
@@ -428,8 +428,8 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		if (map_map[pnum])
 			continue;
 		ms = __nr_to_section(pnum);
-		printk(KERN_ERR "%s: sparsemem memory map backing failed "
-			"some memory will not be available.\n", __func__);
+		printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n",
+		       __func__);
 		ms->section_mem_map = 0;
 	}
 }
@@ -456,8 +456,8 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	if (map)
 		return map;
-	printk(KERN_ERR "%s: sparsemem memory map backing failed "
-		"some memory will not be available.\n", __func__);
+	printk(KERN_ERR "%s: sparsemem memory map backing failed some memory will not be available.\n",
+	       __func__);
 	ms->section_mem_map = 0;
 	return NULL;
 }
...
@@ -2526,8 +2526,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		(swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
 	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
-	pr_info("Adding %uk swap on %s. "
-		"Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
+	pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
 		p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
 		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
 		(p->flags & SWP_SOLIDSTATE) ? "SS" : "",
...
@@ -469,8 +469,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 		goto retry;
 	}
 	if (printk_ratelimit())
-		pr_warn("vmap allocation for size %lu failed: "
-			"use vmalloc=<size> to increase size.\n", size);
+		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
+			size);
 	kfree(va);
 	return ERR_PTR(-EBUSY);
 }
...