Commit bf16d19a authored by Joe Perches, committed by Linus Torvalds

mm: slub: convert sysfs sprintf family to sysfs_emit/sysfs_emit_at

Convert the unbounded uses of sprintf to sysfs_emit.

A few conversions may now not end in a newline if the output buffer is
overflowed.
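For reference, sysfs_emit() and sysfs_emit_at() (declared in include/linux/sysfs.h) wrap vscnprintf() and never write past the single PAGE_SIZE buffer that sysfs hands to ->show() callbacks; they warn and return 0 if the buffer is not page aligned or the offset is out of range. A minimal sketch of the conversion pattern follows; the function names example_show/example_list_show are illustrative and not taken from this patch:

/* Before: unbounded, can write past the PAGE_SIZE sysfs buffer. */
static ssize_t example_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%u\n", s->size);
}

/* After: a single value, bounded to PAGE_SIZE. */
static ssize_t example_show(struct kmem_cache *s, char *buf)
{
	return sysfs_emit(buf, "%u\n", s->size);
}

/*
 * After: multi-part output appends at an offset instead of using buf + len,
 * so when the page fills the output is truncated (possibly losing the
 * trailing newline) rather than overflowing the buffer.
 */
static ssize_t example_list_show(struct kmem_cache *s, char *buf)
{
	int len = 0;

	len += sysfs_emit_at(buf, len, "%u", s->size);
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}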

Link: https://lkml.kernel.org/r/0c90a90f466167f8c37de4b737553cf49c4a277f.1605376435.git.joe@perches.com
Signed-off-by: Joe Perches <joe@perches.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79d4d38a
@@ -4734,7 +4734,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 
 	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
			     GFP_KERNEL)) {
-		return sprintf(buf, "Out of memory\n");
+		return sysfs_emit(buf, "Out of memory\n");
 	}
 	/* Push back cpu slabs */
 	flush_all(s);
@@ -4757,50 +4757,45 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	for (i = 0; i < t.count; i++) {
 		struct location *l = &t.loc[i];
 
-		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
-			break;
-		len += sprintf(buf + len, "%7ld ", l->count);
+		len += sysfs_emit_at(buf, len, "%7ld ", l->count);
 
 		if (l->addr)
-			len += sprintf(buf + len, "%pS", (void *)l->addr);
+			len += sysfs_emit_at(buf, len, "%pS", (void *)l->addr);
 		else
-			len += sprintf(buf + len, "<not-available>");
-
-		if (l->sum_time != l->min_time) {
-			len += sprintf(buf + len, " age=%ld/%ld/%ld",
-				l->min_time,
-				(long)div_u64(l->sum_time, l->count),
-				l->max_time);
-		} else
-			len += sprintf(buf + len, " age=%ld",
-				l->min_time);
+			len += sysfs_emit_at(buf, len, "<not-available>");
+
+		if (l->sum_time != l->min_time)
+			len += sysfs_emit_at(buf, len, " age=%ld/%ld/%ld",
+					     l->min_time,
+					     (long)div_u64(l->sum_time,
							   l->count),
+					     l->max_time);
+		else
+			len += sysfs_emit_at(buf, len, " age=%ld", l->min_time);
 
 		if (l->min_pid != l->max_pid)
-			len += sprintf(buf + len, " pid=%ld-%ld",
-				l->min_pid, l->max_pid);
+			len += sysfs_emit_at(buf, len, " pid=%ld-%ld",
+					     l->min_pid, l->max_pid);
 		else
-			len += sprintf(buf + len, " pid=%ld",
-				l->min_pid);
+			len += sysfs_emit_at(buf, len, " pid=%ld",
+					     l->min_pid);
 
 		if (num_online_cpus() > 1 &&
-				!cpumask_empty(to_cpumask(l->cpus)) &&
-				len < PAGE_SIZE - 60)
-			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
-					 " cpus=%*pbl",
-					 cpumask_pr_args(to_cpumask(l->cpus)));
+		    !cpumask_empty(to_cpumask(l->cpus)))
+			len += sysfs_emit_at(buf, len, " cpus=%*pbl",
+					     cpumask_pr_args(to_cpumask(l->cpus)));
 
-		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
-				len < PAGE_SIZE - 60)
-			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
-					 " nodes=%*pbl",
-					 nodemask_pr_args(&l->nodes));
+		if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
+			len += sysfs_emit_at(buf, len, " nodes=%*pbl",
+					     nodemask_pr_args(&l->nodes));
 
-		len += sprintf(buf + len, "\n");
+		len += sysfs_emit_at(buf, len, "\n");
 	}
 
 	free_loc_track(&t);
 	if (!t.count)
-		len += sprintf(buf, "No data\n");
+		len += sysfs_emit_at(buf, len, "No data\n");
+
 	return len;
 }
 #endif	/* CONFIG_SLUB_DEBUG */
@@ -4903,6 +4898,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 	int node;
 	int x;
 	unsigned long *nodes;
+	int len = 0;
 
 	nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
 	if (!nodes)
@@ -4991,15 +4987,19 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 			nodes[node] += x;
 		}
 	}
-	x = sprintf(buf, "%lu", total);
+
+	len += sysfs_emit_at(buf, len, "%lu", total);
 #ifdef CONFIG_NUMA
-	for (node = 0; node < nr_node_ids; node++)
+	for (node = 0; node < nr_node_ids; node++) {
 		if (nodes[node])
-			x += sprintf(buf + x, " N%d=%lu",
-					node, nodes[node]);
+			len += sysfs_emit_at(buf, len, " N%d=%lu",
+					     node, nodes[node]);
+	}
 #endif
+	len += sysfs_emit_at(buf, len, "\n");
 	kfree(nodes);
-	return x + sprintf(buf + x, "\n");
+
+	return len;
 }
 
 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
@@ -5021,37 +5021,37 @@ struct slab_attribute {
 
 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%u\n", s->size);
+	return sysfs_emit(buf, "%u\n", s->size);
 }
 SLAB_ATTR_RO(slab_size);
 
 static ssize_t align_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%u\n", s->align);
+	return sysfs_emit(buf, "%u\n", s->align);
 }
 SLAB_ATTR_RO(align);
 
 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%u\n", s->object_size);
+	return sysfs_emit(buf, "%u\n", s->object_size);
 }
 SLAB_ATTR_RO(object_size);
 
 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%u\n", oo_objects(s->oo));
+	return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
 }
 SLAB_ATTR_RO(objs_per_slab);
 
 static ssize_t order_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%u\n", oo_order(s->oo));
+	return sysfs_emit(buf, "%u\n", oo_order(s->oo));
 }
 SLAB_ATTR_RO(order);
 
 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%lu\n", s->min_partial);
+	return sysfs_emit(buf, "%lu\n", s->min_partial);
 }
 
 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
@@ -5071,7 +5071,7 @@ SLAB_ATTR(min_partial);
 
 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%u\n", slub_cpu_partial(s));
+	return sysfs_emit(buf, "%u\n", slub_cpu_partial(s));
 }
 
 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
@@ -5096,13 +5096,13 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
 	if (!s->ctor)
 		return 0;
-	return sprintf(buf, "%pS\n", s->ctor);
+	return sysfs_emit(buf, "%pS\n", s->ctor);
 }
 SLAB_ATTR_RO(ctor);
 
 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
+	return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
 }
 SLAB_ATTR_RO(aliases);
@@ -5135,7 +5135,7 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 	int objects = 0;
 	int pages = 0;
 	int cpu;
-	int len;
+	int len = 0;
 
 	for_each_online_cpu(cpu) {
 		struct page *page;
@@ -5148,52 +5148,53 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 		}
 	}
 
-	len = sprintf(buf, "%d(%d)", objects, pages);
+	len += sysfs_emit_at(buf, len, "%d(%d)", objects, pages);
 
 #ifdef CONFIG_SMP
 	for_each_online_cpu(cpu) {
 		struct page *page;
 
 		page = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
-
-		if (page && len < PAGE_SIZE - 20)
-			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
-				page->pobjects, page->pages);
+		if (page)
+			len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
+					     cpu, page->pobjects, page->pages);
 	}
 #endif
-	return len + sprintf(buf + len, "\n");
+	len += sysfs_emit_at(buf, len, "\n");
+
+	return len;
 }
 SLAB_ATTR_RO(slabs_cpu_partial);
 
 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
+	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
 }
 SLAB_ATTR_RO(reclaim_account);
 
 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
+	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
 }
 SLAB_ATTR_RO(hwcache_align);
 
 #ifdef CONFIG_ZONE_DMA
 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
+	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
 }
 SLAB_ATTR_RO(cache_dma);
 #endif
 
 static ssize_t usersize_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%u\n", s->usersize);
+	return sysfs_emit(buf, "%u\n", s->usersize);
 }
 SLAB_ATTR_RO(usersize);
 
 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
+	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
 }
 SLAB_ATTR_RO(destroy_by_rcu);
@@ -5212,33 +5213,33 @@ SLAB_ATTR_RO(total_objects);
 
 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
+	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
 }
 SLAB_ATTR_RO(sanity_checks);
 
 static ssize_t trace_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
+	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
 }
 SLAB_ATTR_RO(trace);
 
 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
+	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
 }
 SLAB_ATTR_RO(red_zone);
 
 static ssize_t poison_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
+	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
 }
 SLAB_ATTR_RO(poison);
 
 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
+	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
 }
 SLAB_ATTR_RO(store_user);
@@ -5282,7 +5283,7 @@ SLAB_ATTR_RO(free_calls);
 #ifdef CONFIG_FAILSLAB
 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
+	return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
 }
 SLAB_ATTR_RO(failslab);
 #endif
@@ -5306,7 +5307,7 @@ SLAB_ATTR(shrink);
 #ifdef CONFIG_NUMA
 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%u\n", s->remote_node_defrag_ratio / 10);
+	return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
 }
 
 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
@@ -5333,7 +5334,7 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 {
 	unsigned long sum = 0;
 	int cpu;
-	int len;
+	int len = 0;
 	int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
 
 	if (!data)
@@ -5346,16 +5347,19 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
 		sum += x;
 	}
 
-	len = sprintf(buf, "%lu", sum);
+	len += sysfs_emit_at(buf, len, "%lu", sum);
 
 #ifdef CONFIG_SMP
 	for_each_online_cpu(cpu) {
-		if (data[cpu] && len < PAGE_SIZE - 20)
-			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
+		if (data[cpu])
+			len += sysfs_emit_at(buf, len, " C%d=%u",
+					     cpu, data[cpu]);
 	}
 #endif
 	kfree(data);
-	return len + sprintf(buf + len, "\n");
+	len += sysfs_emit_at(buf, len, "\n");
+
+	return len;
 }
 
 static void clear_stat(struct kmem_cache *s, enum stat_item si)