Commit bc3acbb8 authored by Vasyl Gomonovych, committed by Santosh Shilimkar

soc: ti: knav_qmss: Use percpu instead atomic for stats counter

The hwqueue driver collects statistics in the heavily used queue_pop/queue_push
functions. For cache efficiency, and to make push/pop faster, use percpu
variables for these counters. For performance reasons the driver should hold a
descriptor in its software handler for as short a time as possible and quickly
return it to the hardware queue. Descriptors enter the driver from hardware on
pop and are returned by push, so collecting the statistics in percpu variables
helps keep the descriptor lifetime in the driver short.
Signed-off-by: Vasyl Gomonovych <gomonovych@gmail.com>
Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
parent 8af70cd2
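
The pattern at the core of this change is the standard kernel percpu counter idiom: each CPU increments its own copy of the statistics on the hot path, and a reader folds the copies together only when a total is needed (here, in the debugfs output). The following is a minimal sketch of that idiom using the same API the patch relies on (alloc_percpu(), this_cpu_inc(), per_cpu_ptr(), free_percpu()); the example_* names are illustrative and not part of the driver.

/*
 * Minimal sketch of the percpu counter pattern adopted by this patch.
 * The struct and function names are hypothetical, for illustration only.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct example_stats {			/* hypothetical stats struct */
	unsigned int pushes;
	unsigned int pops;
};

static struct example_stats __percpu *example_alloc_stats(void)
{
	/* one copy of struct example_stats per possible CPU */
	return alloc_percpu(struct example_stats);
}

static void example_count_push(struct example_stats __percpu *stats)
{
	/* lockless, cache-local increment on the hot path */
	this_cpu_inc(stats->pushes);
}

static unsigned int example_total_pushes(struct example_stats __percpu *stats)
{
	unsigned int total = 0;
	int cpu;

	/* slow path: fold the per-CPU copies into one total for reporting */
	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(stats, cpu)->pushes;
	return total;
}

static void example_free_stats(struct example_stats __percpu *stats)
{
	free_percpu(stats);
}

Because the increments are not synchronized with the reader, the summed total is a best-effort snapshot, which is acceptable for debugfs statistics; in exchange, the push/pop hot path avoids the cross-CPU cache-line bouncing caused by atomic_inc() on a shared counter.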
@@ -19,6 +19,8 @@
 #ifndef __KNAV_QMSS_H__
 #define __KNAV_QMSS_H__
 
+#include <linux/percpu.h>
+
 #define THRESH_GTE		BIT(7)
 #define THRESH_LT		0
@@ -162,11 +164,11 @@ struct knav_qmgr_info {
  * notifies:	notifier counts
  */
 struct knav_queue_stats {
-	atomic_t	 pushes;
-	atomic_t	 pops;
-	atomic_t	 push_errors;
-	atomic_t	 pop_errors;
-	atomic_t	 notifies;
+	unsigned int	 pushes;
+	unsigned int	 pops;
+	unsigned int	 push_errors;
+	unsigned int	 pop_errors;
+	unsigned int	 notifies;
 };
 
 /**
@@ -283,7 +285,7 @@ struct knav_queue_inst {
 struct knav_queue {
	struct knav_reg_queue __iomem	*reg_push, *reg_pop, *reg_peek;
	struct knav_queue_inst		*inst;
-	struct knav_queue_stats		stats;
+	struct knav_queue_stats __percpu	*stats;
	knav_queue_notify_fn		notifier_fn;
	void				*notifier_fn_arg;
	atomic_t			notifier_enabled;
@@ -83,7 +83,7 @@ void knav_queue_notify(struct knav_queue_inst *inst)
			continue;
		if (WARN_ON(!qh->notifier_fn))
			continue;
-		atomic_inc(&qh->stats.notifies);
+		this_cpu_inc(qh->stats->notifies);
		qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
@@ -214,6 +214,12 @@ static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
	if (!qh)
		return ERR_PTR(-ENOMEM);
 
+	qh->stats = alloc_percpu(struct knav_queue_stats);
+	if (!qh->stats) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
	qh->flags = flags;
	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
@@ -229,13 +235,17 @@ static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);
 
-		if (ret) {
-			devm_kfree(inst->kdev->dev, qh);
-			return ERR_PTR(ret);
-		}
+		if (ret)
+			goto err;
	}
	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;
+
+err:
+	if (qh->stats)
+		free_percpu(qh->stats);
+	devm_kfree(inst->kdev->dev, qh);
+	return ERR_PTR(ret);
 }
 
 static struct knav_queue *
@@ -411,6 +421,12 @@ static void knav_queue_debug_show_instance(struct seq_file *s,
 {
	struct knav_device *kdev = inst->kdev;
	struct knav_queue *qh;
+	int cpu = 0;
+	int pushes = 0;
+	int pops = 0;
+	int push_errors = 0;
+	int pop_errors = 0;
+	int notifies = 0;
 
	if (!knav_queue_is_busy(inst))
		return;
@@ -418,19 +434,22 @@ static void knav_queue_debug_show_instance(struct seq_file *s,
	seq_printf(s, "\tqueue id %d (%s)\n",
		   kdev->base_id + inst->id, inst->name);
	for_each_handle_rcu(qh, inst) {
-		seq_printf(s, "\t\thandle %p: ", qh);
-		seq_printf(s, "pushes %8d, ",
-			   atomic_read(&qh->stats.pushes));
-		seq_printf(s, "pops %8d, ",
-			   atomic_read(&qh->stats.pops));
-		seq_printf(s, "count %8d, ",
-			   knav_queue_get_count(qh));
-		seq_printf(s, "notifies %8d, ",
-			   atomic_read(&qh->stats.notifies));
-		seq_printf(s, "push errors %8d, ",
-			   atomic_read(&qh->stats.push_errors));
-		seq_printf(s, "pop errors %8d\n",
-			   atomic_read(&qh->stats.pop_errors));
+		for_each_possible_cpu(cpu) {
+			pushes += per_cpu_ptr(qh->stats, cpu)->pushes;
+			pops += per_cpu_ptr(qh->stats, cpu)->pops;
+			push_errors += per_cpu_ptr(qh->stats, cpu)->push_errors;
+			pop_errors += per_cpu_ptr(qh->stats, cpu)->pop_errors;
+			notifies += per_cpu_ptr(qh->stats, cpu)->notifies;
+		}
+
+		seq_printf(s, "\t\thandle %p: pushes %8d, pops %8d, count %8d, notifies %8d, push errors %8d, pop errors %8d\n",
+			   qh,
+			   pushes,
+			   pops,
+			   knav_queue_get_count(qh),
+			   notifies,
+			   push_errors,
+			   pop_errors);
	}
 }
@@ -547,6 +566,7 @@ void knav_queue_close(void *qhandle)
		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
+	free_percpu(qh->stats);
	devm_kfree(inst->kdev->dev, qh);
 }
 EXPORT_SYMBOL_GPL(knav_queue_close);
@@ -620,7 +640,7 @@ int knav_queue_push(void *qhandle, dma_addr_t dma,
	val = (u32)dma | ((size / 16) - 1);
	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);
 
-	atomic_inc(&qh->stats.pushes);
+	this_cpu_inc(qh->stats->pushes);
	return 0;
 }
 EXPORT_SYMBOL_GPL(knav_queue_push);
@@ -658,7 +678,7 @@ dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
	if (size)
		*size = ((val & DESC_SIZE_MASK) + 1) * 16;
 
-	atomic_inc(&qh->stats.pops);
+	this_cpu_inc(qh->stats->pops);
	return dma;
 }
 EXPORT_SYMBOL_GPL(knav_queue_pop);