Commit 556f36e9 authored by Ming Lei, committed by Jens Axboe

blk-mq: balance mapping between present CPUs and queues

Spread queues among the present CPUs first, then build the mapping for the
remaining non-present (possible) CPUs.

This minimizes the number of dead queues, i.e. queues that are mapped only
by non-present CPUs, and so avoids the poor IO performance caused by an
unbalanced mapping between present CPUs and queues.

A similar policy is already applied when spreading managed IRQ affinity.

Cc: Yi Zhang <yi.zhang@redhat.com>
Reported-by: Yi Zhang <yi.zhang@redhat.com>
Reviewed-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b7e9e1fb
@@ -15,10 +15,10 @@
 #include "blk.h"
 #include "blk-mq.h"
 
-static int cpu_to_queue_index(struct blk_mq_queue_map *qmap,
-			      unsigned int nr_queues, const int cpu)
+static int queue_index(struct blk_mq_queue_map *qmap,
+		       unsigned int nr_queues, const int q)
 {
-	return qmap->queue_offset + (cpu % nr_queues);
+	return qmap->queue_offset + (q % nr_queues);
 }
 
 static int get_first_sibling(unsigned int cpu)
@@ -36,21 +36,36 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
 	unsigned int *map = qmap->mq_map;
 	unsigned int nr_queues = qmap->nr_queues;
-	unsigned int cpu, first_sibling;
+	unsigned int cpu, first_sibling, q = 0;
+
+	for_each_possible_cpu(cpu)
+		map[cpu] = -1;
+
+	/*
+	 * Spread queues among present CPUs first for minimizing
+	 * count of dead queues which are mapped by all un-present CPUs
+	 */
+	for_each_present_cpu(cpu) {
+		if (q >= nr_queues)
+			break;
+		map[cpu] = queue_index(qmap, nr_queues, q++);
+	}
 
 	for_each_possible_cpu(cpu) {
+		if (map[cpu] != -1)
+			continue;
 		/*
 		 * First do sequential mapping between CPUs and queues.
 		 * In case we still have CPUs to map, and we have some number of
 		 * threads per cores then map sibling threads to the same queue
 		 * for performance optimizations.
 		 */
-		if (cpu < nr_queues) {
-			map[cpu] = cpu_to_queue_index(qmap, nr_queues, cpu);
+		if (q < nr_queues) {
+			map[cpu] = queue_index(qmap, nr_queues, q++);
 		} else {
 			first_sibling = get_first_sibling(cpu);
 			if (first_sibling == cpu)
-				map[cpu] = cpu_to_queue_index(qmap, nr_queues, cpu);
+				map[cpu] = queue_index(qmap, nr_queues, q++);
 			else
 				map[cpu] = map[first_sibling];
 		}
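For illustration only, the following stand-alone user-space C sketch mimics the two-pass policy above. The topology (8 possible CPUs, only CPUs 0 and 4 present, 4 queues) is a made-up assumption, not taken from the bug report, and the sketch ignores queue_offset and the sibling handling in get_first_sibling() for brevity.

/*
 * Sketch of the two-pass CPU-to-queue mapping: present CPUs are
 * handed queues first, remaining possible CPUs are mapped round-robin.
 * Hypothetical topology: 8 possible CPUs, only CPUs 0 and 4 present.
 */
#include <stdio.h>

#define NR_POSSIBLE_CPUS 8
#define NR_QUEUES        4

static const int cpu_present[NR_POSSIBLE_CPUS] = { 1, 0, 0, 0, 1, 0, 0, 0 };

int main(void)
{
	int map[NR_POSSIBLE_CPUS];
	unsigned int cpu, q = 0;

	for (cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
		map[cpu] = -1;

	/* Pass 1: spread queues among present CPUs first. */
	for (cpu = 0; cpu < NR_POSSIBLE_CPUS && q < NR_QUEUES; cpu++)
		if (cpu_present[cpu])
			map[cpu] = q++;

	/* Pass 2: map the remaining possible CPUs round-robin over the queues. */
	for (cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
		if (map[cpu] == -1)
			map[cpu] = q++ % NR_QUEUES;

	for (cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
		printf("CPU%u -> hctx %d%s\n", cpu, map[cpu],
		       cpu_present[cpu] ? " (present)" : "");
	return 0;
}

With this topology the sketch maps CPU0 to queue 0 and CPU4 to queue 1, so two queues have a present CPU. A purely CPU-index-based mapping would send both present CPUs (0 and 4, since 4 % 4 == 0) to queue 0, leaving three queues reachable only from non-present CPUs; that is the unbalanced mapping the commit message refers to.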