Commit e9eeba28 authored by Guoqing Jiang, committed by Jens Axboe

md/raid10: read balance chooses idlest disk for SSD

Andy reported that a raid10 array built from SSDs has
poor read performance: in his tests, raid1 can sometimes
be 3x faster than raid10 [1].

The problem is that raid10 chooses the lowest-distance
disk for a read request, an approach that doesn't work
well for SSDs, which have no spindle like an HDD does.
We should instead read from the SSD with the fewest
pending IOs, as commit 9dedf603 ("md/raid1: read
balance chooses idlest disk for SSD") does for raid1.

So this commit selects the idlest disk for a read when
the array contains a non-rotational disk; otherwise,
read_balance keeps the previous distance-priority
algorithm. With this change, raid10 read performance
improves significantly in Andy's test [2].
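
For illustration, the heart of the new policy can be
reduced to the following minimal userspace sketch
(struct mirror_info and choose_slot() are hypothetical
simplifications, not the kernel code):

#include <limits.h>
#include <stdbool.h>

struct mirror_info {
	bool nonrot;            /* non-rotational (SSD) device? */
	unsigned int pending;   /* in-flight IOs on this disk */
	unsigned long distance; /* |request addr - head position| */
};

/* Return the slot to read from, or -1 if there are no copies. */
static int choose_slot(const struct mirror_info *m, int copies)
{
	int best_pending_slot = -1, best_dist_slot = -1;
	unsigned int min_pending = UINT_MAX;
	unsigned long best_dist = ULONG_MAX;
	bool has_nonrot_disk = false;

	for (int slot = 0; slot < copies; slot++) {
		has_nonrot_disk |= m[slot].nonrot;
		/* Track the idlest non-rotational disk. */
		if (m[slot].nonrot && m[slot].pending < min_pending) {
			min_pending = m[slot].pending;
			best_pending_slot = slot;
		}
		/* Track the lowest seek distance as the fallback. */
		if (m[slot].distance < best_dist) {
			best_dist = m[slot].distance;
			best_dist_slot = slot;
		}
	}

	/* Any SSD present: least pending IO wins; else shortest seek. */
	return has_nonrot_disk ? best_pending_slot : best_dist_slot;
}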

[1]. https://marc.info/?l=linux-raid&m=155915890004761&w=2
[2]. https://marc.info/?l=linux-raid&m=155990654223786&w=2

Tested-by: Andy Smith <andy@strugglers.net>
Signed-off-by: Guoqing Jiang <gqjiang@suse.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c7afa803
@@ -707,15 +707,19 @@ static struct md_rdev *read_balance(struct r10conf *conf,
 	int sectors = r10_bio->sectors;
 	int best_good_sectors;
 	sector_t new_distance, best_dist;
-	struct md_rdev *best_rdev, *rdev = NULL;
+	struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
 	int do_balance;
-	int best_slot;
+	int best_dist_slot, best_pending_slot;
+	bool has_nonrot_disk = false;
+	unsigned int min_pending;
 	struct geom *geo = &conf->geo;
 
 	raid10_find_phys(conf, r10_bio);
 	rcu_read_lock();
-	best_slot = -1;
-	best_rdev = NULL;
+	best_dist_slot = -1;
+	min_pending = UINT_MAX;
+	best_dist_rdev = NULL;
+	best_pending_rdev = NULL;
 	best_dist = MaxSector;
 	best_good_sectors = 0;
 	do_balance = 1;
@@ -737,6 +741,8 @@ static struct md_rdev *read_balance(struct r10conf *conf,
 		sector_t first_bad;
 		int bad_sectors;
 		sector_t dev_sector;
+		unsigned int pending;
+		bool nonrot;
 
 		if (r10_bio->devs[slot].bio == IO_BLOCKED)
 			continue;
@@ -773,8 +779,8 @@ static struct md_rdev *read_balance(struct r10conf *conf,
 					first_bad - dev_sector;
 				if (good_sectors > best_good_sectors) {
 					best_good_sectors = good_sectors;
-					best_slot = slot;
-					best_rdev = rdev;
+					best_dist_slot = slot;
+					best_dist_rdev = rdev;
 				}
 				if (!do_balance)
 					/* Must read from here */
@@ -787,14 +793,23 @@ static struct md_rdev *read_balance(struct r10conf *conf,
 		if (!do_balance)
 			break;
 
-		if (best_slot >= 0)
+		nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
+		has_nonrot_disk |= nonrot;
+		pending = atomic_read(&rdev->nr_pending);
+		if (min_pending > pending && nonrot) {
+			min_pending = pending;
+			best_pending_slot = slot;
+			best_pending_rdev = rdev;
+		}
+
+		if (best_dist_slot >= 0)
 			/* At least 2 disks to choose from so failfast is OK */
 			set_bit(R10BIO_FailFast, &r10_bio->state);
 		/* This optimisation is debatable, and completely destroys
 		 * sequential read speed for 'far copies' arrays.  So only
 		 * keep it for 'near' arrays, and review those later.
 		 */
-		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
+		if (geo->near_copies > 1 && !pending)
 			new_distance = 0;
 
 		/* for far > 1 always use the lowest address */
@@ -803,15 +818,21 @@ static struct md_rdev *read_balance(struct r10conf *conf,
 		else
 			new_distance = abs(r10_bio->devs[slot].addr -
 					   conf->mirrors[disk].head_position);
+
 		if (new_distance < best_dist) {
 			best_dist = new_distance;
-			best_slot = slot;
-			best_rdev = rdev;
+			best_dist_slot = slot;
+			best_dist_rdev = rdev;
 		}
 	}
 	if (slot >= conf->copies) {
-		slot = best_slot;
-		rdev = best_rdev;
+		if (has_nonrot_disk) {
+			slot = best_pending_slot;
+			rdev = best_pending_rdev;
+		} else {
+			slot = best_dist_slot;
+			rdev = best_dist_rdev;
+		}
 	}
 
 	if (slot >= 0) {