Commit 0e4aac73 authored by Artur Paszkiewicz, committed by Song Liu

md/raid5: only add to wq if reshape is in progress

Now that actual overlaps are not handled on the wait_for_overlap wq
anymore, the remaining cases when we wait on this wq are limited to
reshape. If reshape is not in progress, don't add to the wq in
raid5_make_request() because add_wait_queue() / remove_wait_queue()
operations take a spinlock and cause noticeable contention when multiple
threads are submitting requests to the mddev.
Signed-off-by: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
Link: https://lore.kernel.org/r/20240827153536.6743-3-artur.paszkiewicz@intel.com
Signed-off-by: Song Liu <song@kernel.org>
parent e6a03207
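For illustration, here is a minimal userspace analogue of the pattern this commit applies, assuming a pthread mutex standing in for the wait queue's internal spinlock. This is a sketch only; none of the identifiers below come from raid5.c.

/*
 * Userspace analogue (not kernel code) of the pattern in this commit:
 * register on a lock-protected waiter list only when the slow path
 * ("reshape in progress") can actually require waiting, and record that
 * decision in a local flag so teardown is skipped on the fast path.
 * All names here are illustrative, not taken from raid5.c.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for the wq spinlock */
static int nr_waiters;                  /* stands in for the wait queue entries */
static bool reshape_in_progress;        /* stands in for conf->reshape_progress != MaxSector */

static void submit_request(void)
{
        bool on_wq;

        if (!reshape_in_progress) {
                /* Fast path: this request can never wait, so skip the
                 * locked list manipulation entirely. */
                on_wq = false;
        } else {
                /* Slow path: waiting is possible, so register up front. */
                pthread_mutex_lock(&wq_lock);
                nr_waiters++;
                pthread_mutex_unlock(&wq_lock);
                on_wq = true;
        }

        /* ... issue the request; only the reshape case may block ... */

        if (on_wq) {
                pthread_mutex_lock(&wq_lock);
                nr_waiters--;
                pthread_mutex_unlock(&wq_lock);
        }
}

int main(void)
{
        submit_request();               /* fast path: no locking at all */
        reshape_in_progress = true;
        submit_request();               /* slow path: registers as a waiter */
        printf("waiters after teardown: %d\n", nr_waiters);
        return 0;
}

Compile with -pthread. The point of the flag is the same as on_wq in the patch: the common case touches no shared lock, and the teardown path stays symmetric with the setup path.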
drivers/md/raid5.c

@@ -6070,6 +6070,7 @@ static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
 static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
 {
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       bool on_wq;
        struct r5conf *conf = mddev->private;
        sector_t logical_sector;
        struct stripe_request_ctx ctx = {};
@@ -6143,11 +6144,15 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
         * sequential IO pattern. We don't bother with the optimization when
         * reshaping as the performance benefit is not worth the complexity.
         */
-       if (likely(conf->reshape_progress == MaxSector))
+       if (likely(conf->reshape_progress == MaxSector)) {
                logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
+               on_wq = false;
+       } else {
+               add_wait_queue(&conf->wait_for_overlap, &wait);
+               on_wq = true;
+       }
        s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);

-       add_wait_queue(&conf->wait_for_overlap, &wait);
        while (1) {
                res = make_stripe_request(mddev, conf, &ctx, logical_sector,
                                          bi);
@@ -6158,6 +6163,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
                        continue;

                if (res == STRIPE_SCHEDULE_AND_RETRY) {
+                       WARN_ON_ONCE(!on_wq);
                        /*
                         * Must release the reference to batch_last before
                         * scheduling and waiting for work to be done,
@@ -6182,6 +6188,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
                logical_sector = ctx.first_sector +
                        (s << RAID5_STRIPE_SHIFT(conf));
        }
-       remove_wait_queue(&conf->wait_for_overlap, &wait);
+       if (unlikely(on_wq))
+               remove_wait_queue(&conf->wait_for_overlap, &wait);
        if (ctx.batch_last)
...
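For context, the wait queue in raid5_make_request() is used with the canonical woken-wake idiom. A condensed sketch of that idiom follows; this is not the raid5 code itself, and wq_head and condition are placeholders:

/*
 * Canonical woken-wake wait idiom, condensed. add_wait_queue() and
 * remove_wait_queue() both take wq_head->lock internally; that is the
 * spinlock contention this commit avoids on the non-reshape path.
 */
DEFINE_WAIT_FUNC(wait, woken_wake_function);

add_wait_queue(&wq_head, &wait);
while (!condition)
        wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
remove_wait_queue(&wq_head, &wait);

Because the registration and deregistration bracket the whole retry loop, making them conditional on reshape removes two locked operations from every submission in the common case.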