Commit da41ba65 authored by shli@kernel.org's avatar shli@kernel.org Committed by NeilBrown

raid5: add a new flag to track if a stripe can be batched

A freshly created stripe with a write request can be batched. Any time the stripe
is handled or a new read is queued, the flag will be cleared.
Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: NeilBrown <neilb@suse.de>
parent 46d5b785
...@@ -555,6 +555,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) ...@@ -555,6 +555,7 @@ static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
goto retry; goto retry;
insert_hash(conf, sh); insert_hash(conf, sh);
sh->cpu = smp_processor_id(); sh->cpu = smp_processor_id();
set_bit(STRIPE_BATCH_READY, &sh->state);
} }
static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
...@@ -2645,7 +2646,8 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, ...@@ -2645,7 +2646,8 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
* toread/towrite point to the first in a chain. * toread/towrite point to the first in a chain.
* The bi_next chain must be in order. * The bi_next chain must be in order.
*/ */
static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
int forwrite, int previous)
{ {
struct bio **bip; struct bio **bip;
struct r5conf *conf = sh->raid_conf; struct r5conf *conf = sh->raid_conf;
...@@ -2678,6 +2680,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in ...@@ -2678,6 +2680,9 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
goto overlap; goto overlap;
if (!forwrite || previous)
clear_bit(STRIPE_BATCH_READY, &sh->state);
BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
if (*bip) if (*bip)
bi->bi_next = *bip; bi->bi_next = *bip;
...@@ -3824,6 +3829,7 @@ static void handle_stripe(struct stripe_head *sh) ...@@ -3824,6 +3829,7 @@ static void handle_stripe(struct stripe_head *sh)
return; return;
} }
clear_bit(STRIPE_BATCH_READY, &sh->state);
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
spin_lock(&sh->stripe_lock); spin_lock(&sh->stripe_lock);
/* Cannot process 'sync' concurrently with 'discard' */ /* Cannot process 'sync' concurrently with 'discard' */
...@@ -4793,7 +4799,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) ...@@ -4793,7 +4799,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
} }
if (test_bit(STRIPE_EXPANDING, &sh->state) || if (test_bit(STRIPE_EXPANDING, &sh->state) ||
!add_stripe_bio(sh, bi, dd_idx, rw)) { !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
/* Stripe is busy expanding or /* Stripe is busy expanding or
* add failed due to overlap. Flush everything * add failed due to overlap. Flush everything
* and wait a while * and wait a while
...@@ -5206,7 +5212,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) ...@@ -5206,7 +5212,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
return handled; return handled;
} }
if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) {
release_stripe(sh); release_stripe(sh);
raid5_set_bi_processed_stripes(raid_bio, scnt); raid5_set_bi_processed_stripes(raid_bio, scnt);
conf->retry_read_aligned = raid_bio; conf->retry_read_aligned = raid_bio;
......
...@@ -327,6 +327,7 @@ enum { ...@@ -327,6 +327,7 @@ enum {
STRIPE_ON_UNPLUG_LIST, STRIPE_ON_UNPLUG_LIST,
STRIPE_DISCARD, STRIPE_DISCARD,
STRIPE_ON_RELEASE_LIST, STRIPE_ON_RELEASE_LIST,
STRIPE_BATCH_READY,
}; };
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment