Commit 016c76ac authored by NeilBrown, committed by Shaohua Li

md/raid5: use bio_inc_remaining() instead of repurposing bi_phys_segments as a counter

md/raid5 needs to keep track of how many stripe_heads are processing a
bio so that it can delay calling bio_endio() until all stripe_heads
have completed.  It currently uses 16 bits of ->bi_phys_segments for
this purpose.
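
As a rough illustration of that packing (a simplified userspace model using
C11 atomics with made-up names, not the kernel code; the real helpers are the
raid5_*_stripes inlines in the raid5.h hunk at the end of this diff), the
lower 16 bits hold a biased count of active stripes and the upper 16 bits
hold the processed-stripe count:

#include <assert.h>
#include <stdatomic.h>

static atomic_uint segments;	/* stands in for bio->bi_phys_segments */

/* lower 16 bits: biased active-stripe count, upper 16 bits: processed count */
static void set_counts(unsigned int active, unsigned int proc)
{
	atomic_store(&segments, (proc << 16) | (active & 0xffff));
}

/* models raid5_dec_bi_active_stripes(): returns the new active count */
static unsigned int dec_active(void)
{
	return (atomic_fetch_sub(&segments, 1) - 1) & 0xffff;
}

/* models raid5_bi_processed_stripes() */
static unsigned int processed_stripes(void)
{
	return (atomic_load(&segments) >> 16) & 0xffff;
}

int main(void)
{
	set_counts(1, 0);		/* bias of 1, nothing processed yet */
	assert(processed_stripes() == 0);
	assert(dec_active() == 0);	/* last reference: old code called bio_endio() here */
	return 0;
}

With only 16 bits, the active count wraps once a bio needs more than 65535
stripe_heads, which is the overflow described below.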

16 bits is only enough for a 256MB request (65536 stripe_heads at 4K per
stripe), and it is possible for a single bio to be larger than this, which
causes problems.  Also, the bio struct contains a larger counter,
__bi_remaining, which serves a purpose very similar to that of our counter.
So stop using ->bi_phys_segments, and instead use __bi_remaining.

This means we don't need to initialize the counter, as our caller
initializes it to '1'.  It also means we can call bio_endio() directly
as it tests this counter internally.
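
For reference, the __bi_remaining pattern behaves like a plain completion
reference count.  The sketch below is a userspace model only (fake_bio and
the fake_bio_* helpers are illustrative names, not block-layer APIs): the
counter starts at 1, bio_inc_remaining() takes an extra reference for each
stripe_head, and each bio_endio() drops one reference, with the final drop
to zero running the real completion.

#include <stdatomic.h>
#include <stdio.h>

struct fake_bio {
	atomic_int remaining;			/* models bio->__bi_remaining */
};

static void fake_bio_init(struct fake_bio *b)
{
	atomic_init(&b->remaining, 1);		/* the block layer starts every bio at 1 */
}

static void fake_bio_inc_remaining(struct fake_bio *b)
{
	atomic_fetch_add(&b->remaining, 1);	/* one reference per stripe_head */
}

static void fake_bio_endio(struct fake_bio *b)
{
	/* like bio_endio(): only the final drop to zero completes the bio */
	if (atomic_fetch_sub(&b->remaining, 1) == 1)
		printf("bio complete\n");
}

int main(void)
{
	struct fake_bio b;

	fake_bio_init(&b);
	fake_bio_inc_remaining(&b);	/* add_stripe_bio() for stripe_head 1 */
	fake_bio_inc_remaining(&b);	/* add_stripe_bio() for stripe_head 2 */

	fake_bio_endio(&b);		/* stripe 1 finished: no completion yet */
	fake_bio_endio(&b);		/* stripe 2 finished: no completion yet */
	fake_bio_endio(&b);		/* submitter's final bio_endio(): completes */
	return 0;
}

This is why raid5_make_request() and make_discard_request() can simply call
bio_endio() at the end: that call drops the initial reference, and the bio
completes only after every stripe_head has also called bio_endio().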
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
parent bd83d0a2
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -318,8 +318,7 @@ r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
 	       dev->sector + STRIPE_SECTORS) {
 		wbi2 = r5_next_bio(wbi, dev->sector);
 		md_write_end(conf->mddev);
-		if (!raid5_dec_bi_active_stripes(wbi))
-			bio_endio(wbi);
+		bio_endio(wbi);
 		wbi = wbi2;
 	}
 }
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1322,8 +1322,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				rbi2 = r5_next_bio(rbi, dev->sector);
-				if (!raid5_dec_bi_active_stripes(rbi))
-					bio_endio(rbi);
+				bio_endio(rbi);
 				rbi = rbi2;
 			}
 		}
@@ -3196,14 +3195,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
 		(unsigned long long)bi->bi_iter.bi_sector,
 		(unsigned long long)sh->sector);
 
-	/*
-	 * If several bio share a stripe. The bio bi_phys_segments acts as a
-	 * reference count to avoid race. The reference count should already be
-	 * increased before this function is called (for example, in
-	 * raid5_make_request()), so other bio sharing this stripe will not free the
-	 * stripe. If a stripe is owned by one stripe, the stripe lock will
-	 * protect it.
-	 */
 	spin_lock_irq(&sh->stripe_lock);
 	/* Don't allow new IO added to stripes in batch list */
 	if (sh->batch_head)
@@ -3259,7 +3250,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
 	if (*bip)
 		bi->bi_next = *bip;
 	*bip = bi;
-	raid5_inc_bi_active_stripes(bi);
+	bio_inc_remaining(bi);
 	md_write_inc(conf->mddev, bi);
 
 	if (forwrite) {
@@ -3384,8 +3375,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 
 			bi->bi_error = -EIO;
 			md_write_end(conf->mddev);
-			if (!raid5_dec_bi_active_stripes(bi))
-				bio_endio(bi);
+			bio_endio(bi);
 			bi = nextbi;
 		}
 		if (bitmap_end)
@@ -3407,8 +3397,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 
 			bi->bi_error = -EIO;
 			md_write_end(conf->mddev);
-			if (!raid5_dec_bi_active_stripes(bi))
-				bio_endio(bi);
+			bio_endio(bi);
 			bi = bi2;
 		}
 
@@ -3433,8 +3422,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 					r5_next_bio(bi, sh->dev[i].sector);
 
 				bi->bi_error = -EIO;
-				if (!raid5_dec_bi_active_stripes(bi))
-					bio_endio(bi);
+				bio_endio(bi);
 				bi = nextbi;
 			}
 		}
@@ -3766,8 +3754,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 					dev->sector + STRIPE_SECTORS) {
 					wbi2 = r5_next_bio(wbi, dev->sector);
 					md_write_end(conf->mddev);
-					if (!raid5_dec_bi_active_stripes(wbi))
-						bio_endio(wbi);
+					bio_endio(wbi);
 					wbi = wbi2;
 				}
 				bitmap_endwrite(conf->mddev->bitmap, sh->sector,
@@ -5112,7 +5099,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
 		 * this sets the active strip count to 1 and the processed
 		 * strip count to zero (upper 8 bits)
 		 */
-		raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
+		raid5_set_bi_processed_stripes(bi, 0);
 	}
 
 	return bi;
@@ -5449,7 +5436,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 	struct r5conf *conf = mddev->private;
 	sector_t logical_sector, last_sector;
 	struct stripe_head *sh;
-	int remaining;
 	int stripe_sectors;
 
 	if (mddev->reshape_position != MaxSector)
@@ -5460,7 +5446,6 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 	last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
 
 	bi->bi_next = NULL;
-	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
 	md_write_start(mddev, bi);
 
 	stripe_sectors = conf->chunk_sectors *
@@ -5507,7 +5492,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 				continue;
 			sh->dev[d].towrite = bi;
 			set_bit(R5_OVERWRITE, &sh->dev[d].flags);
-			raid5_inc_bi_active_stripes(bi);
+			bio_inc_remaining(bi);
 			md_write_inc(mddev, bi);
 			sh->overwrite_disks++;
 		}
@@ -5532,10 +5517,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 	}
 
 	md_write_end(mddev);
-	remaining = raid5_dec_bi_active_stripes(bi);
-	if (remaining == 0) {
-		bio_endio(bi);
-	}
+	bio_endio(bi);
 }
 
 static void raid5_make_request(struct mddev *mddev, struct bio * bi)
@@ -5546,7 +5528,6 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 	sector_t logical_sector, last_sector;
 	struct stripe_head *sh;
 	const int rw = bio_data_dir(bi);
-	int remaining;
 	DEFINE_WAIT(w);
 	bool do_prepare;
 	bool do_flush = false;
@@ -5588,7 +5569,6 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bio_end_sector(bi);
 	bi->bi_next = NULL;
-	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
 	md_write_start(mddev, bi);
 
 	prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
@@ -5726,14 +5706,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 
 	if (rw == WRITE)
 		md_write_end(mddev);
-	remaining = raid5_dec_bi_active_stripes(bi);
-	if (remaining == 0) {
-
-		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
-					 bi, 0);
-
-		bio_endio(bi);
-	}
+	bio_endio(bi);
 }
 
 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
@@ -6098,7 +6071,6 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 	int dd_idx;
 	sector_t sector, logical_sector, last_sector;
 	int scnt = 0;
-	int remaining;
 	int handled = 0;
 
 	logical_sector = raid_bio->bi_iter.bi_sector &
@@ -6137,12 +6109,9 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 		raid5_release_stripe(sh);
 		handled++;
 	}
-	remaining = raid5_dec_bi_active_stripes(raid_bio);
-	if (remaining == 0) {
-		trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
-					 raid_bio, 0);
-		bio_endio(raid_bio);
-	}
+
+	bio_endio(raid_bio);
+
 	if (atomic_dec_and_test(&conf->active_aligned_reads))
 		wake_up(&conf->wait_for_quiescent);
 	return handled;
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -488,8 +488,7 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 }
 
 /*
- * We maintain a biased count of active stripes in the bottom 16 bits of
- * bi_phys_segments, and a count of processed stripes in the upper 16 bits
+ * We maintain a count of processed stripes in the upper 16 bits
  */
 static inline int raid5_bi_processed_stripes(struct bio *bio)
 {
@@ -498,20 +497,6 @@ static inline int raid5_bi_processed_stripes(struct bio *bio)
 	return (atomic_read(segments) >> 16) & 0xffff;
 }
 
-static inline int raid5_dec_bi_active_stripes(struct bio *bio)
-{
-	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
-	return atomic_sub_return(1, segments) & 0xffff;
-}
-
-static inline void raid5_inc_bi_active_stripes(struct bio *bio)
-{
-	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
-	atomic_inc(segments);
-}
-
 static inline void raid5_set_bi_processed_stripes(struct bio *bio,
 						  unsigned int cnt)
 {