Commit e7836bd6 authored by Shaohua Li's avatar Shaohua Li Committed by NeilBrown

raid5: lockless access to the raid5-overridden bi_phys_segments

Raid5 overrides bio->bi_phys_segments; it is currently accessed with device_lock held,
which is unnecessary. We can make the access lockless.
Signed-off-by: default avatarShaohua Li <shli@fusionio.com>
Signed-off-by: default avatarNeilBrown <neilb@suse.de>
parent 4eb788df
...@@ -99,34 +99,40 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) ...@@ -99,34 +99,40 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
* We maintain a biased count of active stripes in the bottom 16 bits of * We maintain a biased count of active stripes in the bottom 16 bits of
* bi_phys_segments, and a count of processed stripes in the upper 16 bits * bi_phys_segments, and a count of processed stripes in the upper 16 bits
*/ */
static inline int raid5_bi_phys_segments(struct bio *bio) static inline int raid5_bi_processed_stripes(struct bio *bio)
{ {
return bio->bi_phys_segments & 0xffff; atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
return (atomic_read(segments) >> 16) & 0xffff;
} }
static inline int raid5_bi_hw_segments(struct bio *bio) static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{ {
return (bio->bi_phys_segments >> 16) & 0xffff; atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
return atomic_sub_return(1, segments) & 0xffff;
} }
static inline int raid5_dec_bi_phys_segments(struct bio *bio) static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{ {
--bio->bi_phys_segments; atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
return raid5_bi_phys_segments(bio); atomic_inc(segments);
} }
/*
 * Set the processed-stripe count (upper 16 bits of the overloaded
 * bi_phys_segments) of @bio to @cnt, preserving the active-stripe
 * count in the lower 16 bits.
 *
 * A cmpxchg retry loop is needed because only half of the word is
 * being replaced: a concurrent inc/dec of the active-stripe half
 * between the read and the store would otherwise be lost.
 */
static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		/* keep low 16 bits (active stripes), replace high 16 bits */
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}
static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt) static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{ {
bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16); atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
atomic_set(segments, cnt);
} }
/* Find first data disk in a raid6 stripe */ /* Find first data disk in a raid6 stripe */
...@@ -781,7 +787,7 @@ static void ops_complete_biofill(void *stripe_head_ref) ...@@ -781,7 +787,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
while (rbi && rbi->bi_sector < while (rbi && rbi->bi_sector <
dev->sector + STRIPE_SECTORS) { dev->sector + STRIPE_SECTORS) {
rbi2 = r5_next_bio(rbi, dev->sector); rbi2 = r5_next_bio(rbi, dev->sector);
if (!raid5_dec_bi_phys_segments(rbi)) { if (!raid5_dec_bi_active_stripes(rbi)) {
rbi->bi_next = return_bi; rbi->bi_next = return_bi;
return_bi = rbi; return_bi = rbi;
} }
...@@ -2367,7 +2373,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in ...@@ -2367,7 +2373,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
if (*bip) if (*bip)
bi->bi_next = *bip; bi->bi_next = *bip;
*bip = bi; *bip = bi;
bi->bi_phys_segments++; raid5_inc_bi_active_stripes(bi);
if (forwrite) { if (forwrite) {
/* check if page is covered */ /* check if page is covered */
...@@ -2464,7 +2470,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, ...@@ -2464,7 +2470,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) { sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags); clear_bit(BIO_UPTODATE, &bi->bi_flags);
if (!raid5_dec_bi_phys_segments(bi)) { if (!raid5_dec_bi_active_stripes(bi)) {
md_write_end(conf->mddev); md_write_end(conf->mddev);
bi->bi_next = *return_bi; bi->bi_next = *return_bi;
*return_bi = bi; *return_bi = bi;
...@@ -2479,7 +2485,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, ...@@ -2479,7 +2485,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
sh->dev[i].sector + STRIPE_SECTORS) { sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags); clear_bit(BIO_UPTODATE, &bi->bi_flags);
if (!raid5_dec_bi_phys_segments(bi)) { if (!raid5_dec_bi_active_stripes(bi)) {
md_write_end(conf->mddev); md_write_end(conf->mddev);
bi->bi_next = *return_bi; bi->bi_next = *return_bi;
*return_bi = bi; *return_bi = bi;
...@@ -2503,7 +2509,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, ...@@ -2503,7 +2509,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
struct bio *nextbi = struct bio *nextbi =
r5_next_bio(bi, sh->dev[i].sector); r5_next_bio(bi, sh->dev[i].sector);
clear_bit(BIO_UPTODATE, &bi->bi_flags); clear_bit(BIO_UPTODATE, &bi->bi_flags);
if (!raid5_dec_bi_phys_segments(bi)) { if (!raid5_dec_bi_active_stripes(bi)) {
bi->bi_next = *return_bi; bi->bi_next = *return_bi;
*return_bi = bi; *return_bi = bi;
} }
...@@ -2722,7 +2728,7 @@ static void handle_stripe_clean_event(struct r5conf *conf, ...@@ -2722,7 +2728,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
while (wbi && wbi->bi_sector < while (wbi && wbi->bi_sector <
dev->sector + STRIPE_SECTORS) { dev->sector + STRIPE_SECTORS) {
wbi2 = r5_next_bio(wbi, dev->sector); wbi2 = r5_next_bio(wbi, dev->sector);
if (!raid5_dec_bi_phys_segments(wbi)) { if (!raid5_dec_bi_active_stripes(wbi)) {
md_write_end(conf->mddev); md_write_end(conf->mddev);
wbi->bi_next = *return_bi; wbi->bi_next = *return_bi;
*return_bi = wbi; *return_bi = wbi;
...@@ -3798,7 +3804,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf) ...@@ -3798,7 +3804,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf)
* this sets the active strip count to 1 and the processed * this sets the active strip count to 1 and the processed
* strip count to zero (upper 8 bits) * strip count to zero (upper 8 bits)
*/ */
bi->bi_phys_segments = 1; /* biased count of active stripes */ raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */
} }
return bi; return bi;
...@@ -4133,9 +4139,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) ...@@ -4133,9 +4139,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
} }
} }
spin_lock_irq(&conf->device_lock); remaining = raid5_dec_bi_active_stripes(bi);
remaining = raid5_dec_bi_phys_segments(bi);
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) { if (remaining == 0) {
if ( rw == WRITE ) if ( rw == WRITE )
...@@ -4491,7 +4495,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) ...@@ -4491,7 +4495,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
sector += STRIPE_SECTORS, sector += STRIPE_SECTORS,
scnt++) { scnt++) {
if (scnt < raid5_bi_hw_segments(raid_bio)) if (scnt < raid5_bi_processed_stripes(raid_bio))
/* already done this stripe */ /* already done this stripe */
continue; continue;
...@@ -4499,14 +4503,14 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) ...@@ -4499,14 +4503,14 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
if (!sh) { if (!sh) {
/* failed to get a stripe - must wait */ /* failed to get a stripe - must wait */
raid5_set_bi_hw_segments(raid_bio, scnt); raid5_set_bi_processed_stripes(raid_bio, scnt);
conf->retry_read_aligned = raid_bio; conf->retry_read_aligned = raid_bio;
return handled; return handled;
} }
if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
release_stripe(sh); release_stripe(sh);
raid5_set_bi_hw_segments(raid_bio, scnt); raid5_set_bi_processed_stripes(raid_bio, scnt);
conf->retry_read_aligned = raid_bio; conf->retry_read_aligned = raid_bio;
return handled; return handled;
} }
...@@ -4515,9 +4519,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) ...@@ -4515,9 +4519,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
release_stripe(sh); release_stripe(sh);
handled++; handled++;
} }
spin_lock_irq(&conf->device_lock); remaining = raid5_dec_bi_active_stripes(raid_bio);
remaining = raid5_dec_bi_phys_segments(raid_bio);
spin_unlock_irq(&conf->device_lock);
if (remaining == 0) if (remaining == 0)
bio_endio(raid_bio, 0); bio_endio(raid_bio, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads)) if (atomic_dec_and_test(&conf->active_aligned_reads))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment