Commit c911c46c authored by Yufen Yu, committed by Song Liu

md/raid456: convert macro STRIPE_* to RAID5_STRIPE_*

Convert the macros STRIPE_SIZE, STRIPE_SECTORS and STRIPE_SHIFT to
RAID5_STRIPE_SIZE(), RAID5_STRIPE_SECTORS() and RAID5_STRIPE_SHIFT().

This patch prepares for the upcoming adjustable stripe_size.
It does not change any existing functionality.
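As a rough illustration of the pattern (a self-contained sketch, not kernel code: only the STRIPE_* constants and the RAID5_STRIPE_* wrappers come from the patch, everything else is hypothetical), each wrapper takes the r5conf pointer but, for now, still expands to the old compile-time constant. Call sites can therefore already be written against the conf-taking form with no behavioural change, and a later patch presumably only has to make the wrapper read the value out of conf:

    #include <stdio.h>

    /* old compile-time geometry, assuming 4 KiB pages */
    #define STRIPE_SIZE     4096
    #define STRIPE_SHIFT    (12 - 9)
    #define STRIPE_SECTORS  (STRIPE_SIZE >> 9)

    struct r5conf;  /* opaque here; the real definition lives in raid5.h */

    /* new wrappers: take conf, but currently ignore it */
    #define RAID5_STRIPE_SIZE(conf)     STRIPE_SIZE
    #define RAID5_STRIPE_SHIFT(conf)    STRIPE_SHIFT
    #define RAID5_STRIPE_SECTORS(conf)  STRIPE_SECTORS

    /* hypothetical caller, already written against the conf-taking form */
    static unsigned long stripes_per_chunk(struct r5conf *conf,
                                           unsigned long chunk_sectors)
    {
            return chunk_sectors >> RAID5_STRIPE_SHIFT(conf);
    }

    int main(void)
    {
            /* 1024 sectors per chunk, 8 sectors per stripe -> prints 128 */
            printf("%lu\n", stripes_per_chunk(NULL, 1024));
            return 0;
    }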
Signed-off-by: Yufen Yu <yuyufen@huawei.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
parent 1684e975
@@ -296,8 +296,8 @@ r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
         wbi = dev->written;
         dev->written = NULL;
         while (wbi && wbi->bi_iter.bi_sector <
-               dev->sector + STRIPE_SECTORS) {
-                wbi2 = r5_next_bio(wbi, dev->sector);
+               dev->sector + RAID5_STRIPE_SECTORS(conf)) {
+                wbi2 = r5_next_bio(conf, wbi, dev->sector);
                 md_write_end(conf->mddev);
                 bio_endio(wbi);
                 wbi = wbi2;
@@ -314,7 +314,7 @@ void r5c_handle_cached_data_endio(struct r5conf *conf,
                         set_bit(R5_UPTODATE, &sh->dev[i].flags);
                         r5c_return_dev_pending_writes(conf, &sh->dev[i]);
                         md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
-                                           STRIPE_SECTORS,
+                                           RAID5_STRIPE_SECTORS(conf),
                                            !test_bit(STRIPE_DEGRADED, &sh->state),
                                            0);
                 }
@@ -362,7 +362,7 @@ void r5c_check_cached_full_stripe(struct r5conf *conf)
          */
         if (atomic_read(&conf->r5c_cached_full_stripes) >=
             min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
-                conf->chunk_sectors >> STRIPE_SHIFT))
+                conf->chunk_sectors >> RAID5_STRIPE_SHIFT(conf)))
                 r5l_wake_reclaim(conf->log, 0);
 }
...
@@ -324,7 +324,7 @@ static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
          * be just after the last logged stripe and write to the same
          * disks. Use bit shift and logarithm to avoid 64-bit division.
          */
-        if ((sh->sector == sh_last->sector + STRIPE_SECTORS) &&
+        if ((sh->sector == sh_last->sector + RAID5_STRIPE_SECTORS(conf)) &&
             (data_sector >> ilog2(conf->chunk_sectors) ==
              data_sector_last >> ilog2(conf->chunk_sectors)) &&
             ((data_sector - data_sector_last) * data_disks ==
@@ -844,9 +844,9 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
         /* if start and end is 4k aligned, use a 4k block */
         if (block_size == 512 &&
-            (r_sector_first & (STRIPE_SECTORS - 1)) == 0 &&
-            (r_sector_last & (STRIPE_SECTORS - 1)) == 0)
-                block_size = STRIPE_SIZE;
+            (r_sector_first & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0 &&
+            (r_sector_last & (RAID5_STRIPE_SECTORS(conf) - 1)) == 0)
+                block_size = RAID5_STRIPE_SIZE(conf);
         /* iterate through blocks in strip */
         for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
@@ -1274,7 +1274,8 @@ static int ppl_validate_rdev(struct md_rdev *rdev)
         ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);
         if (ppl_data_sectors > 0)
-                ppl_data_sectors = rounddown(ppl_data_sectors, STRIPE_SECTORS);
+                ppl_data_sectors = rounddown(ppl_data_sectors,
+                                RAID5_STRIPE_SECTORS((struct r5conf *)rdev->mddev->private));
         if (ppl_data_sectors <= 0) {
                 pr_warn("md/raid:%s: PPL space too small on %s\n",
...
@@ -481,23 +481,6 @@ struct disk_info {
 #define HASH_MASK (NR_HASH - 1)
 #define MAX_STRIPE_BATCH 8
-/* bio's attached to a stripe+device for I/O are linked together in bi_sector
- * order without overlap. There may be several bio's per stripe+device, and
- * a bio could span several devices.
- * When walking this list for a particular stripe+device, we must never proceed
- * beyond a bio that extends past this device, as the next bio might no longer
- * be valid.
- * This function is used to determine the 'next' bio in the list, given the
- * sector of the current stripe+device
- */
-static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
-{
-        if (bio_end_sector(bio) < sector + STRIPE_SECTORS)
-                return bio->bi_next;
-        else
-                return NULL;
-}
 /* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
  * This is because we sometimes take all the spinlocks
  * and creating that much locking depth can cause
@@ -690,6 +673,26 @@ struct r5conf {
         struct r5pending_data *next_pending_data;
 };
+#define RAID5_STRIPE_SIZE(conf) STRIPE_SIZE
+#define RAID5_STRIPE_SHIFT(conf) STRIPE_SHIFT
+#define RAID5_STRIPE_SECTORS(conf) STRIPE_SECTORS
+/* bio's attached to a stripe+device for I/O are linked together in bi_sector
+ * order without overlap. There may be several bio's per stripe+device, and
+ * a bio could span several devices.
+ * When walking this list for a particular stripe+device, we must never proceed
+ * beyond a bio that extends past this device, as the next bio might no longer
+ * be valid.
+ * This function is used to determine the 'next' bio in the list, given the
+ * sector of the current stripe+device
+ */
+static inline struct bio *r5_next_bio(struct r5conf *conf, struct bio *bio, sector_t sector)
+{
+        if (bio_end_sector(bio) < sector + RAID5_STRIPE_SECTORS(conf))
+                return bio->bi_next;
+        else
+                return NULL;
+}
 /*
  * Our supported algorithms
...