Commit 9729a6eb authored by Linus Torvalds

Merge branch 'for-linus' of git://neil.brown.name/md

* 'for-linus' of git://neil.brown.name/md: (39 commits)
  md/raid5: correctly update sync_completed when we reach max_resync
  md/raid5: add missing call to schedule() after prepare_to_wait()
  md/linear: use call_rcu to free obsolete 'conf' structures.
  md linear: Protecting mddev with rcu locks to avoid races
  md: Move check for bitmap presence to personality code.
  md: remove chunksize rounding from common code.
  md: raid0/linear: ensure device sizes are rounded to chunk size.
  md: move assignment of ->utime so that it never gets skipped.
  md: Push down reconstruction log message to personality code.
  md: merge reconfig and check_reshape methods.
  md: remove unnecessary arguments from ->reconfig method.
  md: raid5: check stripe cache is large enough in start_reshape
  md: raid0: chunk_sectors cleanups.
  md: fix some comments.
  md/raid5: Use is_power_of_2() in raid5_reconfig()/raid6_reconfig().
  md: convert conf->chunk_size and conf->prev_chunk to sectors.
  md: Convert mddev->new_chunk to sectors.
  md: Make mddev->chunk_size sector-based.
  md: raid0 :Enables chunk size other than powers of 2.
  md: prepare for non-power-of-two chunk sizes
  ...
parents 5ae8606d 48606a9f
...@@ -255,14 +255,14 @@ static void status(struct seq_file *seq, mddev_t *mddev) ...@@ -255,14 +255,14 @@ static void status(struct seq_file *seq, mddev_t *mddev)
} }
static int reconfig(mddev_t *mddev, int layout, int chunk_size) static int reshape(mddev_t *mddev)
{ {
int mode = layout & ModeMask; int mode = mddev->new_layout & ModeMask;
int count = layout >> ModeShift; int count = mddev->new_layout >> ModeShift;
conf_t *conf = mddev->private; conf_t *conf = mddev->private;
if (chunk_size != -1) if (mddev->new_layout < 0)
return -EINVAL; return 0;
/* new layout */ /* new layout */
if (mode == ClearFaults) if (mode == ClearFaults)
...@@ -279,6 +279,7 @@ static int reconfig(mddev_t *mddev, int layout, int chunk_size) ...@@ -279,6 +279,7 @@ static int reconfig(mddev_t *mddev, int layout, int chunk_size)
atomic_set(&conf->counters[mode], count); atomic_set(&conf->counters[mode], count);
} else } else
return -EINVAL; return -EINVAL;
mddev->new_layout = -1;
mddev->layout = -1; /* makes sure further changes come through */ mddev->layout = -1; /* makes sure further changes come through */
return 0; return 0;
} }
...@@ -298,8 +299,12 @@ static int run(mddev_t *mddev) ...@@ -298,8 +299,12 @@ static int run(mddev_t *mddev)
{ {
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
int i; int i;
conf_t *conf;
if (md_check_no_bitmap(mddev))
return -EINVAL;
conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL); conf = kmalloc(sizeof(*conf), GFP_KERNEL);
if (!conf) if (!conf)
return -ENOMEM; return -ENOMEM;
...@@ -315,7 +320,7 @@ static int run(mddev_t *mddev) ...@@ -315,7 +320,7 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, faulty_size(mddev, 0, 0)); md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
mddev->private = conf; mddev->private = conf;
reconfig(mddev, mddev->layout, -1); reshape(mddev);
return 0; return 0;
} }
...@@ -338,7 +343,7 @@ static struct mdk_personality faulty_personality = ...@@ -338,7 +343,7 @@ static struct mdk_personality faulty_personality =
.run = run, .run = run,
.stop = stop, .stop = stop,
.status = status, .status = status,
.reconfig = reconfig, .check_reshape = reshape,
.size = faulty_size, .size = faulty_size,
}; };
...
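The faulty.c hunks above show the pattern behind the "merge reconfig and check_reshape methods" commits in this series: instead of passing a new layout or chunk size as arguments to ->reconfig(), the core stages the request in mddev->new_layout (or mddev->new_chunk_sectors) and calls the personality's ->check_reshape(), which consumes the value and resets it. Below is a condensed, kernel-style sketch of that calling convention for a hypothetical personality named "example"; it is not lifted verbatim from the patch, only the field and method names are.

    /* Personality side: consume a staged layout change (sketch). */
    static int example_check_reshape(mddev_t *mddev)
    {
            if (mddev->new_layout < 0)
                    return 0;               /* nothing staged */
            /* ... validate and apply mddev->new_layout to the conf ... */
            mddev->new_layout = -1;         /* request consumed */
            return 0;
    }

    /* Core side, as in layout_store() later in this merge: stage the
     * value, let the personality check it, roll back on failure. */
    static int example_set_layout(mddev_t *mddev, int n)
    {
            int err;

            if (mddev->pers->check_reshape == NULL)
                    return -EBUSY;
            mddev->new_layout = n;
            err = mddev->pers->check_reshape(mddev);
            if (err)
                    mddev->new_layout = mddev->layout;
            return err;
    }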
...@@ -27,19 +27,27 @@ ...@@ -27,19 +27,27 @@
*/ */
 static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
 {
-	dev_info_t *hash;
-	linear_conf_t *conf = mddev_to_conf(mddev);
-	sector_t idx = sector >> conf->sector_shift;
+	int lo, mid, hi;
+	linear_conf_t *conf;
+	lo = 0;
+	hi = mddev->raid_disks - 1;
+	conf = rcu_dereference(mddev->private);
 	/*
-	 * sector_div(a,b) returns the remainer and sets a to a/b
+	 * Binary Search
 	 */
-	(void)sector_div(idx, conf->spacing);
-	hash = conf->hash_table[idx];
-	while (sector >= hash->num_sectors + hash->start_sector)
-		hash++;
-	return hash;
+	while (hi > lo) {
+		mid = (hi + lo) / 2;
+		if (sector < conf->disks[mid].end_sector)
+			hi = mid;
+		else
+			lo = mid + 1;
+	}
+	return conf->disks + lo;
 }
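which_dev() above replaces the old spacing/hash-table lookup with a binary search over cumulative device sizes: disks[i].end_sector is the exclusive upper bound of device i in array-sector space, so the wanted device is the first one whose end_sector exceeds the target sector. A standalone userspace model of the same search follows; the struct and type names are stand-ins, not the kernel's, and the RCU locking done by the real callers is omitted.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for sector_t / dev_info_t; not the kernel's definitions. */
    typedef uint64_t sector_t;
    struct dev { sector_t end_sector; };    /* cumulative, exclusive bound */

    /* Return the index of the first device whose end_sector is beyond
     * 'sector', exactly like the binary search in which_dev() above. */
    static int which_dev(const struct dev *disks, int nr, sector_t sector)
    {
            int lo = 0, hi = nr - 1, mid;

            while (hi > lo) {
                    mid = (hi + lo) / 2;
                    if (sector < disks[mid].end_sector)
                            hi = mid;
                    else
                            lo = mid + 1;
            }
            return lo;
    }

    int main(void)
    {
            /* Devices of 100, 50 and 200 sectors: ends 100, 150, 350. */
            struct dev disks[3] = { {100}, {150}, {350} };

            printf("%d %d %d\n",
                   which_dev(disks, 3, 0),      /* -> 0 */
                   which_dev(disks, 3, 149),    /* -> 1 */
                   which_dev(disks, 3, 150));   /* -> 2 */
            return 0;
    }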
/** /**
...@@ -59,8 +67,10 @@ static int linear_mergeable_bvec(struct request_queue *q, ...@@ -59,8 +67,10 @@ static int linear_mergeable_bvec(struct request_queue *q,
unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9; unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
rcu_read_lock();
dev0 = which_dev(mddev, sector); dev0 = which_dev(mddev, sector);
maxsectors = dev0->num_sectors - (sector - dev0->start_sector); maxsectors = dev0->end_sector - sector;
rcu_read_unlock();
if (maxsectors < bio_sectors) if (maxsectors < bio_sectors)
maxsectors = 0; maxsectors = 0;
...@@ -79,46 +89,57 @@ static int linear_mergeable_bvec(struct request_queue *q, ...@@ -79,46 +89,57 @@ static int linear_mergeable_bvec(struct request_queue *q,
static void linear_unplug(struct request_queue *q) static void linear_unplug(struct request_queue *q)
{ {
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
linear_conf_t *conf = mddev_to_conf(mddev); linear_conf_t *conf;
int i; int i;
rcu_read_lock();
conf = rcu_dereference(mddev->private);
for (i=0; i < mddev->raid_disks; i++) { for (i=0; i < mddev->raid_disks; i++) {
struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev); struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
blk_unplug(r_queue); blk_unplug(r_queue);
} }
rcu_read_unlock();
} }
static int linear_congested(void *data, int bits) static int linear_congested(void *data, int bits)
{ {
mddev_t *mddev = data; mddev_t *mddev = data;
linear_conf_t *conf = mddev_to_conf(mddev); linear_conf_t *conf;
int i, ret = 0; int i, ret = 0;
rcu_read_lock();
conf = rcu_dereference(mddev->private);
for (i = 0; i < mddev->raid_disks && !ret ; i++) { for (i = 0; i < mddev->raid_disks && !ret ; i++) {
struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev); struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
ret |= bdi_congested(&q->backing_dev_info, bits); ret |= bdi_congested(&q->backing_dev_info, bits);
} }
rcu_read_unlock();
return ret; return ret;
} }
static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks) static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{ {
linear_conf_t *conf = mddev_to_conf(mddev); linear_conf_t *conf;
sector_t array_sectors;
rcu_read_lock();
conf = rcu_dereference(mddev->private);
WARN_ONCE(sectors || raid_disks, WARN_ONCE(sectors || raid_disks,
"%s does not support generic reshape\n", __func__); "%s does not support generic reshape\n", __func__);
array_sectors = conf->array_sectors;
rcu_read_unlock();
return conf->array_sectors; return array_sectors;
} }
static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
{ {
linear_conf_t *conf; linear_conf_t *conf;
dev_info_t **table;
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
int i, nb_zone, cnt; int i, cnt;
sector_t min_sectors;
sector_t curr_sector;
conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t), conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
GFP_KERNEL); GFP_KERNEL);
...@@ -131,6 +152,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) ...@@ -131,6 +152,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
list_for_each_entry(rdev, &mddev->disks, same_set) { list_for_each_entry(rdev, &mddev->disks, same_set) {
int j = rdev->raid_disk; int j = rdev->raid_disk;
dev_info_t *disk = conf->disks + j; dev_info_t *disk = conf->disks + j;
sector_t sectors;
if (j < 0 || j >= raid_disks || disk->rdev) { if (j < 0 || j >= raid_disks || disk->rdev) {
printk("linear: disk numbering problem. Aborting!\n"); printk("linear: disk numbering problem. Aborting!\n");
...@@ -138,6 +160,11 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) ...@@ -138,6 +160,11 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
} }
disk->rdev = rdev; disk->rdev = rdev;
if (mddev->chunk_sectors) {
sectors = rdev->sectors;
sector_div(sectors, mddev->chunk_sectors);
rdev->sectors = sectors * mddev->chunk_sectors;
}
blk_queue_stack_limits(mddev->queue, blk_queue_stack_limits(mddev->queue,
rdev->bdev->bd_disk->queue); rdev->bdev->bd_disk->queue);
...@@ -149,102 +176,24 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) ...@@ -149,102 +176,24 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->num_sectors = rdev->sectors;
conf->array_sectors += rdev->sectors; conf->array_sectors += rdev->sectors;
cnt++; cnt++;
} }
if (cnt != raid_disks) { if (cnt != raid_disks) {
printk("linear: not enough drives present. Aborting!\n"); printk("linear: not enough drives present. Aborting!\n");
goto out; goto out;
} }
min_sectors = conf->array_sectors;
sector_div(min_sectors, PAGE_SIZE/sizeof(struct dev_info *));
if (min_sectors == 0)
min_sectors = 1;
/* min_sectors is the minimum spacing that will fit the hash
* table in one PAGE. This may be much smaller than needed.
* We find the smallest non-terminal set of consecutive devices
* that is larger than min_sectors and use the size of that as
* the actual spacing
*/
conf->spacing = conf->array_sectors;
for (i=0; i < cnt-1 ; i++) {
sector_t tmp = 0;
int j;
for (j = i; j < cnt - 1 && tmp < min_sectors; j++)
tmp += conf->disks[j].num_sectors;
if (tmp >= min_sectors && tmp < conf->spacing)
conf->spacing = tmp;
}
/* spacing may be too large for sector_div to work with,
* so we might need to pre-shift
*/
conf->sector_shift = 0;
if (sizeof(sector_t) > sizeof(u32)) {
sector_t space = conf->spacing;
while (space > (sector_t)(~(u32)0)) {
space >>= 1;
conf->sector_shift++;
}
}
 	/*
-	 * This code was restructured to work around a gcc-2.95.3 internal
-	 * compiler error. Alter it with care.
+	 * Here we calculate the device offsets.
 	 */
-	{
-		sector_t sz;
-		unsigned round;
-		unsigned long base;
-		sz = conf->array_sectors >> conf->sector_shift;
-		sz += 1; /* force round-up */
-		base = conf->spacing >> conf->sector_shift;
-		round = sector_div(sz, base);
-		nb_zone = sz + (round ? 1 : 0);
-	}
-	BUG_ON(nb_zone > PAGE_SIZE / sizeof(struct dev_info *));
-	conf->hash_table = kmalloc (sizeof (struct dev_info *) * nb_zone,
-				    GFP_KERNEL);
-	if (!conf->hash_table)
-		goto out;
-	/*
-	 * Here we generate the linear hash table
-	 * First calculate the device offsets.
-	 */
-	conf->disks[0].start_sector = 0;
+	conf->disks[0].end_sector = conf->disks[0].rdev->sectors;
 	for (i = 1; i < raid_disks; i++)
-		conf->disks[i].start_sector =
-			conf->disks[i-1].start_sector +
-			conf->disks[i-1].num_sectors;
+		conf->disks[i].end_sector =
+			conf->disks[i-1].end_sector +
+			conf->disks[i].rdev->sectors;
-	table = conf->hash_table;
-	i = 0;
-	for (curr_sector = 0;
-	     curr_sector < conf->array_sectors;
-	     curr_sector += conf->spacing) {
-		while (i < raid_disks-1 &&
-		       curr_sector >= conf->disks[i+1].start_sector)
-			i++;
-		*table ++ = conf->disks + i;
-	}
-	if (conf->sector_shift) {
-		conf->spacing >>= conf->sector_shift;
-		/* round spacing up so that when we divide by it,
-		 * we err on the side of "too-low", which is safest.
-		 */
-		conf->spacing++;
-	}
-	BUG_ON(table - conf->hash_table > nb_zone);
return conf; return conf;
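Two things happen in the rewritten linear_conf() shown above: when an array chunk size is configured, each member is rounded down to a whole number of chunks with sector_div(), and the per-device hash table is replaced by a running end_sector that simply accumulates the (rounded) member sizes. A small standalone model of both steps, with sector_div() emulated by ordinary 64-bit division and made-up device sizes:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    int main(void)
    {
            /* Made-up member sizes in sectors and a 64-sector (32 KiB) chunk. */
            sector_t sectors[3] = { 1000, 2000, 3000 };
            sector_t chunk_sectors = 64;
            sector_t end = 0;
            int i;

            for (i = 0; i < 3; i++) {
                    if (chunk_sectors) {
                            /* What sector_div() achieves here: round the
                             * member down to a whole number of chunks. */
                            sector_t chunks = sectors[i] / chunk_sectors;
                            sectors[i] = chunks * chunk_sectors;
                    }
                    end += sectors[i];      /* disks[i].end_sector analogue */
                    printf("disk %d: %llu sectors, end_sector %llu\n", i,
                           (unsigned long long)sectors[i],
                           (unsigned long long)end);
            }
            /* Prints ends 960, 2944, 5888 for the sizes above. */
            return 0;
    }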
...@@ -257,6 +206,8 @@ static int linear_run (mddev_t *mddev) ...@@ -257,6 +206,8 @@ static int linear_run (mddev_t *mddev)
{ {
linear_conf_t *conf; linear_conf_t *conf;
if (md_check_no_bitmap(mddev))
return -EINVAL;
mddev->queue->queue_lock = &mddev->queue->__queue_lock; mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = linear_conf(mddev, mddev->raid_disks); conf = linear_conf(mddev, mddev->raid_disks);
...@@ -272,6 +223,12 @@ static int linear_run (mddev_t *mddev) ...@@ -272,6 +223,12 @@ static int linear_run (mddev_t *mddev)
return 0; return 0;
} }
static void free_conf(struct rcu_head *head)
{
linear_conf_t *conf = container_of(head, linear_conf_t, rcu);
kfree(conf);
}
static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev) static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
{ {
/* Adding a drive to a linear array allows the array to grow. /* Adding a drive to a linear array allows the array to grow.
...@@ -282,7 +239,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -282,7 +239,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
* The current one is never freed until the array is stopped. * The current one is never freed until the array is stopped.
* This avoids races. * This avoids races.
*/ */
linear_conf_t *newconf; linear_conf_t *newconf, *oldconf;
if (rdev->saved_raid_disk != mddev->raid_disks) if (rdev->saved_raid_disk != mddev->raid_disks)
return -EINVAL; return -EINVAL;
...@@ -294,25 +251,29 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -294,25 +251,29 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
if (!newconf) if (!newconf)
return -ENOMEM; return -ENOMEM;
newconf->prev = mddev_to_conf(mddev); oldconf = rcu_dereference(mddev->private);
mddev->private = newconf;
mddev->raid_disks++; mddev->raid_disks++;
rcu_assign_pointer(mddev->private, newconf);
md_set_array_sectors(mddev, linear_size(mddev, 0, 0)); md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
set_capacity(mddev->gendisk, mddev->array_sectors); set_capacity(mddev->gendisk, mddev->array_sectors);
call_rcu(&oldconf->rcu, free_conf);
return 0; return 0;
} }
static int linear_stop (mddev_t *mddev) static int linear_stop (mddev_t *mddev)
{ {
linear_conf_t *conf = mddev_to_conf(mddev); linear_conf_t *conf = mddev->private;
/*
* We do not require rcu protection here since
* we hold reconfig_mutex for both linear_add and
* linear_stop, so they cannot race.
* We should make sure any old 'conf's are properly
* freed though.
*/
rcu_barrier();
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
do { kfree(conf);
linear_conf_t *t = conf->prev;
kfree(conf->hash_table);
kfree(conf);
conf = t;
} while (conf);
return 0; return 0;
} }
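linear_add() and linear_stop() above illustrate the new lifetime management for 'conf': readers dereference mddev->private under rcu_read_lock(), linear_add() publishes the enlarged conf with rcu_assign_pointer() and retires the old one through call_rcu()/free_conf(), and linear_stop() only needs an rcu_barrier() before the final kfree() because reconfig_mutex already serializes it against linear_add(). A condensed kernel-style sketch of the whole cycle (fragments, not a buildable unit; every call appears in the hunks above):

    /* Reader (which_dev, unplug, congested, make_request): */
    rcu_read_lock();
    conf = rcu_dereference(mddev->private);
    /* ... use conf ... */
    rcu_read_unlock();

    /* Updater (linear_add), reconfig_mutex held: */
    oldconf = rcu_dereference(mddev->private);
    rcu_assign_pointer(mddev->private, newconf);
    call_rcu(&oldconf->rcu, free_conf);     /* free_conf() kfree()s it */

    /* Teardown (linear_stop): flush pending callbacks, then free. */
    rcu_barrier();
    kfree(mddev->private);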
...@@ -322,6 +283,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) ...@@ -322,6 +283,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
const int rw = bio_data_dir(bio); const int rw = bio_data_dir(bio);
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
dev_info_t *tmp_dev; dev_info_t *tmp_dev;
sector_t start_sector;
int cpu; int cpu;
if (unlikely(bio_barrier(bio))) { if (unlikely(bio_barrier(bio))) {
...@@ -335,33 +297,36 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) ...@@ -335,33 +297,36 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
bio_sectors(bio)); bio_sectors(bio));
part_stat_unlock(); part_stat_unlock();
+	rcu_read_lock();
 	tmp_dev = which_dev(mddev, bio->bi_sector);
+	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
-	if (unlikely(bio->bi_sector >= (tmp_dev->num_sectors +
-					tmp_dev->start_sector)
-		     || (bio->bi_sector <
-			 tmp_dev->start_sector))) {
+	if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
+		     || (bio->bi_sector < start_sector))) {
 		char b[BDEVNAME_SIZE];
 		printk("linear_make_request: Sector %llu out of bounds on "
 			"dev %s: %llu sectors, offset %llu\n",
 			(unsigned long long)bio->bi_sector,
 			bdevname(tmp_dev->rdev->bdev, b),
-			(unsigned long long)tmp_dev->num_sectors,
-			(unsigned long long)tmp_dev->start_sector);
+			(unsigned long long)tmp_dev->rdev->sectors,
+			(unsigned long long)start_sector);
+		rcu_read_unlock();
 		bio_io_error(bio);
 		return 0;
 	}
 	if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
-		     tmp_dev->start_sector + tmp_dev->num_sectors)) {
+		     tmp_dev->end_sector)) {
 		/* This bio crosses a device boundary, so we have to
 		 * split it.
 		 */
 		struct bio_pair *bp;
+		sector_t end_sector = tmp_dev->end_sector;
+		rcu_read_unlock();
-		bp = bio_split(bio,
-			       tmp_dev->start_sector + tmp_dev->num_sectors
-			       - bio->bi_sector);
+		bp = bio_split(bio, end_sector - bio->bi_sector);
if (linear_make_request(q, &bp->bio1)) if (linear_make_request(q, &bp->bio1))
generic_make_request(&bp->bio1); generic_make_request(&bp->bio1);
...@@ -372,8 +337,9 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) ...@@ -372,8 +337,9 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
} }
bio->bi_bdev = tmp_dev->rdev->bdev; bio->bi_bdev = tmp_dev->rdev->bdev;
bio->bi_sector = bio->bi_sector - tmp_dev->start_sector bio->bi_sector = bio->bi_sector - start_sector
+ tmp_dev->rdev->data_offset; + tmp_dev->rdev->data_offset;
rcu_read_unlock();
return 1; return 1;
} }
...@@ -381,7 +347,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) ...@@ -381,7 +347,7 @@ static int linear_make_request (struct request_queue *q, struct bio *bio)
static void linear_status (struct seq_file *seq, mddev_t *mddev) static void linear_status (struct seq_file *seq, mddev_t *mddev)
{ {
seq_printf(seq, " %dk rounding", mddev->chunk_size/1024); seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
} }
...
...@@ -3,27 +3,19 @@ ...@@ -3,27 +3,19 @@
struct dev_info { struct dev_info {
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
sector_t num_sectors; sector_t end_sector;
sector_t start_sector;
}; };
typedef struct dev_info dev_info_t; typedef struct dev_info dev_info_t;
struct linear_private_data struct linear_private_data
{ {
struct linear_private_data *prev; /* earlier version */
dev_info_t **hash_table;
sector_t spacing;
sector_t array_sectors; sector_t array_sectors;
int sector_shift; /* shift before dividing
* by spacing
*/
dev_info_t disks[0]; dev_info_t disks[0];
struct rcu_head rcu;
}; };
typedef struct linear_private_data linear_conf_t; typedef struct linear_private_data linear_conf_t;
#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private)
#endif #endif
...@@ -440,15 +440,6 @@ static inline sector_t calc_dev_sboffset(struct block_device *bdev) ...@@ -440,15 +440,6 @@ static inline sector_t calc_dev_sboffset(struct block_device *bdev)
return MD_NEW_SIZE_SECTORS(num_sectors); return MD_NEW_SIZE_SECTORS(num_sectors);
} }
static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size)
{
sector_t num_sectors = rdev->sb_start;
if (chunk_size)
num_sectors &= ~((sector_t)chunk_size/512 - 1);
return num_sectors;
}
static int alloc_disk_sb(mdk_rdev_t * rdev) static int alloc_disk_sb(mdk_rdev_t * rdev)
{ {
if (rdev->sb_page) if (rdev->sb_page)
...@@ -744,6 +735,24 @@ struct super_type { ...@@ -744,6 +735,24 @@ struct super_type {
sector_t num_sectors); sector_t num_sectors);
}; };
/*
* Check that the given mddev has no bitmap.
*
* This function is called from the run method of all personalities that do not
* support bitmaps. It prints an error message and returns non-zero if mddev
* has a bitmap. Otherwise, it returns 0.
*
*/
int md_check_no_bitmap(mddev_t *mddev)
{
if (!mddev->bitmap_file && !mddev->bitmap_offset)
return 0;
printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
mdname(mddev), mddev->pers->name);
return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
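md_check_no_bitmap() replaces the level checks that used to sit in super_90_load()/super_1_load(): personalities that cannot drive a bitmap (faulty, linear, multipath, raid0) now reject one themselves at the top of their run() methods, as the hunks in this merge show. The call pattern is simply:

    static int example_run(mddev_t *mddev)      /* sketch of the pattern */
    {
            if (md_check_no_bitmap(mddev))
                    return -EINVAL;             /* bitmap present: refuse */
            /* ... normal personality setup ... */
            return 0;
    }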
/* /*
* load_super for 0.90.0 * load_super for 0.90.0
*/ */
...@@ -797,17 +806,6 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version ...@@ -797,17 +806,6 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
rdev->data_offset = 0; rdev->data_offset = 0;
rdev->sb_size = MD_SB_BYTES; rdev->sb_size = MD_SB_BYTES;
if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
if (sb->level != 1 && sb->level != 4
&& sb->level != 5 && sb->level != 6
&& sb->level != 10) {
/* FIXME use a better test */
printk(KERN_WARNING
"md: bitmaps not supported for this level.\n");
goto abort;
}
}
if (sb->level == LEVEL_MULTIPATH) if (sb->level == LEVEL_MULTIPATH)
rdev->desc_nr = -1; rdev->desc_nr = -1;
else else
...@@ -836,7 +834,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version ...@@ -836,7 +834,7 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
else else
ret = 0; ret = 0;
} }
rdev->sectors = calc_num_sectors(rdev, sb->chunk_size); rdev->sectors = rdev->sb_start;
if (rdev->sectors < sb->size * 2 && sb->level > 1) if (rdev->sectors < sb->size * 2 && sb->level > 1)
/* "this cannot possibly happen" ... */ /* "this cannot possibly happen" ... */
...@@ -866,7 +864,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -866,7 +864,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->minor_version = sb->minor_version; mddev->minor_version = sb->minor_version;
mddev->patch_version = sb->patch_version; mddev->patch_version = sb->patch_version;
mddev->external = 0; mddev->external = 0;
mddev->chunk_size = sb->chunk_size; mddev->chunk_sectors = sb->chunk_size >> 9;
mddev->ctime = sb->ctime; mddev->ctime = sb->ctime;
mddev->utime = sb->utime; mddev->utime = sb->utime;
mddev->level = sb->level; mddev->level = sb->level;
...@@ -883,13 +881,13 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -883,13 +881,13 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->delta_disks = sb->delta_disks; mddev->delta_disks = sb->delta_disks;
mddev->new_level = sb->new_level; mddev->new_level = sb->new_level;
mddev->new_layout = sb->new_layout; mddev->new_layout = sb->new_layout;
mddev->new_chunk = sb->new_chunk; mddev->new_chunk_sectors = sb->new_chunk >> 9;
} else { } else {
mddev->reshape_position = MaxSector; mddev->reshape_position = MaxSector;
mddev->delta_disks = 0; mddev->delta_disks = 0;
mddev->new_level = mddev->level; mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout; mddev->new_layout = mddev->layout;
mddev->new_chunk = mddev->chunk_size; mddev->new_chunk_sectors = mddev->chunk_sectors;
} }
if (sb->state & (1<<MD_SB_CLEAN)) if (sb->state & (1<<MD_SB_CLEAN))
...@@ -1004,7 +1002,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -1004,7 +1002,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->new_level = mddev->new_level; sb->new_level = mddev->new_level;
sb->delta_disks = mddev->delta_disks; sb->delta_disks = mddev->delta_disks;
sb->new_layout = mddev->new_layout; sb->new_layout = mddev->new_layout;
sb->new_chunk = mddev->new_chunk; sb->new_chunk = mddev->new_chunk_sectors << 9;
} }
mddev->minor_version = sb->minor_version; mddev->minor_version = sb->minor_version;
if (mddev->in_sync) if (mddev->in_sync)
...@@ -1018,7 +1016,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -1018,7 +1016,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->recovery_cp = 0; sb->recovery_cp = 0;
sb->layout = mddev->layout; sb->layout = mddev->layout;
sb->chunk_size = mddev->chunk_size; sb->chunk_size = mddev->chunk_sectors << 9;
if (mddev->bitmap && mddev->bitmap_file == NULL) if (mddev->bitmap && mddev->bitmap_file == NULL)
sb->state |= (1<<MD_SB_BITMAP_PRESENT); sb->state |= (1<<MD_SB_BITMAP_PRESENT);
...@@ -1185,17 +1183,6 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) ...@@ -1185,17 +1183,6 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
bdevname(rdev->bdev,b)); bdevname(rdev->bdev,b));
return -EINVAL; return -EINVAL;
} }
if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
if (sb->level != cpu_to_le32(1) &&
sb->level != cpu_to_le32(4) &&
sb->level != cpu_to_le32(5) &&
sb->level != cpu_to_le32(6) &&
sb->level != cpu_to_le32(10)) {
printk(KERN_WARNING
"md: bitmaps not supported for this level.\n");
return -EINVAL;
}
}
rdev->preferred_minor = 0xffff; rdev->preferred_minor = 0xffff;
rdev->data_offset = le64_to_cpu(sb->data_offset); rdev->data_offset = le64_to_cpu(sb->data_offset);
...@@ -1248,9 +1235,6 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) ...@@ -1248,9 +1235,6 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
if (rdev->sectors < le64_to_cpu(sb->data_size)) if (rdev->sectors < le64_to_cpu(sb->data_size))
return -EINVAL; return -EINVAL;
rdev->sectors = le64_to_cpu(sb->data_size); rdev->sectors = le64_to_cpu(sb->data_size);
if (le32_to_cpu(sb->chunksize))
rdev->sectors &= ~((sector_t)le32_to_cpu(sb->chunksize) - 1);
if (le64_to_cpu(sb->size) > rdev->sectors) if (le64_to_cpu(sb->size) > rdev->sectors)
return -EINVAL; return -EINVAL;
return ret; return ret;
...@@ -1271,7 +1255,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -1271,7 +1255,7 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->major_version = 1; mddev->major_version = 1;
mddev->patch_version = 0; mddev->patch_version = 0;
mddev->external = 0; mddev->external = 0;
mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9; mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
mddev->level = le32_to_cpu(sb->level); mddev->level = le32_to_cpu(sb->level);
...@@ -1297,13 +1281,13 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -1297,13 +1281,13 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
mddev->delta_disks = le32_to_cpu(sb->delta_disks); mddev->delta_disks = le32_to_cpu(sb->delta_disks);
mddev->new_level = le32_to_cpu(sb->new_level); mddev->new_level = le32_to_cpu(sb->new_level);
mddev->new_layout = le32_to_cpu(sb->new_layout); mddev->new_layout = le32_to_cpu(sb->new_layout);
mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9; mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
} else { } else {
mddev->reshape_position = MaxSector; mddev->reshape_position = MaxSector;
mddev->delta_disks = 0; mddev->delta_disks = 0;
mddev->new_level = mddev->level; mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout; mddev->new_layout = mddev->layout;
mddev->new_chunk = mddev->chunk_size; mddev->new_chunk_sectors = mddev->chunk_sectors;
} }
} else if (mddev->pers == NULL) { } else if (mddev->pers == NULL) {
...@@ -1375,7 +1359,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -1375,7 +1359,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->raid_disks = cpu_to_le32(mddev->raid_disks); sb->raid_disks = cpu_to_le32(mddev->raid_disks);
sb->size = cpu_to_le64(mddev->dev_sectors); sb->size = cpu_to_le64(mddev->dev_sectors);
sb->chunksize = cpu_to_le32(mddev->chunk_size >> 9); sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level); sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout); sb->layout = cpu_to_le32(mddev->layout);
...@@ -1402,7 +1386,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) ...@@ -1402,7 +1386,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
sb->new_layout = cpu_to_le32(mddev->new_layout); sb->new_layout = cpu_to_le32(mddev->new_layout);
sb->delta_disks = cpu_to_le32(mddev->delta_disks); sb->delta_disks = cpu_to_le32(mddev->delta_disks);
sb->new_level = cpu_to_le32(mddev->new_level); sb->new_level = cpu_to_le32(mddev->new_level);
sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9); sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
} }
max_dev = 0; max_dev = 0;
...@@ -1897,6 +1881,7 @@ static void md_update_sb(mddev_t * mddev, int force_change) ...@@ -1897,6 +1881,7 @@ static void md_update_sb(mddev_t * mddev, int force_change)
int sync_req; int sync_req;
int nospares = 0; int nospares = 0;
mddev->utime = get_seconds();
if (mddev->external) if (mddev->external)
return; return;
repeat: repeat:
...@@ -1926,7 +1911,6 @@ static void md_update_sb(mddev_t * mddev, int force_change) ...@@ -1926,7 +1911,6 @@ static void md_update_sb(mddev_t * mddev, int force_change)
nospares = 0; nospares = 0;
sync_req = mddev->in_sync; sync_req = mddev->in_sync;
mddev->utime = get_seconds();
/* If this is just a dirty<->clean transition, and the array is clean /* If this is just a dirty<->clean transition, and the array is clean
* and 'events' is odd, we can roll back to the previous clean state */ * and 'events' is odd, we can roll back to the previous clean state */
...@@ -2597,15 +2581,6 @@ static void analyze_sbs(mddev_t * mddev) ...@@ -2597,15 +2581,6 @@ static void analyze_sbs(mddev_t * mddev)
clear_bit(In_sync, &rdev->flags); clear_bit(In_sync, &rdev->flags);
} }
} }
if (mddev->recovery_cp != MaxSector &&
mddev->level >= 1)
printk(KERN_ERR "md: %s: raid array is not clean"
" -- starting background reconstruction\n",
mdname(mddev));
} }
static void md_safemode_timeout(unsigned long data); static void md_safemode_timeout(unsigned long data);
...@@ -2746,7 +2721,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -2746,7 +2721,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
if (IS_ERR(priv)) { if (IS_ERR(priv)) {
mddev->new_level = mddev->level; mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout; mddev->new_layout = mddev->layout;
mddev->new_chunk = mddev->chunk_size; mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->raid_disks -= mddev->delta_disks; mddev->raid_disks -= mddev->delta_disks;
mddev->delta_disks = 0; mddev->delta_disks = 0;
module_put(pers->owner); module_put(pers->owner);
...@@ -2764,7 +2739,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -2764,7 +2739,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel));
mddev->level = mddev->new_level; mddev->level = mddev->new_level;
mddev->layout = mddev->new_layout; mddev->layout = mddev->new_layout;
mddev->chunk_size = mddev->new_chunk; mddev->chunk_sectors = mddev->new_chunk_sectors;
mddev->delta_disks = 0; mddev->delta_disks = 0;
pers->run(mddev); pers->run(mddev);
mddev_resume(mddev); mddev_resume(mddev);
...@@ -2800,11 +2775,14 @@ layout_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -2800,11 +2775,14 @@ layout_store(mddev_t *mddev, const char *buf, size_t len)
if (mddev->pers) { if (mddev->pers) {
int err; int err;
if (mddev->pers->reconfig == NULL) if (mddev->pers->check_reshape == NULL)
return -EBUSY; return -EBUSY;
err = mddev->pers->reconfig(mddev, n, -1); mddev->new_layout = n;
if (err) err = mddev->pers->check_reshape(mddev);
if (err) {
mddev->new_layout = mddev->layout;
return err; return err;
}
} else { } else {
mddev->new_layout = n; mddev->new_layout = n;
if (mddev->reshape_position == MaxSector) if (mddev->reshape_position == MaxSector)
...@@ -2857,10 +2835,11 @@ static ssize_t ...@@ -2857,10 +2835,11 @@ static ssize_t
chunk_size_show(mddev_t *mddev, char *page) chunk_size_show(mddev_t *mddev, char *page)
{ {
if (mddev->reshape_position != MaxSector && if (mddev->reshape_position != MaxSector &&
mddev->chunk_size != mddev->new_chunk) mddev->chunk_sectors != mddev->new_chunk_sectors)
return sprintf(page, "%d (%d)\n", mddev->new_chunk, return sprintf(page, "%d (%d)\n",
mddev->chunk_size); mddev->new_chunk_sectors << 9,
return sprintf(page, "%d\n", mddev->chunk_size); mddev->chunk_sectors << 9);
return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
} }
static ssize_t static ssize_t
...@@ -2874,15 +2853,18 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -2874,15 +2853,18 @@ chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
if (mddev->pers) { if (mddev->pers) {
int err; int err;
if (mddev->pers->reconfig == NULL) if (mddev->pers->check_reshape == NULL)
return -EBUSY; return -EBUSY;
err = mddev->pers->reconfig(mddev, -1, n); mddev->new_chunk_sectors = n >> 9;
if (err) err = mddev->pers->check_reshape(mddev);
if (err) {
mddev->new_chunk_sectors = mddev->chunk_sectors;
return err; return err;
}
} else { } else {
mddev->new_chunk = n; mddev->new_chunk_sectors = n >> 9;
if (mddev->reshape_position == MaxSector) if (mddev->reshape_position == MaxSector)
mddev->chunk_size = n; mddev->chunk_sectors = n >> 9;
} }
return len; return len;
} }
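The sysfs chunk_size file keeps its byte units, but the value is now stored as mddev->chunk_sectors (and staged as new_chunk_sectors while a reshape is pending), so the store path shifts down by 9 and the show path shifts back up, printing "new (current)" during a reshape. A tiny standalone illustration of that round trip with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long n = 131072;               /* user writes 128 KiB */
            int new_chunk_sectors = n >> 9;         /* stored as 256 sectors */
            int chunk_sectors = 128;                /* current chunk: 64 KiB */

            /* chunk_size_show() during a pending reshape: "new (current)" */
            printf("%d (%d)\n",
                   new_chunk_sectors << 9, chunk_sectors << 9);
            return 0;
    }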
...@@ -3527,8 +3509,9 @@ min_sync_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -3527,8 +3509,9 @@ min_sync_store(mddev_t *mddev, const char *buf, size_t len)
return -EBUSY; return -EBUSY;
/* Must be a multiple of chunk_size */ /* Must be a multiple of chunk_size */
if (mddev->chunk_size) { if (mddev->chunk_sectors) {
if (min & (sector_t)((mddev->chunk_size>>9)-1)) sector_t temp = min;
if (sector_div(temp, mddev->chunk_sectors))
return -EINVAL; return -EINVAL;
} }
mddev->resync_min = min; mddev->resync_min = min;
...@@ -3564,8 +3547,9 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -3564,8 +3547,9 @@ max_sync_store(mddev_t *mddev, const char *buf, size_t len)
return -EBUSY; return -EBUSY;
/* Must be a multiple of chunk_size */ /* Must be a multiple of chunk_size */
if (mddev->chunk_size) { if (mddev->chunk_sectors) {
if (max & (sector_t)((mddev->chunk_size>>9)-1)) sector_t temp = max;
if (sector_div(temp, mddev->chunk_sectors))
return -EINVAL; return -EINVAL;
} }
mddev->resync_max = max; mddev->resync_max = max;
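min_sync_store() and max_sync_store() used to test "multiple of the chunk size" with a bitmask, which is only correct for power-of-two chunks; with arbitrary chunk sizes now allowed they use the remainder from sector_div() instead. A standalone model of the new check, with sector_div() emulated in userspace:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Userspace stand-in for the kernel's sector_div(): divide in place,
     * return the remainder. */
    static unsigned sector_div_emul(sector_t *a, unsigned b)
    {
            unsigned rem = *a % b;
            *a /= b;
            return rem;
    }

    int main(void)
    {
            sector_t min = 3840;            /* candidate resync_min */
            unsigned chunk_sectors = 96;    /* non-power-of-two chunk */
            sector_t temp = min;

            if (sector_div_emul(&temp, chunk_sectors))
                    printf("not a multiple of %u sectors: -EINVAL\n",
                           chunk_sectors);
            else
                    printf("ok: %llu chunks\n", (unsigned long long)temp);
            return 0;
    }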
...@@ -3656,7 +3640,7 @@ reshape_position_store(mddev_t *mddev, const char *buf, size_t len) ...@@ -3656,7 +3640,7 @@ reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
mddev->delta_disks = 0; mddev->delta_disks = 0;
mddev->new_level = mddev->level; mddev->new_level = mddev->level;
mddev->new_layout = mddev->layout; mddev->new_layout = mddev->layout;
mddev->new_chunk = mddev->chunk_size; mddev->new_chunk_sectors = mddev->chunk_sectors;
return len; return len;
} }
...@@ -3976,11 +3960,9 @@ static int start_dirty_degraded; ...@@ -3976,11 +3960,9 @@ static int start_dirty_degraded;
static int do_md_run(mddev_t * mddev) static int do_md_run(mddev_t * mddev)
{ {
int err; int err;
int chunk_size;
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
struct gendisk *disk; struct gendisk *disk;
struct mdk_personality *pers; struct mdk_personality *pers;
char b[BDEVNAME_SIZE];
if (list_empty(&mddev->disks)) if (list_empty(&mddev->disks))
/* cannot run an array with no devices.. */ /* cannot run an array with no devices.. */
...@@ -3998,38 +3980,6 @@ static int do_md_run(mddev_t * mddev) ...@@ -3998,38 +3980,6 @@ static int do_md_run(mddev_t * mddev)
analyze_sbs(mddev); analyze_sbs(mddev);
} }
chunk_size = mddev->chunk_size;
if (chunk_size) {
if (chunk_size > MAX_CHUNK_SIZE) {
printk(KERN_ERR "too big chunk_size: %d > %d\n",
chunk_size, MAX_CHUNK_SIZE);
return -EINVAL;
}
/*
* chunk-size has to be a power of 2
*/
if ( (1 << ffz(~chunk_size)) != chunk_size) {
printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size);
return -EINVAL;
}
/* devices must have minimum size of one chunk */
list_for_each_entry(rdev, &mddev->disks, same_set) {
if (test_bit(Faulty, &rdev->flags))
continue;
if (rdev->sectors < chunk_size / 512) {
printk(KERN_WARNING
"md: Dev %s smaller than chunk_size:"
" %llu < %d\n",
bdevname(rdev->bdev,b),
(unsigned long long)rdev->sectors,
chunk_size / 512);
return -EINVAL;
}
}
}
if (mddev->level != LEVEL_NONE) if (mddev->level != LEVEL_NONE)
request_module("md-level-%d", mddev->level); request_module("md-level-%d", mddev->level);
else if (mddev->clevel[0]) else if (mddev->clevel[0])
...@@ -4405,7 +4355,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) ...@@ -4405,7 +4355,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
mddev->flags = 0; mddev->flags = 0;
mddev->ro = 0; mddev->ro = 0;
mddev->metadata_type[0] = 0; mddev->metadata_type[0] = 0;
mddev->chunk_size = 0; mddev->chunk_sectors = 0;
mddev->ctime = mddev->utime = 0; mddev->ctime = mddev->utime = 0;
mddev->layout = 0; mddev->layout = 0;
mddev->max_disks = 0; mddev->max_disks = 0;
...@@ -4413,7 +4363,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) ...@@ -4413,7 +4363,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
mddev->delta_disks = 0; mddev->delta_disks = 0;
mddev->new_level = LEVEL_NONE; mddev->new_level = LEVEL_NONE;
mddev->new_layout = 0; mddev->new_layout = 0;
mddev->new_chunk = 0; mddev->new_chunk_sectors = 0;
mddev->curr_resync = 0; mddev->curr_resync = 0;
mddev->resync_mismatches = 0; mddev->resync_mismatches = 0;
mddev->suspend_lo = mddev->suspend_hi = 0; mddev->suspend_lo = mddev->suspend_hi = 0;
...@@ -4618,7 +4568,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg) ...@@ -4618,7 +4568,7 @@ static int get_array_info(mddev_t * mddev, void __user * arg)
info.spare_disks = spare; info.spare_disks = spare;
info.layout = mddev->layout; info.layout = mddev->layout;
info.chunk_size = mddev->chunk_size; info.chunk_size = mddev->chunk_sectors << 9;
if (copy_to_user(arg, &info, sizeof(info))) if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT; return -EFAULT;
...@@ -4843,7 +4793,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) ...@@ -4843,7 +4793,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
} else } else
rdev->sb_start = calc_dev_sboffset(rdev->bdev); rdev->sb_start = calc_dev_sboffset(rdev->bdev);
rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size); rdev->sectors = rdev->sb_start;
err = bind_rdev_to_array(rdev, mddev); err = bind_rdev_to_array(rdev, mddev);
if (err) { if (err) {
...@@ -4913,7 +4863,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) ...@@ -4913,7 +4863,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
else else
rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size); rdev->sectors = rdev->sb_start;
if (test_bit(Faulty, &rdev->flags)) { if (test_bit(Faulty, &rdev->flags)) {
printk(KERN_WARNING printk(KERN_WARNING
...@@ -5062,7 +5012,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) ...@@ -5062,7 +5012,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
mddev->external = 0; mddev->external = 0;
mddev->layout = info->layout; mddev->layout = info->layout;
mddev->chunk_size = info->chunk_size; mddev->chunk_sectors = info->chunk_size >> 9;
mddev->max_disks = MD_SB_DISKS; mddev->max_disks = MD_SB_DISKS;
...@@ -5081,7 +5031,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) ...@@ -5081,7 +5031,7 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
get_random_bytes(mddev->uuid, 16); get_random_bytes(mddev->uuid, 16);
mddev->new_level = mddev->level; mddev->new_level = mddev->level;
mddev->new_chunk = mddev->chunk_size; mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->new_layout = mddev->layout; mddev->new_layout = mddev->layout;
mddev->delta_disks = 0; mddev->delta_disks = 0;
...@@ -5191,7 +5141,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) ...@@ -5191,7 +5141,7 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
mddev->level != info->level || mddev->level != info->level ||
/* mddev->layout != info->layout || */ /* mddev->layout != info->layout || */
!mddev->persistent != info->not_persistent|| !mddev->persistent != info->not_persistent||
mddev->chunk_size != info->chunk_size || mddev->chunk_sectors != info->chunk_size >> 9 ||
/* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
((state^info->state) & 0xfffffe00) ((state^info->state) & 0xfffffe00)
) )
...@@ -5215,10 +5165,15 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) ...@@ -5215,10 +5165,15 @@ static int update_array_info(mddev_t *mddev, mdu_array_info_t *info)
* we don't need to do anything at the md level, the * we don't need to do anything at the md level, the
* personality will take care of it all. * personality will take care of it all.
*/ */
if (mddev->pers->reconfig == NULL) if (mddev->pers->check_reshape == NULL)
return -EINVAL; return -EINVAL;
else else {
return mddev->pers->reconfig(mddev, info->layout, -1); mddev->new_layout = info->layout;
rv = mddev->pers->check_reshape(mddev);
if (rv)
mddev->new_layout = mddev->layout;
return rv;
}
} }
if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) if (info->size >= 0 && mddev->dev_sectors / 2 != info->size)
rv = update_size(mddev, (sector_t)info->size * 2); rv = update_size(mddev, (sector_t)info->size * 2);
...@@ -6717,7 +6672,8 @@ void md_check_recovery(mddev_t *mddev) ...@@ -6717,7 +6672,8 @@ void md_check_recovery(mddev_t *mddev)
*/ */
if (mddev->reshape_position != MaxSector) { if (mddev->reshape_position != MaxSector) {
if (mddev->pers->check_reshape(mddev) != 0) if (mddev->pers->check_reshape == NULL ||
mddev->pers->check_reshape(mddev) != 0)
/* Cannot proceed */ /* Cannot proceed */
goto unlock; goto unlock;
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
...
...@@ -29,13 +29,6 @@ ...@@ -29,13 +29,6 @@
typedef struct mddev_s mddev_t; typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t; typedef struct mdk_rdev_s mdk_rdev_t;
/*
* options passed in raidrun:
*/
/* Currently this must fit in an 'int' */
#define MAX_CHUNK_SIZE (1<<30)
/* /*
* MD's 'extended' device * MD's 'extended' device
*/ */
...@@ -145,7 +138,7 @@ struct mddev_s ...@@ -145,7 +138,7 @@ struct mddev_s
int external; /* metadata is int external; /* metadata is
* managed externally */ * managed externally */
char metadata_type[17]; /* externally set*/ char metadata_type[17]; /* externally set*/
int chunk_size; int chunk_sectors;
time_t ctime, utime; time_t ctime, utime;
int level, layout; int level, layout;
char clevel[16]; char clevel[16];
...@@ -166,7 +159,8 @@ struct mddev_s ...@@ -166,7 +159,8 @@ struct mddev_s
* If reshape_position is MaxSector, then no reshape is happening (yet). * If reshape_position is MaxSector, then no reshape is happening (yet).
*/ */
sector_t reshape_position; sector_t reshape_position;
int delta_disks, new_level, new_layout, new_chunk; int delta_disks, new_level, new_layout;
int new_chunk_sectors;
struct mdk_thread_s *thread; /* management thread */ struct mdk_thread_s *thread; /* management thread */
struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
...@@ -325,7 +319,6 @@ struct mdk_personality ...@@ -325,7 +319,6 @@ struct mdk_personality
int (*check_reshape) (mddev_t *mddev); int (*check_reshape) (mddev_t *mddev);
int (*start_reshape) (mddev_t *mddev); int (*start_reshape) (mddev_t *mddev);
void (*finish_reshape) (mddev_t *mddev); void (*finish_reshape) (mddev_t *mddev);
int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
/* quiesce moves between quiescence states /* quiesce moves between quiescence states
* 0 - fully active * 0 - fully active
* 1 - no new requests allowed * 1 - no new requests allowed
...@@ -437,5 +430,6 @@ extern void md_new_event(mddev_t *mddev); ...@@ -437,5 +430,6 @@ extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev); extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors); extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(mddev_t *mddev);
#endif /* _MD_MD_H */ #endif /* _MD_MD_H */
...@@ -58,7 +58,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh) ...@@ -58,7 +58,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{ {
unsigned long flags; unsigned long flags;
mddev_t *mddev = mp_bh->mddev; mddev_t *mddev = mp_bh->mddev;
multipath_conf_t *conf = mddev_to_conf(mddev); multipath_conf_t *conf = mddev->private;
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
list_add(&mp_bh->retry_list, &conf->retry_list); list_add(&mp_bh->retry_list, &conf->retry_list);
...@@ -75,7 +75,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh) ...@@ -75,7 +75,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err) static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
{ {
struct bio *bio = mp_bh->master_bio; struct bio *bio = mp_bh->master_bio;
multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); multipath_conf_t *conf = mp_bh->mddev->private;
bio_endio(bio, err); bio_endio(bio, err);
mempool_free(mp_bh, conf->pool); mempool_free(mp_bh, conf->pool);
...@@ -85,7 +85,7 @@ static void multipath_end_request(struct bio *bio, int error) ...@@ -85,7 +85,7 @@ static void multipath_end_request(struct bio *bio, int error)
{ {
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private); struct multipath_bh * mp_bh = (struct multipath_bh *)(bio->bi_private);
multipath_conf_t *conf = mddev_to_conf(mp_bh->mddev); multipath_conf_t *conf = mp_bh->mddev->private;
mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev; mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
if (uptodate) if (uptodate)
...@@ -107,7 +107,7 @@ static void multipath_end_request(struct bio *bio, int error) ...@@ -107,7 +107,7 @@ static void multipath_end_request(struct bio *bio, int error)
static void unplug_slaves(mddev_t *mddev) static void unplug_slaves(mddev_t *mddev)
{ {
multipath_conf_t *conf = mddev_to_conf(mddev); multipath_conf_t *conf = mddev->private;
int i; int i;
rcu_read_lock(); rcu_read_lock();
...@@ -138,7 +138,7 @@ static void multipath_unplug(struct request_queue *q) ...@@ -138,7 +138,7 @@ static void multipath_unplug(struct request_queue *q)
static int multipath_make_request (struct request_queue *q, struct bio * bio) static int multipath_make_request (struct request_queue *q, struct bio * bio)
{ {
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
multipath_conf_t *conf = mddev_to_conf(mddev); multipath_conf_t *conf = mddev->private;
struct multipath_bh * mp_bh; struct multipath_bh * mp_bh;
struct multipath_info *multipath; struct multipath_info *multipath;
const int rw = bio_data_dir(bio); const int rw = bio_data_dir(bio);
...@@ -180,7 +180,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio) ...@@ -180,7 +180,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
static void multipath_status (struct seq_file *seq, mddev_t *mddev) static void multipath_status (struct seq_file *seq, mddev_t *mddev)
{ {
multipath_conf_t *conf = mddev_to_conf(mddev); multipath_conf_t *conf = mddev->private;
int i; int i;
seq_printf (seq, " [%d/%d] [", conf->raid_disks, seq_printf (seq, " [%d/%d] [", conf->raid_disks,
...@@ -195,7 +195,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev) ...@@ -195,7 +195,7 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
static int multipath_congested(void *data, int bits) static int multipath_congested(void *data, int bits)
{ {
mddev_t *mddev = data; mddev_t *mddev = data;
multipath_conf_t *conf = mddev_to_conf(mddev); multipath_conf_t *conf = mddev->private;
int i, ret = 0; int i, ret = 0;
rcu_read_lock(); rcu_read_lock();
...@@ -220,7 +220,7 @@ static int multipath_congested(void *data, int bits) ...@@ -220,7 +220,7 @@ static int multipath_congested(void *data, int bits)
*/ */
static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev) static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
{ {
multipath_conf_t *conf = mddev_to_conf(mddev); multipath_conf_t *conf = mddev->private;
if (conf->working_disks <= 1) { if (conf->working_disks <= 1) {
/* /*
...@@ -367,7 +367,7 @@ static void multipathd (mddev_t *mddev) ...@@ -367,7 +367,7 @@ static void multipathd (mddev_t *mddev)
struct multipath_bh *mp_bh; struct multipath_bh *mp_bh;
struct bio *bio; struct bio *bio;
unsigned long flags; unsigned long flags;
multipath_conf_t *conf = mddev_to_conf(mddev); multipath_conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list; struct list_head *head = &conf->retry_list;
md_check_recovery(mddev); md_check_recovery(mddev);
...@@ -421,6 +421,9 @@ static int multipath_run (mddev_t *mddev) ...@@ -421,6 +421,9 @@ static int multipath_run (mddev_t *mddev)
struct multipath_info *disk; struct multipath_info *disk;
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
if (md_check_no_bitmap(mddev))
return -EINVAL;
if (mddev->level != LEVEL_MULTIPATH) { if (mddev->level != LEVEL_MULTIPATH) {
printk("multipath: %s: raid level not set to multipath IO (%d)\n", printk("multipath: %s: raid level not set to multipath IO (%d)\n",
mdname(mddev), mddev->level); mdname(mddev), mddev->level);
...@@ -531,7 +534,7 @@ static int multipath_run (mddev_t *mddev) ...@@ -531,7 +534,7 @@ static int multipath_run (mddev_t *mddev)
static int multipath_stop (mddev_t *mddev) static int multipath_stop (mddev_t *mddev)
{ {
multipath_conf_t *conf = mddev_to_conf(mddev); multipath_conf_t *conf = mddev->private;
md_unregister_thread(mddev->thread); md_unregister_thread(mddev->thread);
mddev->thread = NULL; mddev->thread = NULL;
...
...@@ -18,12 +18,6 @@ struct multipath_private_data { ...@@ -18,12 +18,6 @@ struct multipath_private_data {
typedef struct multipath_private_data multipath_conf_t; typedef struct multipath_private_data multipath_conf_t;
/*
* this is the only point in the RAID code where we violate
* C type safety. mddev->private is an 'opaque' pointer.
*/
#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private)
/* /*
* this is our 'private' 'collective' MULTIPATH buffer head. * this is our 'private' 'collective' MULTIPATH buffer head.
* it contains information about what kind of IO operations were started * it contains information about what kind of IO operations were started
...
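The per-personality mddev_to_conf() casting macros (linear.h, multipath.h above, raid0 below) are gone; call sites read mddev->private directly, or through rcu_dereference() where the conf can be swapped at run time as in linear.c. Sketch of the mechanical change at a call site:

    /* Before this merge (macro removed above):
     *      multipath_conf_t *conf = mddev_to_conf(mddev);
     * After it, the private pointer is read directly: */
    multipath_conf_t *conf = mddev->private;

    /* ... or under RCU where the conf may be replaced (linear.c): */
    linear_conf_t *lconf = rcu_dereference(mddev->private);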
...@@ -26,8 +26,8 @@ ...@@ -26,8 +26,8 @@
static void raid0_unplug(struct request_queue *q) static void raid0_unplug(struct request_queue *q)
{ {
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
raid0_conf_t *conf = mddev_to_conf(mddev); raid0_conf_t *conf = mddev->private;
mdk_rdev_t **devlist = conf->strip_zone[0].dev; mdk_rdev_t **devlist = conf->devlist;
int i; int i;
for (i=0; i<mddev->raid_disks; i++) { for (i=0; i<mddev->raid_disks; i++) {
...@@ -40,8 +40,8 @@ static void raid0_unplug(struct request_queue *q) ...@@ -40,8 +40,8 @@ static void raid0_unplug(struct request_queue *q)
static int raid0_congested(void *data, int bits) static int raid0_congested(void *data, int bits)
{ {
mddev_t *mddev = data; mddev_t *mddev = data;
raid0_conf_t *conf = mddev_to_conf(mddev); raid0_conf_t *conf = mddev->private;
mdk_rdev_t **devlist = conf->strip_zone[0].dev; mdk_rdev_t **devlist = conf->devlist;
int i, ret = 0; int i, ret = 0;
for (i = 0; i < mddev->raid_disks && !ret ; i++) { for (i = 0; i < mddev->raid_disks && !ret ; i++) {
...@@ -52,27 +52,60 @@ static int raid0_congested(void *data, int bits) ...@@ -52,27 +52,60 @@ static int raid0_congested(void *data, int bits)
return ret; return ret;
} }
/*
* inform the user of the raid configuration
*/
static void dump_zones(mddev_t *mddev)
{
int j, k, h;
sector_t zone_size = 0;
sector_t zone_start = 0;
char b[BDEVNAME_SIZE];
raid0_conf_t *conf = mddev->private;
printk(KERN_INFO "******* %s configuration *********\n",
mdname(mddev));
h = 0;
for (j = 0; j < conf->nr_strip_zones; j++) {
printk(KERN_INFO "zone%d=[", j);
for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
printk("%s/",
bdevname(conf->devlist[j*mddev->raid_disks
+ k]->bdev, b));
printk("]\n");
zone_size = conf->strip_zone[j].zone_end - zone_start;
printk(KERN_INFO " zone offset=%llukb "
"device offset=%llukb size=%llukb\n",
(unsigned long long)zone_start>>1,
(unsigned long long)conf->strip_zone[j].dev_start>>1,
(unsigned long long)zone_size>>1);
zone_start = conf->strip_zone[j].zone_end;
}
printk(KERN_INFO "**********************************\n\n");
}
static int create_strip_zones (mddev_t *mddev) static int create_strip_zones(mddev_t *mddev)
{ {
int i, c, j; int i, c, j, err;
sector_t current_start, curr_zone_start; sector_t curr_zone_end, sectors;
sector_t min_spacing; mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev, **dev;
raid0_conf_t *conf = mddev_to_conf(mddev);
mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
struct strip_zone *zone; struct strip_zone *zone;
int cnt; int cnt;
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
raid0_conf_t *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
/*
* The number of 'same size groups' if (!conf)
*/ return -ENOMEM;
conf->nr_strip_zones = 0;
list_for_each_entry(rdev1, &mddev->disks, same_set) { list_for_each_entry(rdev1, &mddev->disks, same_set) {
printk(KERN_INFO "raid0: looking at %s\n", printk(KERN_INFO "raid0: looking at %s\n",
bdevname(rdev1->bdev,b)); bdevname(rdev1->bdev,b));
c = 0; c = 0;
/* round size to chunk_size */
sectors = rdev1->sectors;
sector_div(sectors, mddev->chunk_sectors);
rdev1->sectors = sectors * mddev->chunk_sectors;
list_for_each_entry(rdev2, &mddev->disks, same_set) { list_for_each_entry(rdev2, &mddev->disks, same_set) {
printk(KERN_INFO "raid0: comparing %s(%llu)", printk(KERN_INFO "raid0: comparing %s(%llu)",
bdevname(rdev1->bdev,b), bdevname(rdev1->bdev,b),
...@@ -103,16 +136,16 @@ static int create_strip_zones (mddev_t *mddev) ...@@ -103,16 +136,16 @@ static int create_strip_zones (mddev_t *mddev)
} }
} }
printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones); printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones);
err = -ENOMEM;
conf->strip_zone = kzalloc(sizeof(struct strip_zone)* conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
conf->nr_strip_zones, GFP_KERNEL); conf->nr_strip_zones, GFP_KERNEL);
if (!conf->strip_zone) if (!conf->strip_zone)
return 1; goto abort;
conf->devlist = kzalloc(sizeof(mdk_rdev_t*)* conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
conf->nr_strip_zones*mddev->raid_disks, conf->nr_strip_zones*mddev->raid_disks,
GFP_KERNEL); GFP_KERNEL);
if (!conf->devlist) if (!conf->devlist)
return 1; goto abort;
/* The first zone must contain all devices, so here we check that /* The first zone must contain all devices, so here we check that
* there is a proper alignment of slots to devices and find them all * there is a proper alignment of slots to devices and find them all
...@@ -120,7 +153,8 @@ static int create_strip_zones (mddev_t *mddev) ...@@ -120,7 +153,8 @@ static int create_strip_zones (mddev_t *mddev)
zone = &conf->strip_zone[0]; zone = &conf->strip_zone[0];
cnt = 0; cnt = 0;
smallest = NULL; smallest = NULL;
zone->dev = conf->devlist; dev = conf->devlist;
err = -EINVAL;
list_for_each_entry(rdev1, &mddev->disks, same_set) { list_for_each_entry(rdev1, &mddev->disks, same_set) {
int j = rdev1->raid_disk; int j = rdev1->raid_disk;
...@@ -129,12 +163,12 @@ static int create_strip_zones (mddev_t *mddev) ...@@ -129,12 +163,12 @@ static int create_strip_zones (mddev_t *mddev)
"aborting!\n", j); "aborting!\n", j);
goto abort; goto abort;
} }
if (zone->dev[j]) { if (dev[j]) {
printk(KERN_ERR "raid0: multiple devices for %d - " printk(KERN_ERR "raid0: multiple devices for %d - "
"aborting!\n", j); "aborting!\n", j);
goto abort; goto abort;
} }
zone->dev[j] = rdev1; dev[j] = rdev1;
blk_queue_stack_limits(mddev->queue, blk_queue_stack_limits(mddev->queue,
rdev1->bdev->bd_disk->queue); rdev1->bdev->bd_disk->queue);
...@@ -157,34 +191,32 @@ static int create_strip_zones (mddev_t *mddev) ...@@ -157,34 +191,32 @@ static int create_strip_zones (mddev_t *mddev)
goto abort; goto abort;
} }
zone->nb_dev = cnt; zone->nb_dev = cnt;
zone->sectors = smallest->sectors * cnt; zone->zone_end = smallest->sectors * cnt;
zone->zone_start = 0;
current_start = smallest->sectors; curr_zone_end = zone->zone_end;
curr_zone_start = zone->sectors;
/* now do the other zones */ /* now do the other zones */
for (i = 1; i < conf->nr_strip_zones; i++) for (i = 1; i < conf->nr_strip_zones; i++)
{ {
zone = conf->strip_zone + i; zone = conf->strip_zone + i;
zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks; dev = conf->devlist + i * mddev->raid_disks;
printk(KERN_INFO "raid0: zone %d\n", i); printk(KERN_INFO "raid0: zone %d\n", i);
zone->dev_start = current_start; zone->dev_start = smallest->sectors;
smallest = NULL; smallest = NULL;
c = 0; c = 0;
for (j=0; j<cnt; j++) { for (j=0; j<cnt; j++) {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
rdev = conf->strip_zone[0].dev[j]; rdev = conf->devlist[j];
printk(KERN_INFO "raid0: checking %s ...", printk(KERN_INFO "raid0: checking %s ...",
bdevname(rdev->bdev, b)); bdevname(rdev->bdev, b));
if (rdev->sectors <= current_start) { if (rdev->sectors <= zone->dev_start) {
printk(KERN_INFO " nope.\n"); printk(KERN_INFO " nope.\n");
continue; continue;
} }
printk(KERN_INFO " contained as device %d\n", c); printk(KERN_INFO " contained as device %d\n", c);
zone->dev[c] = rdev; dev[c] = rdev;
c++; c++;
if (!smallest || rdev->sectors < smallest->sectors) { if (!smallest || rdev->sectors < smallest->sectors) {
smallest = rdev; smallest = rdev;
...@@ -194,47 +226,39 @@ static int create_strip_zones (mddev_t *mddev) ...@@ -194,47 +226,39 @@ static int create_strip_zones (mddev_t *mddev)
} }
zone->nb_dev = c; zone->nb_dev = c;
zone->sectors = (smallest->sectors - current_start) * c; sectors = (smallest->sectors - zone->dev_start) * c;
printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n", printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n",
zone->nb_dev, (unsigned long long)zone->sectors); zone->nb_dev, (unsigned long long)sectors);
zone->zone_start = curr_zone_start; curr_zone_end += sectors;
curr_zone_start += zone->sectors; zone->zone_end = curr_zone_end;
current_start = smallest->sectors;
printk(KERN_INFO "raid0: current zone start: %llu\n", printk(KERN_INFO "raid0: current zone start: %llu\n",
(unsigned long long)current_start); (unsigned long long)smallest->sectors);
}
/* Now find appropriate hash spacing.
* We want a number which causes most hash entries to cover
* at most two strips, but the hash table must be at most
* 1 PAGE. We choose the smallest strip, or contiguous collection
* of strips, that has big enough size. We never consider the last
* strip though as it's size has no bearing on the efficacy of the hash
* table.
*/
conf->spacing = curr_zone_start;
min_spacing = curr_zone_start;
sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
for (i=0; i < conf->nr_strip_zones-1; i++) {
sector_t s = 0;
for (j = i; j < conf->nr_strip_zones - 1 &&
s < min_spacing; j++)
s += conf->strip_zone[j].sectors;
if (s >= min_spacing && s < conf->spacing)
conf->spacing = s;
} }
mddev->queue->unplug_fn = raid0_unplug; mddev->queue->unplug_fn = raid0_unplug;
mddev->queue->backing_dev_info.congested_fn = raid0_congested; mddev->queue->backing_dev_info.congested_fn = raid0_congested;
mddev->queue->backing_dev_info.congested_data = mddev; mddev->queue->backing_dev_info.congested_data = mddev;
/*
* now since we have the hard sector sizes, we can make sure
* chunk size is a multiple of that sector size
*/
if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
printk(KERN_ERR "%s chunk_size of %d not valid\n",
mdname(mddev),
mddev->chunk_sectors << 9);
goto abort;
}
printk(KERN_INFO "raid0: done.\n"); printk(KERN_INFO "raid0: done.\n");
mddev->private = conf;
return 0; return 0;
abort: abort:
return 1; kfree(conf->strip_zone);
kfree(conf->devlist);
kfree(conf);
mddev->private = NULL;
return err;
} }
/** /**
...@@ -252,10 +276,15 @@ static int raid0_mergeable_bvec(struct request_queue *q, ...@@ -252,10 +276,15 @@ static int raid0_mergeable_bvec(struct request_queue *q,
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max; int max;
unsigned int chunk_sectors = mddev->chunk_size >> 9; unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9; unsigned int bio_sectors = bvm->bi_size >> 9;
max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; if (is_power_of_2(chunk_sectors))
max = (chunk_sectors - ((sector & (chunk_sectors-1))
+ bio_sectors)) << 9;
else
max = (chunk_sectors - (sector_div(sector, chunk_sectors)
+ bio_sectors)) << 9;
if (max < 0) max = 0; /* bio_add cannot handle a negative return */ if (max < 0) max = 0; /* bio_add cannot handle a negative return */
if (max <= biovec->bv_len && bio_sectors == 0) if (max <= biovec->bv_len && bio_sectors == 0)
return biovec->bv_len; return biovec->bv_len;
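Note: raid0_mergeable_bvec() now works in sector units and gains a non-power-of-2 branch; in both branches 'max' is the number of bytes that can still be added before the request would spill over a chunk boundary. A small user-space sketch of that computation (illustrative values and a plain power-of-two test, not this commit's code):

#include <stdio.h>

int main(void)
{
	unsigned long long sector = 300;	/* request start, in sectors  */
	unsigned int chunk_sectors = 192;	/* not a power of two here    */
	unsigned int bio_sectors = 8;		/* sectors already in the bio */
	long long max;

	if ((chunk_sectors & (chunk_sectors - 1)) == 0)	/* power-of-2 flow */
		max = ((long long)chunk_sectors -
		       ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	else						/* generic flow    */
		max = ((long long)chunk_sectors -
		       (sector % chunk_sectors + bio_sectors)) << 9;

	if (max < 0)
		max = 0;	/* bio_add cannot handle a negative return */
	printf("%lld bytes may still be merged\n", max);	/* 38912 */
	return 0;
}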
...@@ -277,84 +306,28 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks) ...@@ -277,84 +306,28 @@ static sector_t raid0_size(mddev_t *mddev, sector_t sectors, int raid_disks)
return array_sectors; return array_sectors;
} }
static int raid0_run (mddev_t *mddev) static int raid0_run(mddev_t *mddev)
{ {
unsigned cur=0, i=0, nb_zone; int ret;
s64 sectors;
raid0_conf_t *conf;
if (mddev->chunk_size == 0) { if (mddev->chunk_sectors == 0) {
printk(KERN_ERR "md/raid0: non-zero chunk size required.\n"); printk(KERN_ERR "md/raid0: chunk size must be set.\n");
return -EINVAL; return -EINVAL;
} }
printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n", if (md_check_no_bitmap(mddev))
mdname(mddev), return -EINVAL;
mddev->chunk_size >> 9, blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors);
(mddev->chunk_size>>1)-1);
blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
mddev->queue->queue_lock = &mddev->queue->__queue_lock; mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL); ret = create_strip_zones(mddev);
if (!conf) if (ret < 0)
goto out; return ret;
mddev->private = (void *)conf;
conf->strip_zone = NULL;
conf->devlist = NULL;
if (create_strip_zones (mddev))
goto out_free_conf;
/* calculate array device size */ /* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0)); md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
printk(KERN_INFO "raid0 : md_size is %llu sectors.\n", printk(KERN_INFO "raid0 : md_size is %llu sectors.\n",
(unsigned long long)mddev->array_sectors); (unsigned long long)mddev->array_sectors);
printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n",
(unsigned long long)conf->spacing);
{
sector_t s = raid0_size(mddev, 0, 0);
sector_t space = conf->spacing;
int round;
conf->sector_shift = 0;
if (sizeof(sector_t) > sizeof(u32)) {
/*shift down space and s so that sector_div will work */
while (space > (sector_t) (~(u32)0)) {
s >>= 1;
space >>= 1;
s += 1; /* force round-up */
conf->sector_shift++;
}
}
round = sector_div(s, (u32)space) ? 1 : 0;
nb_zone = s + round;
}
printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone);
printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n",
nb_zone*sizeof(struct strip_zone*));
conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
if (!conf->hash_table)
goto out_free_conf;
sectors = conf->strip_zone[cur].sectors;
conf->hash_table[0] = conf->strip_zone + cur;
for (i=1; i< nb_zone; i++) {
while (sectors <= conf->spacing) {
cur++;
sectors += conf->strip_zone[cur].sectors;
}
sectors -= conf->spacing;
conf->hash_table[i] = conf->strip_zone + cur;
}
if (conf->sector_shift) {
conf->spacing >>= conf->sector_shift;
/* round spacing up so when we divide by it, we
* err on the side of too-low, which is safest
*/
conf->spacing++;
}
/* calculate the max read-ahead size. /* calculate the max read-ahead size.
* For read-ahead of large files to be effective, we need to * For read-ahead of large files to be effective, we need to
* readahead at least twice a whole stripe. i.e. number of devices * readahead at least twice a whole stripe. i.e. number of devices
...@@ -365,48 +338,107 @@ static int raid0_run (mddev_t *mddev) ...@@ -365,48 +338,107 @@ static int raid0_run (mddev_t *mddev)
* chunksize should be used in that case. * chunksize should be used in that case.
*/ */
{ {
int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE; int stripe = mddev->raid_disks *
(mddev->chunk_sectors << 9) / PAGE_SIZE;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
mddev->queue->backing_dev_info.ra_pages = 2* stripe; mddev->queue->backing_dev_info.ra_pages = 2* stripe;
} }
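Note: with the new units, stripe = raid_disks * (chunk_sectors << 9) / PAGE_SIZE. As an illustrative example: 4 disks with 64 KiB chunks and 4 KiB pages give stripe = 4 * 65536 / 4096 = 64 pages, so ra_pages is raised to at least 128 pages (512 KiB), i.e. two full stripes, as the comment above requires.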
blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec); blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
dump_zones(mddev);
return 0; return 0;
}
out_free_conf: static int raid0_stop(mddev_t *mddev)
{
raid0_conf_t *conf = mddev->private;
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
kfree(conf->strip_zone); kfree(conf->strip_zone);
kfree(conf->devlist); kfree(conf->devlist);
kfree(conf); kfree(conf);
mddev->private = NULL; mddev->private = NULL;
out: return 0;
return -ENOMEM;
} }
static int raid0_stop (mddev_t *mddev) /* Find the zone which holds a particular offset
* Update *sectorp to be an offset in that zone
*/
static struct strip_zone *find_zone(struct raid0_private_data *conf,
sector_t *sectorp)
{ {
raid0_conf_t *conf = mddev_to_conf(mddev); int i;
struct strip_zone *z = conf->strip_zone;
sector_t sector = *sectorp;
for (i = 0; i < conf->nr_strip_zones; i++)
if (sector < z[i].zone_end) {
if (i)
*sectorp = sector - z[i-1].zone_end;
return z + i;
}
BUG();
}
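Note: the new find_zone() replaces the old hash-table lookup. zone_end now stores the cumulative end of each zone, so a linear scan locates the zone and subtracting the previous zone_end yields the offset inside it. A minimal user-space sketch of that lookup (types and values are illustrative, not part of this commit):

#include <assert.h>
#include <stdio.h>

typedef unsigned long long sector_t;

struct zone { sector_t zone_end; };	/* cumulative end of the zone, in sectors */

static int find_zone_idx(const struct zone *z, int nr, sector_t *sectorp)
{
	sector_t sector = *sectorp;
	int i;

	for (i = 0; i < nr; i++)
		if (sector < z[i].zone_end) {
			if (i)
				*sectorp = sector - z[i - 1].zone_end;
			return i;
		}
	assert(0);	/* callers guarantee the sector is mapped */
	return -1;
}

int main(void)
{
	/* two zones: [0, 1000) and [1000, 1600) */
	struct zone z[2] = { { 1000 }, { 1600 } };
	sector_t s = 1250;
	int i = find_zone_idx(z, 2, &s);

	printf("zone %d, offset %llu\n", i, s);	/* zone 1, offset 250 */
	return 0;
}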
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ /*
kfree(conf->hash_table); * remaps the bio to the target device. We separate two flows:
conf->hash_table = NULL; * a power-of-2 flow and a general flow, for the sake of performance.
kfree(conf->strip_zone); */
conf->strip_zone = NULL; static mdk_rdev_t *map_sector(mddev_t *mddev, struct strip_zone *zone,
kfree(conf); sector_t sector, sector_t *sector_offset)
mddev->private = NULL; {
unsigned int sect_in_chunk;
sector_t chunk;
raid0_conf_t *conf = mddev->private;
unsigned int chunk_sects = mddev->chunk_sectors;
if (is_power_of_2(chunk_sects)) {
int chunksect_bits = ffz(~chunk_sects);
/* find the sector offset inside the chunk */
sect_in_chunk = sector & (chunk_sects - 1);
sector >>= chunksect_bits;
/* chunk in zone */
chunk = *sector_offset;
/* quotient is the chunk in real device*/
sector_div(chunk, zone->nb_dev << chunksect_bits);
} else{
sect_in_chunk = sector_div(sector, chunk_sects);
chunk = *sector_offset;
sector_div(chunk, chunk_sects * zone->nb_dev);
}
/*
* position the bio over the real device
* real sector = chunk in device + start of the zone
* + the position in the chunk
*/
*sector_offset = (chunk * chunk_sects) + sect_in_chunk;
return conf->devlist[(zone - conf->strip_zone)*mddev->raid_disks
+ sector_div(sector, zone->nb_dev)];
}
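Note: map_sector() keeps two flows because sector_div() is comparatively expensive: shifts and masks when chunk_sectors is a power of two, real division otherwise. A simplified user-space model of the striping arithmetic (illustrative values; the real code additionally adds zone->dev_start and the rdev's data_offset, and __builtin_ctz stands in for the kernel's shift calculation):

#include <stdio.h>

int main(void)
{
	unsigned long long zone_offset = 5000;	/* sector offset inside the zone */
	unsigned int chunk_sects = 128;		/* 64 KiB chunks                 */
	unsigned int nb_dev = 3;		/* devices in this zone          */
	unsigned long long global_chunk, dev, chunk_on_dev, sect_in_chunk;

	if ((chunk_sects & (chunk_sects - 1)) == 0) {	/* power-of-2 flow */
		int bits = __builtin_ctz(chunk_sects);
		sect_in_chunk = zone_offset & (chunk_sects - 1);
		global_chunk  = zone_offset >> bits;
	} else {					/* general flow */
		sect_in_chunk = zone_offset % chunk_sects;
		global_chunk  = zone_offset / chunk_sects;
	}
	dev          = global_chunk % nb_dev;	/* which member device   */
	chunk_on_dev = global_chunk / nb_dev;	/* which chunk on it     */

	/* sector on the chosen device (before dev_start / data_offset) */
	printf("dev %llu, sector %llu\n",
	       dev, chunk_on_dev * chunk_sects + sect_in_chunk);	/* dev 0, 1672 */
	return 0;
}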
return 0; /*
* Is the IO distributed over 1 or more chunks?
*/
static inline int is_io_in_chunk_boundary(mddev_t *mddev,
unsigned int chunk_sects, struct bio *bio)
{
if (likely(is_power_of_2(chunk_sects))) {
return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+ (bio->bi_size >> 9));
} else{
sector_t sector = bio->bi_sector;
return chunk_sects >= (sector_div(sector, chunk_sects)
+ (bio->bi_size >> 9));
}
} }
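Note: a worked example with illustrative values: with chunk_sects = 128, a bio starting at sector 120 with 16 sectors of data gives 120 % 128 + 16 = 136 > 128, so it crosses a chunk boundary and must be split; the same bio starting at sector 100 gives 100 + 16 = 116 <= 128 and is mapped directly.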
static int raid0_make_request (struct request_queue *q, struct bio *bio) static int raid0_make_request(struct request_queue *q, struct bio *bio)
{ {
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
unsigned int sect_in_chunk, chunksect_bits, chunk_sects; unsigned int chunk_sects;
raid0_conf_t *conf = mddev_to_conf(mddev); sector_t sector_offset;
struct strip_zone *zone; struct strip_zone *zone;
mdk_rdev_t *tmp_dev; mdk_rdev_t *tmp_dev;
sector_t chunk;
sector_t sector, rsect;
const int rw = bio_data_dir(bio); const int rw = bio_data_dir(bio);
int cpu; int cpu;
...@@ -421,11 +453,9 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) ...@@ -421,11 +453,9 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
bio_sectors(bio)); bio_sectors(bio));
part_stat_unlock(); part_stat_unlock();
chunk_sects = mddev->chunk_size >> 9; chunk_sects = mddev->chunk_sectors;
chunksect_bits = ffz(~chunk_sects); if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
sector = bio->bi_sector; sector_t sector = bio->bi_sector;
if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
struct bio_pair *bp; struct bio_pair *bp;
/* Sanity check -- queue functions should prevent this happening */ /* Sanity check -- queue functions should prevent this happening */
if (bio->bi_vcnt != 1 || if (bio->bi_vcnt != 1 ||
...@@ -434,7 +464,12 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) ...@@ -434,7 +464,12 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
/* This is a one page bio that upper layers /* This is a one page bio that upper layers
* refuse to split for us, so we need to split it. * refuse to split for us, so we need to split it.
*/ */
bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1))); if (likely(is_power_of_2(chunk_sects)))
bp = bio_split(bio, chunk_sects - (sector &
(chunk_sects-1)));
else
bp = bio_split(bio, chunk_sects -
sector_div(sector, chunk_sects));
if (raid0_make_request(q, &bp->bio1)) if (raid0_make_request(q, &bp->bio1))
generic_make_request(&bp->bio1); generic_make_request(&bp->bio1);
if (raid0_make_request(q, &bp->bio2)) if (raid0_make_request(q, &bp->bio2))
...@@ -443,34 +478,14 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) ...@@ -443,34 +478,14 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
bio_pair_release(bp); bio_pair_release(bp);
return 0; return 0;
} }
{
sector_t x = sector >> conf->sector_shift;
sector_div(x, (u32)conf->spacing);
zone = conf->hash_table[x];
}
while (sector >= zone->zone_start + zone->sectors) sector_offset = bio->bi_sector;
zone++; zone = find_zone(mddev->private, &sector_offset);
tmp_dev = map_sector(mddev, zone, bio->bi_sector,
sect_in_chunk = bio->bi_sector & (chunk_sects - 1); &sector_offset);
{
sector_t x = (sector - zone->zone_start) >> chunksect_bits;
sector_div(x, zone->nb_dev);
chunk = x;
x = sector >> chunksect_bits;
tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
}
rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk;
bio->bi_bdev = tmp_dev->bdev; bio->bi_bdev = tmp_dev->bdev;
bio->bi_sector = rsect + tmp_dev->data_offset; bio->bi_sector = sector_offset + zone->dev_start +
tmp_dev->data_offset;
/* /*
* Let the main block layer submit the IO and resolve recursion: * Let the main block layer submit the IO and resolve recursion:
*/ */
...@@ -485,31 +500,35 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) ...@@ -485,31 +500,35 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio)
return 0; return 0;
} }
static void raid0_status (struct seq_file *seq, mddev_t *mddev) static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{ {
#undef MD_DEBUG #undef MD_DEBUG
#ifdef MD_DEBUG #ifdef MD_DEBUG
int j, k, h; int j, k, h;
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
raid0_conf_t *conf = mddev_to_conf(mddev); raid0_conf_t *conf = mddev->private;
sector_t zone_size;
sector_t zone_start = 0;
h = 0; h = 0;
for (j = 0; j < conf->nr_strip_zones; j++) { for (j = 0; j < conf->nr_strip_zones; j++) {
seq_printf(seq, " z%d", j); seq_printf(seq, " z%d", j);
if (conf->hash_table[h] == conf->strip_zone+j)
seq_printf(seq, "(h%d)", h++);
seq_printf(seq, "=["); seq_printf(seq, "=[");
for (k = 0; k < conf->strip_zone[j].nb_dev; k++) for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
seq_printf(seq, "%s/", bdevname( seq_printf(seq, "%s/", bdevname(
conf->strip_zone[j].dev[k]->bdev,b)); conf->devlist[j*mddev->raid_disks + k]
->bdev, b));
seq_printf(seq, "] zs=%d ds=%d s=%d\n",
conf->strip_zone[j].zone_start, zone_size = conf->strip_zone[j].zone_end - zone_start;
conf->strip_zone[j].dev_start, seq_printf(seq, "] ze=%lld ds=%lld s=%lld\n",
conf->strip_zone[j].sectors); (unsigned long long)zone_start>>1,
(unsigned long long)conf->strip_zone[j].dev_start>>1,
(unsigned long long)zone_size>>1);
zone_start = conf->strip_zone[j].zone_end;
} }
#endif #endif
seq_printf(seq, " %dk chunks", mddev->chunk_size/1024); seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
return; return;
} }
......
...@@ -3,26 +3,18 @@ ...@@ -3,26 +3,18 @@
struct strip_zone struct strip_zone
{ {
sector_t zone_start; /* Zone offset in md_dev (in sectors) */ sector_t zone_end; /* Start of the next zone (in sectors) */
sector_t dev_start; /* Zone offset in real dev (in sectors) */ sector_t dev_start; /* Zone offset in real dev (in sectors) */
sector_t sectors; /* Zone size in sectors */
int nb_dev; /* # of devices attached to the zone */ int nb_dev; /* # of devices attached to the zone */
mdk_rdev_t **dev; /* Devices attached to the zone */
}; };
struct raid0_private_data struct raid0_private_data
{ {
struct strip_zone **hash_table; /* Table of indexes into strip_zone */
struct strip_zone *strip_zone; struct strip_zone *strip_zone;
mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
int nr_strip_zones; int nr_strip_zones;
sector_t spacing;
int sector_shift; /* shift this before divide by spacing */
}; };
typedef struct raid0_private_data raid0_conf_t; typedef struct raid0_private_data raid0_conf_t;
#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private)
#endif #endif
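Note: together with the create_strip_zones() changes above, strip_zone loses its private dev array and size field: zone sizes fall out of consecutive zone_end values, and the rdev for slot j of zone i lives at devlist[i * raid_disks + j]. A self-contained user-space sketch of the new layout (names and numbers are illustrative, not this commit's code):

#include <stdio.h>

typedef unsigned long long sector_t;
struct rdev { const char *name; };
struct strip_zone { sector_t zone_end; sector_t dev_start; };

int main(void)
{
	struct rdev a = { "sda" }, b = { "sdb" }, c = { "sdc" };
	int raid_disks = 3, nr_zones = 2;
	/* zone 0 spans all three devices, zone 1 only the largest one */
	struct rdev *devlist[6] = { &a, &b, &c,  &c, NULL, NULL };
	struct strip_zone zones[2] = {
		{ .zone_end = 3000, .dev_start = 0    },
		{ .zone_end = 3500, .dev_start = 1000 },
	};

	for (int i = 0; i < nr_zones; i++) {
		sector_t start = i ? zones[i - 1].zone_end : 0;
		printf("zone %d: %llu sectors, dev_start %llu, first dev %s\n",
		       i, zones[i].zone_end - start, zones[i].dev_start,
		       devlist[i * raid_disks + 0]->name);
	}
	return 0;
}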
...@@ -182,7 +182,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio) ...@@ -182,7 +182,7 @@ static void put_all_bios(conf_t *conf, r1bio_t *r1_bio)
static void free_r1bio(r1bio_t *r1_bio) static void free_r1bio(r1bio_t *r1_bio)
{ {
conf_t *conf = mddev_to_conf(r1_bio->mddev); conf_t *conf = r1_bio->mddev->private;
/* /*
* Wake up any possible resync thread that waits for the device * Wake up any possible resync thread that waits for the device
...@@ -196,7 +196,7 @@ static void free_r1bio(r1bio_t *r1_bio) ...@@ -196,7 +196,7 @@ static void free_r1bio(r1bio_t *r1_bio)
static void put_buf(r1bio_t *r1_bio) static void put_buf(r1bio_t *r1_bio)
{ {
conf_t *conf = mddev_to_conf(r1_bio->mddev); conf_t *conf = r1_bio->mddev->private;
int i; int i;
for (i=0; i<conf->raid_disks; i++) { for (i=0; i<conf->raid_disks; i++) {
...@@ -214,7 +214,7 @@ static void reschedule_retry(r1bio_t *r1_bio) ...@@ -214,7 +214,7 @@ static void reschedule_retry(r1bio_t *r1_bio)
{ {
unsigned long flags; unsigned long flags;
mddev_t *mddev = r1_bio->mddev; mddev_t *mddev = r1_bio->mddev;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
list_add(&r1_bio->retry_list, &conf->retry_list); list_add(&r1_bio->retry_list, &conf->retry_list);
...@@ -253,7 +253,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio) ...@@ -253,7 +253,7 @@ static void raid_end_bio_io(r1bio_t *r1_bio)
*/ */
static inline void update_head_pos(int disk, r1bio_t *r1_bio) static inline void update_head_pos(int disk, r1bio_t *r1_bio)
{ {
conf_t *conf = mddev_to_conf(r1_bio->mddev); conf_t *conf = r1_bio->mddev->private;
conf->mirrors[disk].head_position = conf->mirrors[disk].head_position =
r1_bio->sector + (r1_bio->sectors); r1_bio->sector + (r1_bio->sectors);
...@@ -264,7 +264,7 @@ static void raid1_end_read_request(struct bio *bio, int error) ...@@ -264,7 +264,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int mirror; int mirror;
conf_t *conf = mddev_to_conf(r1_bio->mddev); conf_t *conf = r1_bio->mddev->private;
mirror = r1_bio->read_disk; mirror = r1_bio->read_disk;
/* /*
...@@ -309,7 +309,7 @@ static void raid1_end_write_request(struct bio *bio, int error) ...@@ -309,7 +309,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
conf_t *conf = mddev_to_conf(r1_bio->mddev); conf_t *conf = r1_bio->mddev->private;
struct bio *to_put = NULL; struct bio *to_put = NULL;
...@@ -541,7 +541,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) ...@@ -541,7 +541,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
static void unplug_slaves(mddev_t *mddev) static void unplug_slaves(mddev_t *mddev)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
int i; int i;
rcu_read_lock(); rcu_read_lock();
...@@ -573,7 +573,7 @@ static void raid1_unplug(struct request_queue *q) ...@@ -573,7 +573,7 @@ static void raid1_unplug(struct request_queue *q)
static int raid1_congested(void *data, int bits) static int raid1_congested(void *data, int bits)
{ {
mddev_t *mddev = data; mddev_t *mddev = data;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
int i, ret = 0; int i, ret = 0;
rcu_read_lock(); rcu_read_lock();
...@@ -772,7 +772,7 @@ static struct page **alloc_behind_pages(struct bio *bio) ...@@ -772,7 +772,7 @@ static struct page **alloc_behind_pages(struct bio *bio)
static int make_request(struct request_queue *q, struct bio * bio) static int make_request(struct request_queue *q, struct bio * bio)
{ {
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
mirror_info_t *mirror; mirror_info_t *mirror;
r1bio_t *r1_bio; r1bio_t *r1_bio;
struct bio *read_bio; struct bio *read_bio;
...@@ -991,7 +991,7 @@ static int make_request(struct request_queue *q, struct bio * bio) ...@@ -991,7 +991,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
static void status(struct seq_file *seq, mddev_t *mddev) static void status(struct seq_file *seq, mddev_t *mddev)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
int i; int i;
seq_printf(seq, " [%d/%d] [", conf->raid_disks, seq_printf(seq, " [%d/%d] [", conf->raid_disks,
...@@ -1010,7 +1010,7 @@ static void status(struct seq_file *seq, mddev_t *mddev) ...@@ -1010,7 +1010,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
static void error(mddev_t *mddev, mdk_rdev_t *rdev) static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{ {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
/* /*
* If it is not operational, then we have already marked it as dead * If it is not operational, then we have already marked it as dead
...@@ -1214,7 +1214,7 @@ static void end_sync_write(struct bio *bio, int error) ...@@ -1214,7 +1214,7 @@ static void end_sync_write(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private); r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
mddev_t *mddev = r1_bio->mddev; mddev_t *mddev = r1_bio->mddev;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
int i; int i;
int mirror=0; int mirror=0;
...@@ -1248,7 +1248,7 @@ static void end_sync_write(struct bio *bio, int error) ...@@ -1248,7 +1248,7 @@ static void end_sync_write(struct bio *bio, int error)
static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
int i; int i;
int disks = conf->raid_disks; int disks = conf->raid_disks;
struct bio *bio, *wbio; struct bio *bio, *wbio;
...@@ -1562,7 +1562,7 @@ static void raid1d(mddev_t *mddev) ...@@ -1562,7 +1562,7 @@ static void raid1d(mddev_t *mddev)
r1bio_t *r1_bio; r1bio_t *r1_bio;
struct bio *bio; struct bio *bio;
unsigned long flags; unsigned long flags;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list; struct list_head *head = &conf->retry_list;
int unplug=0; int unplug=0;
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
...@@ -1585,7 +1585,7 @@ static void raid1d(mddev_t *mddev) ...@@ -1585,7 +1585,7 @@ static void raid1d(mddev_t *mddev)
spin_unlock_irqrestore(&conf->device_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
mddev = r1_bio->mddev; mddev = r1_bio->mddev;
conf = mddev_to_conf(mddev); conf = mddev->private;
if (test_bit(R1BIO_IsSync, &r1_bio->state)) { if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
sync_request_write(mddev, r1_bio); sync_request_write(mddev, r1_bio);
unplug = 1; unplug = 1;
...@@ -1706,7 +1706,7 @@ static int init_resync(conf_t *conf) ...@@ -1706,7 +1706,7 @@ static int init_resync(conf_t *conf)
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
r1bio_t *r1_bio; r1bio_t *r1_bio;
struct bio *bio; struct bio *bio;
sector_t max_sector, nr_sectors; sector_t max_sector, nr_sectors;
...@@ -2052,6 +2052,10 @@ static int run(mddev_t *mddev) ...@@ -2052,6 +2052,10 @@ static int run(mddev_t *mddev)
goto out_free_conf; goto out_free_conf;
} }
if (mddev->recovery_cp != MaxSector)
printk(KERN_NOTICE "raid1: %s is not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO printk(KERN_INFO
"raid1: raid set %s active with %d out of %d mirrors\n", "raid1: raid set %s active with %d out of %d mirrors\n",
mdname(mddev), mddev->raid_disks - mddev->degraded, mdname(mddev), mddev->raid_disks - mddev->degraded,
...@@ -2087,7 +2091,7 @@ static int run(mddev_t *mddev) ...@@ -2087,7 +2091,7 @@ static int run(mddev_t *mddev)
static int stop(mddev_t *mddev) static int stop(mddev_t *mddev)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
struct bitmap *bitmap = mddev->bitmap; struct bitmap *bitmap = mddev->bitmap;
int behind_wait = 0; int behind_wait = 0;
...@@ -2155,16 +2159,16 @@ static int raid1_reshape(mddev_t *mddev) ...@@ -2155,16 +2159,16 @@ static int raid1_reshape(mddev_t *mddev)
mempool_t *newpool, *oldpool; mempool_t *newpool, *oldpool;
struct pool_info *newpoolinfo; struct pool_info *newpoolinfo;
mirror_info_t *newmirrors; mirror_info_t *newmirrors;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
int cnt, raid_disks; int cnt, raid_disks;
unsigned long flags; unsigned long flags;
int d, d2, err; int d, d2, err;
/* Cannot change chunk_size, layout, or level */ /* Cannot change chunk_size, layout, or level */
if (mddev->chunk_size != mddev->new_chunk || if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
mddev->layout != mddev->new_layout || mddev->layout != mddev->new_layout ||
mddev->level != mddev->new_level) { mddev->level != mddev->new_level) {
mddev->new_chunk = mddev->chunk_size; mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->new_layout = mddev->layout; mddev->new_layout = mddev->layout;
mddev->new_level = mddev->level; mddev->new_level = mddev->level;
return -EINVAL; return -EINVAL;
...@@ -2252,7 +2256,7 @@ static int raid1_reshape(mddev_t *mddev) ...@@ -2252,7 +2256,7 @@ static int raid1_reshape(mddev_t *mddev)
static void raid1_quiesce(mddev_t *mddev, int state) static void raid1_quiesce(mddev_t *mddev, int state)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
switch(state) { switch(state) {
case 1: case 1:
......
...@@ -63,12 +63,6 @@ struct r1_private_data_s { ...@@ -63,12 +63,6 @@ struct r1_private_data_s {
typedef struct r1_private_data_s conf_t; typedef struct r1_private_data_s conf_t;
/*
* this is the only point in the RAID code where we violate
* C type safety. mddev->private is an 'opaque' pointer.
*/
#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
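Note: the removed mddev_to_conf() wrapper only cast the opaque mddev->private pointer; since a void * converts implicitly in C, call sites now assign it directly, e.g. conf_t *conf = mddev->private; — the pattern used in the raid0, raid1, raid10 and raid5 hunks of this merge.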
/* /*
* this is our 'private' RAID1 bio. * this is our 'private' RAID1 bio.
* *
......
...@@ -188,7 +188,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio) ...@@ -188,7 +188,7 @@ static void put_all_bios(conf_t *conf, r10bio_t *r10_bio)
static void free_r10bio(r10bio_t *r10_bio) static void free_r10bio(r10bio_t *r10_bio)
{ {
conf_t *conf = mddev_to_conf(r10_bio->mddev); conf_t *conf = r10_bio->mddev->private;
/* /*
* Wake up any possible resync thread that waits for the device * Wake up any possible resync thread that waits for the device
...@@ -202,7 +202,7 @@ static void free_r10bio(r10bio_t *r10_bio) ...@@ -202,7 +202,7 @@ static void free_r10bio(r10bio_t *r10_bio)
static void put_buf(r10bio_t *r10_bio) static void put_buf(r10bio_t *r10_bio)
{ {
conf_t *conf = mddev_to_conf(r10_bio->mddev); conf_t *conf = r10_bio->mddev->private;
mempool_free(r10_bio, conf->r10buf_pool); mempool_free(r10_bio, conf->r10buf_pool);
...@@ -213,7 +213,7 @@ static void reschedule_retry(r10bio_t *r10_bio) ...@@ -213,7 +213,7 @@ static void reschedule_retry(r10bio_t *r10_bio)
{ {
unsigned long flags; unsigned long flags;
mddev_t *mddev = r10_bio->mddev; mddev_t *mddev = r10_bio->mddev;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
list_add(&r10_bio->retry_list, &conf->retry_list); list_add(&r10_bio->retry_list, &conf->retry_list);
...@@ -245,7 +245,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio) ...@@ -245,7 +245,7 @@ static void raid_end_bio_io(r10bio_t *r10_bio)
*/ */
static inline void update_head_pos(int slot, r10bio_t *r10_bio) static inline void update_head_pos(int slot, r10bio_t *r10_bio)
{ {
conf_t *conf = mddev_to_conf(r10_bio->mddev); conf_t *conf = r10_bio->mddev->private;
conf->mirrors[r10_bio->devs[slot].devnum].head_position = conf->mirrors[r10_bio->devs[slot].devnum].head_position =
r10_bio->devs[slot].addr + (r10_bio->sectors); r10_bio->devs[slot].addr + (r10_bio->sectors);
...@@ -256,7 +256,7 @@ static void raid10_end_read_request(struct bio *bio, int error) ...@@ -256,7 +256,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
int slot, dev; int slot, dev;
conf_t *conf = mddev_to_conf(r10_bio->mddev); conf_t *conf = r10_bio->mddev->private;
slot = r10_bio->read_slot; slot = r10_bio->read_slot;
...@@ -297,7 +297,7 @@ static void raid10_end_write_request(struct bio *bio, int error) ...@@ -297,7 +297,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
int slot, dev; int slot, dev;
conf_t *conf = mddev_to_conf(r10_bio->mddev); conf_t *conf = r10_bio->mddev->private;
for (slot = 0; slot < conf->copies; slot++) for (slot = 0; slot < conf->copies; slot++)
if (r10_bio->devs[slot].bio == bio) if (r10_bio->devs[slot].bio == bio)
...@@ -461,7 +461,7 @@ static int raid10_mergeable_bvec(struct request_queue *q, ...@@ -461,7 +461,7 @@ static int raid10_mergeable_bvec(struct request_queue *q,
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max; int max;
unsigned int chunk_sectors = mddev->chunk_size >> 9; unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9; unsigned int bio_sectors = bvm->bi_size >> 9;
max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
...@@ -596,7 +596,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio) ...@@ -596,7 +596,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
static void unplug_slaves(mddev_t *mddev) static void unplug_slaves(mddev_t *mddev)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
int i; int i;
rcu_read_lock(); rcu_read_lock();
...@@ -628,7 +628,7 @@ static void raid10_unplug(struct request_queue *q) ...@@ -628,7 +628,7 @@ static void raid10_unplug(struct request_queue *q)
static int raid10_congested(void *data, int bits) static int raid10_congested(void *data, int bits)
{ {
mddev_t *mddev = data; mddev_t *mddev = data;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
int i, ret = 0; int i, ret = 0;
rcu_read_lock(); rcu_read_lock();
...@@ -788,7 +788,7 @@ static void unfreeze_array(conf_t *conf) ...@@ -788,7 +788,7 @@ static void unfreeze_array(conf_t *conf)
static int make_request(struct request_queue *q, struct bio * bio) static int make_request(struct request_queue *q, struct bio * bio)
{ {
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
mirror_info_t *mirror; mirror_info_t *mirror;
r10bio_t *r10_bio; r10bio_t *r10_bio;
struct bio *read_bio; struct bio *read_bio;
...@@ -981,11 +981,11 @@ static int make_request(struct request_queue *q, struct bio * bio) ...@@ -981,11 +981,11 @@ static int make_request(struct request_queue *q, struct bio * bio)
static void status(struct seq_file *seq, mddev_t *mddev) static void status(struct seq_file *seq, mddev_t *mddev)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
int i; int i;
if (conf->near_copies < conf->raid_disks) if (conf->near_copies < conf->raid_disks)
seq_printf(seq, " %dK chunks", mddev->chunk_size/1024); seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
if (conf->near_copies > 1) if (conf->near_copies > 1)
seq_printf(seq, " %d near-copies", conf->near_copies); seq_printf(seq, " %d near-copies", conf->near_copies);
if (conf->far_copies > 1) { if (conf->far_copies > 1) {
...@@ -1006,7 +1006,7 @@ static void status(struct seq_file *seq, mddev_t *mddev) ...@@ -1006,7 +1006,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
static void error(mddev_t *mddev, mdk_rdev_t *rdev) static void error(mddev_t *mddev, mdk_rdev_t *rdev)
{ {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
/* /*
* If it is not operational, then we have already marked it as dead * If it is not operational, then we have already marked it as dead
...@@ -1215,7 +1215,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number) ...@@ -1215,7 +1215,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
static void end_sync_read(struct bio *bio, int error) static void end_sync_read(struct bio *bio, int error)
{ {
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
conf_t *conf = mddev_to_conf(r10_bio->mddev); conf_t *conf = r10_bio->mddev->private;
int i,d; int i,d;
for (i=0; i<conf->copies; i++) for (i=0; i<conf->copies; i++)
...@@ -1253,7 +1253,7 @@ static void end_sync_write(struct bio *bio, int error) ...@@ -1253,7 +1253,7 @@ static void end_sync_write(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private); r10bio_t * r10_bio = (r10bio_t *)(bio->bi_private);
mddev_t *mddev = r10_bio->mddev; mddev_t *mddev = r10_bio->mddev;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
int i,d; int i,d;
for (i = 0; i < conf->copies; i++) for (i = 0; i < conf->copies; i++)
...@@ -1300,7 +1300,7 @@ static void end_sync_write(struct bio *bio, int error) ...@@ -1300,7 +1300,7 @@ static void end_sync_write(struct bio *bio, int error)
*/ */
static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio) static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
int i, first; int i, first;
struct bio *tbio, *fbio; struct bio *tbio, *fbio;
...@@ -1400,7 +1400,7 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio) ...@@ -1400,7 +1400,7 @@ static void sync_request_write(mddev_t *mddev, r10bio_t *r10_bio)
static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio) static void recovery_request_write(mddev_t *mddev, r10bio_t *r10_bio)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
int i, d; int i, d;
struct bio *bio, *wbio; struct bio *bio, *wbio;
...@@ -1549,7 +1549,7 @@ static void raid10d(mddev_t *mddev) ...@@ -1549,7 +1549,7 @@ static void raid10d(mddev_t *mddev)
r10bio_t *r10_bio; r10bio_t *r10_bio;
struct bio *bio; struct bio *bio;
unsigned long flags; unsigned long flags;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list; struct list_head *head = &conf->retry_list;
int unplug=0; int unplug=0;
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
...@@ -1572,7 +1572,7 @@ static void raid10d(mddev_t *mddev) ...@@ -1572,7 +1572,7 @@ static void raid10d(mddev_t *mddev)
spin_unlock_irqrestore(&conf->device_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
mddev = r10_bio->mddev; mddev = r10_bio->mddev;
conf = mddev_to_conf(mddev); conf = mddev->private;
if (test_bit(R10BIO_IsSync, &r10_bio->state)) { if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
sync_request_write(mddev, r10_bio); sync_request_write(mddev, r10_bio);
unplug = 1; unplug = 1;
...@@ -1680,7 +1680,7 @@ static int init_resync(conf_t *conf) ...@@ -1680,7 +1680,7 @@ static int init_resync(conf_t *conf)
static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
r10bio_t *r10_bio; r10bio_t *r10_bio;
struct bio *biolist = NULL, *bio; struct bio *biolist = NULL, *bio;
sector_t max_sector, nr_sectors; sector_t max_sector, nr_sectors;
...@@ -2026,7 +2026,7 @@ static sector_t ...@@ -2026,7 +2026,7 @@ static sector_t
raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks) raid10_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{ {
sector_t size; sector_t size;
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
if (!raid_disks) if (!raid_disks)
raid_disks = mddev->raid_disks; raid_disks = mddev->raid_disks;
...@@ -2050,9 +2050,10 @@ static int run(mddev_t *mddev) ...@@ -2050,9 +2050,10 @@ static int run(mddev_t *mddev)
int nc, fc, fo; int nc, fc, fo;
sector_t stride, size; sector_t stride, size;
if (mddev->chunk_size < PAGE_SIZE) { if (mddev->chunk_sectors < (PAGE_SIZE >> 9) ||
!is_power_of_2(mddev->chunk_sectors)) {
printk(KERN_ERR "md/raid10: chunk size must be " printk(KERN_ERR "md/raid10: chunk size must be "
"at least PAGE_SIZE(%ld).\n", PAGE_SIZE); "at least PAGE_SIZE(%ld) and be a power of 2.\n", PAGE_SIZE);
return -EINVAL; return -EINVAL;
} }
...@@ -2095,8 +2096,8 @@ static int run(mddev_t *mddev) ...@@ -2095,8 +2096,8 @@ static int run(mddev_t *mddev)
conf->far_copies = fc; conf->far_copies = fc;
conf->copies = nc*fc; conf->copies = nc*fc;
conf->far_offset = fo; conf->far_offset = fo;
conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1; conf->chunk_mask = mddev->chunk_sectors - 1;
conf->chunk_shift = ffz(~mddev->chunk_size) - 9; conf->chunk_shift = ffz(~mddev->chunk_sectors);
size = mddev->dev_sectors >> conf->chunk_shift; size = mddev->dev_sectors >> conf->chunk_shift;
sector_div(size, fc); sector_div(size, fc);
size = size * conf->raid_disks; size = size * conf->raid_disks;
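Note: because chunk_mask and chunk_shift are now derived from chunk_sectors rather than the byte-based chunk_size, the old "- 9" correction disappears; for a power-of-two value, ffz(~x) is simply log2(x). A tiny user-space check (illustrative; __builtin_ctz stands in for the kernel's ffz(~x)):

#include <stdio.h>

int main(void)
{
	int chunk_sectors = 128;				/* 64 KiB chunks, power of 2 */
	int chunk_shift = __builtin_ctz(chunk_sectors);		/* == ffz(~x) == 7           */
	int chunk_mask  = chunk_sectors - 1;			/* 0x7f                      */

	printf("sector 5000 -> chunk %d, offset-in-chunk %d\n",
	       5000 >> chunk_shift, 5000 & chunk_mask);		/* chunk 39, offset 8 */
	return 0;
}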
...@@ -2185,6 +2186,10 @@ static int run(mddev_t *mddev) ...@@ -2185,6 +2186,10 @@ static int run(mddev_t *mddev)
goto out_free_conf; goto out_free_conf;
} }
if (mddev->recovery_cp != MaxSector)
printk(KERN_NOTICE "raid10: %s is not clean"
" -- starting background reconstruction\n",
mdname(mddev));
printk(KERN_INFO printk(KERN_INFO
"raid10: raid set %s active with %d out of %d devices\n", "raid10: raid set %s active with %d out of %d devices\n",
mdname(mddev), mddev->raid_disks - mddev->degraded, mdname(mddev), mddev->raid_disks - mddev->degraded,
...@@ -2204,7 +2209,8 @@ static int run(mddev_t *mddev) ...@@ -2204,7 +2209,8 @@ static int run(mddev_t *mddev)
* maybe... * maybe...
*/ */
{ {
int stripe = conf->raid_disks * (mddev->chunk_size / PAGE_SIZE); int stripe = conf->raid_disks *
((mddev->chunk_sectors << 9) / PAGE_SIZE);
stripe /= conf->near_copies; stripe /= conf->near_copies;
if (mddev->queue->backing_dev_info.ra_pages < 2* stripe) if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
mddev->queue->backing_dev_info.ra_pages = 2* stripe; mddev->queue->backing_dev_info.ra_pages = 2* stripe;
...@@ -2227,7 +2233,7 @@ static int run(mddev_t *mddev) ...@@ -2227,7 +2233,7 @@ static int run(mddev_t *mddev)
static int stop(mddev_t *mddev) static int stop(mddev_t *mddev)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
raise_barrier(conf, 0); raise_barrier(conf, 0);
lower_barrier(conf); lower_barrier(conf);
...@@ -2245,7 +2251,7 @@ static int stop(mddev_t *mddev) ...@@ -2245,7 +2251,7 @@ static int stop(mddev_t *mddev)
static void raid10_quiesce(mddev_t *mddev, int state) static void raid10_quiesce(mddev_t *mddev, int state)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev->private;
switch(state) { switch(state) {
case 1: case 1:
......
...@@ -61,12 +61,6 @@ struct r10_private_data_s { ...@@ -61,12 +61,6 @@ struct r10_private_data_s {
typedef struct r10_private_data_s conf_t; typedef struct r10_private_data_s conf_t;
/*
* this is the only point in the RAID code where we violate
* C type safety. mddev->private is an 'opaque' pointer.
*/
#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
/* /*
* this is our 'private' RAID10 bio. * this is our 'private' RAID10 bio.
* *
......
...@@ -1274,8 +1274,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, ...@@ -1274,8 +1274,8 @@ static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
sector_t new_sector; sector_t new_sector;
int algorithm = previous ? conf->prev_algo int algorithm = previous ? conf->prev_algo
: conf->algorithm; : conf->algorithm;
int sectors_per_chunk = previous ? (conf->prev_chunk >> 9) int sectors_per_chunk = previous ? conf->prev_chunk_sectors
: (conf->chunk_size >> 9); : conf->chunk_sectors;
int raid_disks = previous ? conf->previous_raid_disks int raid_disks = previous ? conf->previous_raid_disks
: conf->raid_disks; : conf->raid_disks;
int data_disks = raid_disks - conf->max_degraded; int data_disks = raid_disks - conf->max_degraded;
...@@ -1480,8 +1480,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) ...@@ -1480,8 +1480,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
int raid_disks = sh->disks; int raid_disks = sh->disks;
int data_disks = raid_disks - conf->max_degraded; int data_disks = raid_disks - conf->max_degraded;
sector_t new_sector = sh->sector, check; sector_t new_sector = sh->sector, check;
int sectors_per_chunk = previous ? (conf->prev_chunk >> 9) int sectors_per_chunk = previous ? conf->prev_chunk_sectors
: (conf->chunk_size >> 9); : conf->chunk_sectors;
int algorithm = previous ? conf->prev_algo int algorithm = previous ? conf->prev_algo
: conf->algorithm; : conf->algorithm;
sector_t stripe; sector_t stripe;
...@@ -1997,8 +1997,7 @@ static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, ...@@ -1997,8 +1997,7 @@ static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
struct stripe_head *sh) struct stripe_head *sh)
{ {
int sectors_per_chunk = int sectors_per_chunk =
previous ? (conf->prev_chunk >> 9) previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
: (conf->chunk_size >> 9);
int dd_idx; int dd_idx;
int chunk_offset = sector_div(stripe, sectors_per_chunk); int chunk_offset = sector_div(stripe, sectors_per_chunk);
int disks = previous ? conf->previous_raid_disks : conf->raid_disks; int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
...@@ -3284,7 +3283,7 @@ static void activate_bit_delay(raid5_conf_t *conf) ...@@ -3284,7 +3283,7 @@ static void activate_bit_delay(raid5_conf_t *conf)
static void unplug_slaves(mddev_t *mddev) static void unplug_slaves(mddev_t *mddev)
{ {
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
int i; int i;
rcu_read_lock(); rcu_read_lock();
...@@ -3308,7 +3307,7 @@ static void unplug_slaves(mddev_t *mddev) ...@@ -3308,7 +3307,7 @@ static void unplug_slaves(mddev_t *mddev)
static void raid5_unplug_device(struct request_queue *q) static void raid5_unplug_device(struct request_queue *q)
{ {
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
...@@ -3327,7 +3326,7 @@ static void raid5_unplug_device(struct request_queue *q) ...@@ -3327,7 +3326,7 @@ static void raid5_unplug_device(struct request_queue *q)
static int raid5_congested(void *data, int bits) static int raid5_congested(void *data, int bits)
{ {
mddev_t *mddev = data; mddev_t *mddev = data;
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
/* No difference between reads and writes. Just check /* No difference between reads and writes. Just check
* how busy the stripe_cache is * how busy the stripe_cache is
...@@ -3352,14 +3351,14 @@ static int raid5_mergeable_bvec(struct request_queue *q, ...@@ -3352,14 +3351,14 @@ static int raid5_mergeable_bvec(struct request_queue *q,
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max; int max;
unsigned int chunk_sectors = mddev->chunk_size >> 9; unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bvm->bi_size >> 9; unsigned int bio_sectors = bvm->bi_size >> 9;
if ((bvm->bi_rw & 1) == WRITE) if ((bvm->bi_rw & 1) == WRITE)
return biovec->bv_len; /* always allow writes to be mergeable */ return biovec->bv_len; /* always allow writes to be mergeable */
if (mddev->new_chunk < mddev->chunk_size) if (mddev->new_chunk_sectors < mddev->chunk_sectors)
chunk_sectors = mddev->new_chunk >> 9; chunk_sectors = mddev->new_chunk_sectors;
max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
if (max < 0) max = 0; if (max < 0) max = 0;
if (max <= biovec->bv_len && bio_sectors == 0) if (max <= biovec->bv_len && bio_sectors == 0)
...@@ -3372,11 +3371,11 @@ static int raid5_mergeable_bvec(struct request_queue *q, ...@@ -3372,11 +3371,11 @@ static int raid5_mergeable_bvec(struct request_queue *q,
static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
{ {
sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
unsigned int chunk_sectors = mddev->chunk_size >> 9; unsigned int chunk_sectors = mddev->chunk_sectors;
unsigned int bio_sectors = bio->bi_size >> 9; unsigned int bio_sectors = bio->bi_size >> 9;
if (mddev->new_chunk < mddev->chunk_size) if (mddev->new_chunk_sectors < mddev->chunk_sectors)
chunk_sectors = mddev->new_chunk >> 9; chunk_sectors = mddev->new_chunk_sectors;
return chunk_sectors >= return chunk_sectors >=
((sector & (chunk_sectors - 1)) + bio_sectors); ((sector & (chunk_sectors - 1)) + bio_sectors);
} }
...@@ -3440,7 +3439,7 @@ static void raid5_align_endio(struct bio *bi, int error) ...@@ -3440,7 +3439,7 @@ static void raid5_align_endio(struct bio *bi, int error)
bio_put(bi); bio_put(bi);
mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
conf = mddev_to_conf(mddev); conf = mddev->private;
rdev = (void*)raid_bi->bi_next; rdev = (void*)raid_bi->bi_next;
raid_bi->bi_next = NULL; raid_bi->bi_next = NULL;
...@@ -3482,7 +3481,7 @@ static int bio_fits_rdev(struct bio *bi) ...@@ -3482,7 +3481,7 @@ static int bio_fits_rdev(struct bio *bi)
static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
{ {
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
unsigned int dd_idx; unsigned int dd_idx;
struct bio* align_bi; struct bio* align_bi;
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
...@@ -3599,7 +3598,7 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) ...@@ -3599,7 +3598,7 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
static int make_request(struct request_queue *q, struct bio * bi) static int make_request(struct request_queue *q, struct bio * bi)
{ {
mddev_t *mddev = q->queuedata; mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
int dd_idx; int dd_idx;
sector_t new_sector; sector_t new_sector;
sector_t logical_sector, last_sector; sector_t logical_sector, last_sector;
...@@ -3696,6 +3695,7 @@ static int make_request(struct request_queue *q, struct bio * bi) ...@@ -3696,6 +3695,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
if (must_retry) { if (must_retry) {
release_stripe(sh); release_stripe(sh);
schedule();
goto retry; goto retry;
} }
} }
...@@ -3791,10 +3791,10 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped ...@@ -3791,10 +3791,10 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
* If old and new chunk sizes differ, we need to process the * If old and new chunk sizes differ, we need to process the
* largest of these * largest of these
*/ */
if (mddev->new_chunk > mddev->chunk_size) if (mddev->new_chunk_sectors > mddev->chunk_sectors)
reshape_sectors = mddev->new_chunk / 512; reshape_sectors = mddev->new_chunk_sectors;
else else
reshape_sectors = mddev->chunk_size / 512; reshape_sectors = mddev->chunk_sectors;
/* we update the metadata when there is more than 3Meg /* we update the metadata when there is more than 3Meg
* in the block range (that is rather arbitrary, should * in the block range (that is rather arbitrary, should
...@@ -3917,7 +3917,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped ...@@ -3917,7 +3917,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
1, &dd_idx, NULL); 1, &dd_idx, NULL);
last_sector = last_sector =
raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
*(new_data_disks) - 1), * new_data_disks - 1),
1, &dd_idx, NULL); 1, &dd_idx, NULL);
if (last_sector >= mddev->dev_sectors) if (last_sector >= mddev->dev_sectors)
last_sector = mddev->dev_sectors - 1; last_sector = mddev->dev_sectors - 1;
...@@ -3946,7 +3946,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped ...@@ -3946,7 +3946,7 @@ static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped
wait_event(conf->wait_for_overlap, wait_event(conf->wait_for_overlap,
atomic_read(&conf->reshape_stripes) == 0); atomic_read(&conf->reshape_stripes) == 0);
mddev->reshape_position = conf->reshape_progress; mddev->reshape_position = conf->reshape_progress;
mddev->curr_resync_completed = mddev->curr_resync; mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
conf->reshape_checkpoint = jiffies; conf->reshape_checkpoint = jiffies;
set_bit(MD_CHANGE_DEVS, &mddev->flags); set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
...@@ -4129,7 +4129,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) ...@@ -4129,7 +4129,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
static void raid5d(mddev_t *mddev) static void raid5d(mddev_t *mddev)
{ {
struct stripe_head *sh; struct stripe_head *sh;
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
int handled; int handled;
pr_debug("+++ raid5d active\n"); pr_debug("+++ raid5d active\n");
...@@ -4185,7 +4185,7 @@ static void raid5d(mddev_t *mddev) ...@@ -4185,7 +4185,7 @@ static void raid5d(mddev_t *mddev)
static ssize_t static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page) raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{ {
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
if (conf) if (conf)
return sprintf(page, "%d\n", conf->max_nr_stripes); return sprintf(page, "%d\n", conf->max_nr_stripes);
else else
...@@ -4195,7 +4195,7 @@ raid5_show_stripe_cache_size(mddev_t *mddev, char *page) ...@@ -4195,7 +4195,7 @@ raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
static ssize_t static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{ {
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
unsigned long new; unsigned long new;
int err; int err;
...@@ -4233,7 +4233,7 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, ...@@ -4233,7 +4233,7 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
static ssize_t static ssize_t
raid5_show_preread_threshold(mddev_t *mddev, char *page) raid5_show_preread_threshold(mddev_t *mddev, char *page)
{ {
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
if (conf) if (conf)
return sprintf(page, "%d\n", conf->bypass_threshold); return sprintf(page, "%d\n", conf->bypass_threshold);
else else
...@@ -4243,7 +4243,7 @@ raid5_show_preread_threshold(mddev_t *mddev, char *page) ...@@ -4243,7 +4243,7 @@ raid5_show_preread_threshold(mddev_t *mddev, char *page)
static ssize_t static ssize_t
raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
{ {
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
unsigned long new; unsigned long new;
if (len >= PAGE_SIZE) if (len >= PAGE_SIZE)
return -EINVAL; return -EINVAL;
...@@ -4267,7 +4267,7 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, ...@@ -4267,7 +4267,7 @@ raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
static ssize_t static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page) stripe_cache_active_show(mddev_t *mddev, char *page)
{ {
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
if (conf) if (conf)
return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
else else
...@@ -4291,7 +4291,7 @@ static struct attribute_group raid5_attrs_group = { ...@@ -4291,7 +4291,7 @@ static struct attribute_group raid5_attrs_group = {
static sector_t static sector_t
raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{ {
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
if (!sectors) if (!sectors)
sectors = mddev->dev_sectors; sectors = mddev->dev_sectors;
...@@ -4303,8 +4303,8 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) ...@@ -4303,8 +4303,8 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
raid_disks = conf->previous_raid_disks; raid_disks = conf->previous_raid_disks;
} }
sectors &= ~((sector_t)mddev->chunk_size/512 - 1); sectors &= ~((sector_t)mddev->chunk_sectors - 1);
sectors &= ~((sector_t)mddev->new_chunk/512 - 1); sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
return sectors * (raid_disks - conf->max_degraded); return sectors * (raid_disks - conf->max_degraded);
} }
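Note: the two masks round the per-device size down to a whole number of both the old and the new chunk size before multiplying by the number of data disks. For example (illustrative figures): with chunk_sectors = 128, new_chunk_sectors = 256 and dev_sectors = 1,000,100, the first mask gives 1,000,064, the second 999,936; on a 6-device RAID5 (max_degraded = 1) the array size is then 999,936 * 5 = 4,999,680 sectors.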
...@@ -4336,9 +4336,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) ...@@ -4336,9 +4336,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) { if (!mddev->new_chunk_sectors ||
(mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
!is_power_of_2(mddev->new_chunk_sectors)) {
printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
mddev->new_chunk, mdname(mddev)); mddev->new_chunk_sectors << 9, mdname(mddev));
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
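
setup_conf() now rejects a chunk unless it is non-zero, a whole number of pages when expressed in bytes, and a power of two in sectors. Restated as a standalone predicate under the assumption of 4K pages (a sketch, not the kernel code):

#include <stdbool.h>

#define ASSUMED_PAGE_SIZE 4096

/* Mirror of the setup_conf() chunk check: new_chunk_sectors is given in
 * 512-byte sectors. */
static bool chunk_is_valid(unsigned int new_chunk_sectors)
{
        if (new_chunk_sectors == 0)
                return false;
        if (((unsigned long)new_chunk_sectors << 9) % ASSUMED_PAGE_SIZE)
                return false;
        /* same test as is_power_of_2() */
        return (new_chunk_sectors & (new_chunk_sectors - 1)) == 0;
}
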
...@@ -4401,7 +4403,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) ...@@ -4401,7 +4403,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
conf->fullsync = 1; conf->fullsync = 1;
} }
conf->chunk_size = mddev->new_chunk; conf->chunk_sectors = mddev->new_chunk_sectors;
conf->level = mddev->new_level; conf->level = mddev->new_level;
if (conf->level == 6) if (conf->level == 6)
conf->max_degraded = 2; conf->max_degraded = 2;
...@@ -4411,7 +4413,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) ...@@ -4411,7 +4413,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
conf->max_nr_stripes = NR_STRIPES; conf->max_nr_stripes = NR_STRIPES;
conf->reshape_progress = mddev->reshape_position; conf->reshape_progress = mddev->reshape_position;
if (conf->reshape_progress != MaxSector) { if (conf->reshape_progress != MaxSector) {
conf->prev_chunk = mddev->chunk_size; conf->prev_chunk_sectors = mddev->chunk_sectors;
conf->prev_algo = mddev->layout; conf->prev_algo = mddev->layout;
} }
...@@ -4453,6 +4455,10 @@ static int run(mddev_t *mddev) ...@@ -4453,6 +4455,10 @@ static int run(mddev_t *mddev)
int working_disks = 0; int working_disks = 0;
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
if (mddev->recovery_cp != MaxSector)
printk(KERN_NOTICE "raid5: %s is not clean"
" -- starting background reconstruction\n",
mdname(mddev));
if (mddev->reshape_position != MaxSector) { if (mddev->reshape_position != MaxSector) {
/* Check that we can continue the reshape. /* Check that we can continue the reshape.
* Currently only disks can change, it must * Currently only disks can change, it must
...@@ -4475,7 +4481,7 @@ static int run(mddev_t *mddev) ...@@ -4475,7 +4481,7 @@ static int run(mddev_t *mddev)
* geometry. * geometry.
*/ */
here_new = mddev->reshape_position; here_new = mddev->reshape_position;
if (sector_div(here_new, (mddev->new_chunk>>9)* if (sector_div(here_new, mddev->new_chunk_sectors *
(mddev->raid_disks - max_degraded))) { (mddev->raid_disks - max_degraded))) {
printk(KERN_ERR "raid5: reshape_position not " printk(KERN_ERR "raid5: reshape_position not "
"on a stripe boundary\n"); "on a stripe boundary\n");
...@@ -4483,7 +4489,7 @@ static int run(mddev_t *mddev) ...@@ -4483,7 +4489,7 @@ static int run(mddev_t *mddev)
} }
/* here_new is the stripe we will write to */ /* here_new is the stripe we will write to */
here_old = mddev->reshape_position; here_old = mddev->reshape_position;
sector_div(here_old, (mddev->chunk_size>>9)* sector_div(here_old, mddev->chunk_sectors *
(old_disks-max_degraded)); (old_disks-max_degraded));
/* here_old is the first stripe that we might need to read /* here_old is the first stripe that we might need to read
* from */ * from */
...@@ -4498,7 +4504,7 @@ static int run(mddev_t *mddev) ...@@ -4498,7 +4504,7 @@ static int run(mddev_t *mddev)
} else { } else {
BUG_ON(mddev->level != mddev->new_level); BUG_ON(mddev->level != mddev->new_level);
BUG_ON(mddev->layout != mddev->new_layout); BUG_ON(mddev->layout != mddev->new_layout);
BUG_ON(mddev->chunk_size != mddev->new_chunk); BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
BUG_ON(mddev->delta_disks != 0); BUG_ON(mddev->delta_disks != 0);
} }
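
With all chunk sizes in sectors, the stripe-boundary test on reshape_position becomes a plain division by chunk_sectors times the number of data disks, applied to both the old and the new geometry. A hedged restatement (sector_div() in the kernel divides in place and returns the remainder):

#include <stdint.h>
#include <stdbool.h>

/* reshape_position must sit on a full-stripe boundary of the given
 * geometry; all quantities are 512-byte sectors. */
static bool on_stripe_boundary(uint64_t reshape_position,
                               unsigned chunk_sectors, int data_disks)
{
        return reshape_position % ((uint64_t)chunk_sectors * data_disks) == 0;
}
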
...@@ -4532,7 +4538,7 @@ static int run(mddev_t *mddev) ...@@ -4532,7 +4538,7 @@ static int run(mddev_t *mddev)
} }
/* device size must be a multiple of chunk size */ /* device size must be a multiple of chunk size */
mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1); mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
mddev->resync_max_sectors = mddev->dev_sectors; mddev->resync_max_sectors = mddev->dev_sectors;
if (mddev->degraded > 0 && if (mddev->degraded > 0 &&
...@@ -4581,7 +4587,7 @@ static int run(mddev_t *mddev) ...@@ -4581,7 +4587,7 @@ static int run(mddev_t *mddev)
{ {
int data_disks = conf->previous_raid_disks - conf->max_degraded; int data_disks = conf->previous_raid_disks - conf->max_degraded;
int stripe = data_disks * int stripe = data_disks *
(mddev->chunk_size / PAGE_SIZE); ((mddev->chunk_sectors << 9) / PAGE_SIZE);
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe; mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
} }
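
The readahead window is sized to two full stripes: each stripe spans data_disks chunks, converted from sectors to bytes and then to pages. A small sketch of the calculation (the real value is stored in the queue's backing_dev_info.ra_pages):

/* Pages of readahead requested by run(): two stripes' worth. */
static unsigned long stripe_ra_pages(int data_disks, unsigned chunk_sectors,
                                     unsigned long page_size)
{
        unsigned long stripe = data_disks *
                (((unsigned long)chunk_sectors << 9) / page_size);
        return 2 * stripe;
}
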
...@@ -4678,7 +4684,8 @@ static void status(struct seq_file *seq, mddev_t *mddev) ...@@ -4678,7 +4684,8 @@ static void status(struct seq_file *seq, mddev_t *mddev)
raid5_conf_t *conf = (raid5_conf_t *) mddev->private; raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
int i; int i;
seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
mddev->chunk_sectors / 2, mddev->layout);
seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
for (i = 0; i < conf->raid_disks; i++) for (i = 0; i < conf->raid_disks; i++)
seq_printf (seq, "%s", seq_printf (seq, "%s",
...@@ -4826,7 +4833,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) ...@@ -4826,7 +4833,7 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
* any io in the removed space completes, but it hardly seems * any io in the removed space completes, but it hardly seems
* worth it. * worth it.
*/ */
sectors &= ~((sector_t)mddev->chunk_size/512 - 1); sectors &= ~((sector_t)mddev->chunk_sectors - 1);
md_set_array_sectors(mddev, raid5_size(mddev, sectors, md_set_array_sectors(mddev, raid5_size(mddev, sectors,
mddev->raid_disks)); mddev->raid_disks));
if (mddev->array_sectors > if (mddev->array_sectors >
...@@ -4843,14 +4850,37 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors) ...@@ -4843,14 +4850,37 @@ static int raid5_resize(mddev_t *mddev, sector_t sectors)
return 0; return 0;
} }
static int raid5_check_reshape(mddev_t *mddev) static int check_stripe_cache(mddev_t *mddev)
{ {
raid5_conf_t *conf = mddev_to_conf(mddev); /* Can only proceed if there are plenty of stripe_heads.
 * We need a minimum of one full stripe, and for sensible progress
* it is best to have about 4 times that.
* If we require 4 times, then the default 256 4K stripe_heads will
* allow for chunk sizes up to 256K, which is probably OK.
* If the chunk size is greater, user-space should request more
* stripe_heads first.
*/
raid5_conf_t *conf = mddev->private;
if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
> conf->max_nr_stripes ||
((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
> conf->max_nr_stripes) {
printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
/ STRIPE_SIZE)*4);
return 0;
}
return 1;
}
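
check_stripe_cache() demands four chunks' worth of stripe_heads for both the current and the requested chunk size; with STRIPE_SIZE at 4K and the default cache of 256 heads, that covers chunks up to 256K. The requirement, restated as a standalone helper (a sketch under those assumptions):

/* Stripe_heads needed for one chunk size: one head per STRIPE_SIZE of a
 * chunk, times four for sensible reshape progress. */
static unsigned long stripes_needed(unsigned chunk_sectors,
                                    unsigned stripe_size)
{
        return 4 * (((unsigned long)chunk_sectors << 9) / stripe_size);
}

/* stripes_needed(512, 4096) == 256: a 256K chunk just fits the default cache. */
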
static int check_reshape(mddev_t *mddev)
{
raid5_conf_t *conf = mddev->private;
if (mddev->delta_disks == 0 && if (mddev->delta_disks == 0 &&
mddev->new_layout == mddev->layout && mddev->new_layout == mddev->layout &&
mddev->new_chunk == mddev->chunk_size) mddev->new_chunk_sectors == mddev->chunk_sectors)
return -EINVAL; /* nothing to do */ return 0; /* nothing to do */
if (mddev->bitmap) if (mddev->bitmap)
/* Cannot grow a bitmap yet */ /* Cannot grow a bitmap yet */
return -EBUSY; return -EBUSY;
...@@ -4869,28 +4899,15 @@ static int raid5_check_reshape(mddev_t *mddev) ...@@ -4869,28 +4899,15 @@ static int raid5_check_reshape(mddev_t *mddev)
return -EINVAL; return -EINVAL;
} }
/* Can only proceed if there are plenty of stripe_heads. if (!check_stripe_cache(mddev))
* We need a minimum of one full stripe,, and for sensible progress
* it is best to have about 4 times that.
* If we require 4 times, then the default 256 4K stripe_heads will
* allow for chunk sizes up to 256K, which is probably OK.
* If the chunk size is greater, user-space should request more
* stripe_heads first.
*/
if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes ||
(mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) {
printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n",
(max(mddev->chunk_size, mddev->new_chunk)
/ STRIPE_SIZE)*4);
return -ENOSPC; return -ENOSPC;
}
return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
} }
static int raid5_start_reshape(mddev_t *mddev) static int raid5_start_reshape(mddev_t *mddev)
{ {
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
mdk_rdev_t *rdev; mdk_rdev_t *rdev;
int spares = 0; int spares = 0;
int added_devices = 0; int added_devices = 0;
...@@ -4899,6 +4916,9 @@ static int raid5_start_reshape(mddev_t *mddev) ...@@ -4899,6 +4916,9 @@ static int raid5_start_reshape(mddev_t *mddev)
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY; return -EBUSY;
if (!check_stripe_cache(mddev))
return -ENOSPC;
list_for_each_entry(rdev, &mddev->disks, same_set) list_for_each_entry(rdev, &mddev->disks, same_set)
if (rdev->raid_disk < 0 && if (rdev->raid_disk < 0 &&
!test_bit(Faulty, &rdev->flags)) !test_bit(Faulty, &rdev->flags))
...@@ -4925,8 +4945,8 @@ static int raid5_start_reshape(mddev_t *mddev) ...@@ -4925,8 +4945,8 @@ static int raid5_start_reshape(mddev_t *mddev)
spin_lock_irq(&conf->device_lock); spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks; conf->previous_raid_disks = conf->raid_disks;
conf->raid_disks += mddev->delta_disks; conf->raid_disks += mddev->delta_disks;
conf->prev_chunk = conf->chunk_size; conf->prev_chunk_sectors = conf->chunk_sectors;
conf->chunk_size = mddev->new_chunk; conf->chunk_sectors = mddev->new_chunk_sectors;
conf->prev_algo = conf->algorithm; conf->prev_algo = conf->algorithm;
conf->algorithm = mddev->new_layout; conf->algorithm = mddev->new_layout;
if (mddev->delta_disks < 0) if (mddev->delta_disks < 0)
...@@ -5008,7 +5028,7 @@ static void end_reshape(raid5_conf_t *conf) ...@@ -5008,7 +5028,7 @@ static void end_reshape(raid5_conf_t *conf)
*/ */
{ {
int data_disks = conf->raid_disks - conf->max_degraded; int data_disks = conf->raid_disks - conf->max_degraded;
int stripe = data_disks * (conf->chunk_size int stripe = data_disks * ((conf->chunk_sectors << 9)
/ PAGE_SIZE); / PAGE_SIZE);
if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
...@@ -5022,7 +5042,7 @@ static void end_reshape(raid5_conf_t *conf) ...@@ -5022,7 +5042,7 @@ static void end_reshape(raid5_conf_t *conf)
static void raid5_finish_reshape(mddev_t *mddev) static void raid5_finish_reshape(mddev_t *mddev)
{ {
struct block_device *bdev; struct block_device *bdev;
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
...@@ -5053,7 +5073,7 @@ static void raid5_finish_reshape(mddev_t *mddev) ...@@ -5053,7 +5073,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
raid5_remove_disk(mddev, d); raid5_remove_disk(mddev, d);
} }
mddev->layout = conf->algorithm; mddev->layout = conf->algorithm;
mddev->chunk_size = conf->chunk_size; mddev->chunk_sectors = conf->chunk_sectors;
mddev->reshape_position = MaxSector; mddev->reshape_position = MaxSector;
mddev->delta_disks = 0; mddev->delta_disks = 0;
} }
...@@ -5061,7 +5081,7 @@ static void raid5_finish_reshape(mddev_t *mddev) ...@@ -5061,7 +5081,7 @@ static void raid5_finish_reshape(mddev_t *mddev)
static void raid5_quiesce(mddev_t *mddev, int state) static void raid5_quiesce(mddev_t *mddev, int state)
{ {
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
switch(state) { switch(state) {
case 2: /* resume for a suspend */ case 2: /* resume for a suspend */
...@@ -5111,7 +5131,7 @@ static void *raid5_takeover_raid1(mddev_t *mddev) ...@@ -5111,7 +5131,7 @@ static void *raid5_takeover_raid1(mddev_t *mddev)
mddev->new_level = 5; mddev->new_level = 5;
mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
mddev->new_chunk = chunksect << 9; mddev->new_chunk_sectors = chunksect;
return setup_conf(mddev); return setup_conf(mddev);
} }
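
Because md now carries chunk sizes in 512-byte sectors end to end, the raid1 takeover path can store the sector count directly instead of shifting it back to bytes. For reference, the conversions involved (trivial helpers, not part of the kernel API):

static inline unsigned chunk_bytes_to_sectors(unsigned bytes)   { return bytes >> 9; }
static inline unsigned chunk_sectors_to_bytes(unsigned sectors) { return sectors << 9; }
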
...@@ -5150,24 +5170,24 @@ static void *raid5_takeover_raid6(mddev_t *mddev) ...@@ -5150,24 +5170,24 @@ static void *raid5_takeover_raid6(mddev_t *mddev)
} }
static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk) static int raid5_check_reshape(mddev_t *mddev)
{ {
/* For a 2-drive array, the layout and chunk size can be changed /* For a 2-drive array, the layout and chunk size can be changed
 * immediately as no restriping is needed. * immediately as no restriping is needed.
* For larger arrays we record the new value - after validation * For larger arrays we record the new value - after validation
* to be used by a reshape pass. * to be used by a reshape pass.
*/ */
raid5_conf_t *conf = mddev_to_conf(mddev); raid5_conf_t *conf = mddev->private;
int new_chunk = mddev->new_chunk_sectors;
if (new_layout >= 0 && !algorithm_valid_raid5(new_layout)) if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
return -EINVAL; return -EINVAL;
if (new_chunk > 0) { if (new_chunk > 0) {
if (new_chunk & (new_chunk-1)) if (!is_power_of_2(new_chunk))
/* not a power of 2 */
return -EINVAL; return -EINVAL;
if (new_chunk < PAGE_SIZE) if (new_chunk < (PAGE_SIZE>>9))
return -EINVAL; return -EINVAL;
if (mddev->array_sectors & ((new_chunk>>9)-1)) if (mddev->array_sectors & (new_chunk-1))
/* not factor of array size */ /* not factor of array size */
return -EINVAL; return -EINVAL;
} }
...@@ -5175,49 +5195,39 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk) ...@@ -5175,49 +5195,39 @@ static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk)
/* They look valid */ /* They look valid */
if (mddev->raid_disks == 2) { if (mddev->raid_disks == 2) {
/* can make the change immediately */
if (new_layout >= 0) { if (mddev->new_layout >= 0) {
conf->algorithm = new_layout; conf->algorithm = mddev->new_layout;
mddev->layout = mddev->new_layout = new_layout; mddev->layout = mddev->new_layout;
} }
if (new_chunk > 0) { if (new_chunk > 0) {
conf->chunk_size = new_chunk; conf->chunk_sectors = new_chunk;
mddev->chunk_size = mddev->new_chunk = new_chunk; mddev->chunk_sectors = new_chunk;
} }
set_bit(MD_CHANGE_DEVS, &mddev->flags); set_bit(MD_CHANGE_DEVS, &mddev->flags);
md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->thread);
} else {
if (new_layout >= 0)
mddev->new_layout = new_layout;
if (new_chunk > 0)
mddev->new_chunk = new_chunk;
} }
return 0; return check_reshape(mddev);
} }
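
raid5_check_reshape() validates the requested chunk (a power of two, at least one page, and a factor of the array size, all in sectors), applies it immediately on a 2-drive array, and otherwise leaves it recorded for check_reshape() and the reshape pass. The chunk validation alone, as a standalone sketch:

#include <stdbool.h>
#include <stdint.h>

/* page_sectors is PAGE_SIZE >> 9 (8 on 4K-page systems). */
static bool new_chunk_ok(unsigned new_chunk, unsigned page_sectors,
                         uint64_t array_sectors)
{
        if (new_chunk & (new_chunk - 1))                /* power of two */
                return false;
        if (new_chunk < page_sectors)                   /* at least a page */
                return false;
        return (array_sectors & (new_chunk - 1)) == 0;  /* factor of size */
}
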
static int raid6_reconfig(mddev_t *mddev, int new_layout, int new_chunk) static int raid6_check_reshape(mddev_t *mddev)
{ {
if (new_layout >= 0 && !algorithm_valid_raid6(new_layout)) int new_chunk = mddev->new_chunk_sectors;
if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
return -EINVAL; return -EINVAL;
if (new_chunk > 0) { if (new_chunk > 0) {
if (new_chunk & (new_chunk-1)) if (!is_power_of_2(new_chunk))
/* not a power of 2 */
return -EINVAL; return -EINVAL;
if (new_chunk < PAGE_SIZE) if (new_chunk < (PAGE_SIZE >> 9))
return -EINVAL; return -EINVAL;
if (mddev->array_sectors & ((new_chunk>>9)-1)) if (mddev->array_sectors & (new_chunk-1))
/* not factor of array size */ /* not factor of array size */
return -EINVAL; return -EINVAL;
} }
/* They look valid */ /* They look valid */
return check_reshape(mddev);
if (new_layout >= 0)
mddev->new_layout = new_layout;
if (new_chunk > 0)
mddev->new_chunk = new_chunk;
return 0;
} }
static void *raid5_takeover(mddev_t *mddev) static void *raid5_takeover(mddev_t *mddev)
...@@ -5227,8 +5237,6 @@ static void *raid5_takeover(mddev_t *mddev) ...@@ -5227,8 +5237,6 @@ static void *raid5_takeover(mddev_t *mddev)
* raid1 - if there are two drives. We need to know the chunk size * raid1 - if there are two drives. We need to know the chunk size
* raid4 - trivial - just use a raid4 layout. * raid4 - trivial - just use a raid4 layout.
* raid6 - Providing it is a *_6 layout * raid6 - Providing it is a *_6 layout
*
* For now, just do raid1
*/ */
if (mddev->level == 1) if (mddev->level == 1)
...@@ -5310,12 +5318,11 @@ static struct mdk_personality raid6_personality = ...@@ -5310,12 +5318,11 @@ static struct mdk_personality raid6_personality =
.sync_request = sync_request, .sync_request = sync_request,
.resize = raid5_resize, .resize = raid5_resize,
.size = raid5_size, .size = raid5_size,
.check_reshape = raid5_check_reshape, .check_reshape = raid6_check_reshape,
.start_reshape = raid5_start_reshape, .start_reshape = raid5_start_reshape,
.finish_reshape = raid5_finish_reshape, .finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce, .quiesce = raid5_quiesce,
.takeover = raid6_takeover, .takeover = raid6_takeover,
.reconfig = raid6_reconfig,
}; };
static struct mdk_personality raid5_personality = static struct mdk_personality raid5_personality =
{ {
...@@ -5338,7 +5345,6 @@ static struct mdk_personality raid5_personality = ...@@ -5338,7 +5345,6 @@ static struct mdk_personality raid5_personality =
.finish_reshape = raid5_finish_reshape, .finish_reshape = raid5_finish_reshape,
.quiesce = raid5_quiesce, .quiesce = raid5_quiesce,
.takeover = raid5_takeover, .takeover = raid5_takeover,
.reconfig = raid5_reconfig,
}; };
static struct mdk_personality raid4_personality = static struct mdk_personality raid4_personality =
......
...@@ -334,7 +334,8 @@ struct raid5_private_data { ...@@ -334,7 +334,8 @@ struct raid5_private_data {
struct hlist_head *stripe_hashtbl; struct hlist_head *stripe_hashtbl;
mddev_t *mddev; mddev_t *mddev;
struct disk_info *spare; struct disk_info *spare;
int chunk_size, level, algorithm; int chunk_sectors;
int level, algorithm;
int max_degraded; int max_degraded;
int raid_disks; int raid_disks;
int max_nr_stripes; int max_nr_stripes;
...@@ -350,7 +351,8 @@ struct raid5_private_data { ...@@ -350,7 +351,8 @@ struct raid5_private_data {
*/ */
sector_t reshape_safe; sector_t reshape_safe;
int previous_raid_disks; int previous_raid_disks;
int prev_chunk, prev_algo; int prev_chunk_sectors;
int prev_algo;
short generation; /* increments with every reshape */ short generation; /* increments with every reshape */
unsigned long reshape_checkpoint; /* Time we last updated unsigned long reshape_checkpoint; /* Time we last updated
* metadata */ * metadata */
...@@ -408,8 +410,6 @@ struct raid5_private_data { ...@@ -408,8 +410,6 @@ struct raid5_private_data {
typedef struct raid5_private_data raid5_conf_t; typedef struct raid5_private_data raid5_conf_t;
#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private)
/* /*
* Our supported algorithms * Our supported algorithms
*/ */
......
...@@ -232,7 +232,7 @@ struct mdp_superblock_1 { ...@@ -232,7 +232,7 @@ struct mdp_superblock_1 {
__le64 reshape_position; /* next address in array-space for reshape */ __le64 reshape_position; /* next address in array-space for reshape */
__le32 delta_disks; /* change in number of raid_disks */ __le32 delta_disks; /* change in number of raid_disks */
__le32 new_layout; /* new layout */ __le32 new_layout; /* new layout */
__le32 new_chunk; /* new chunk size (bytes) */ __le32 new_chunk; /* new chunk size (512byte sectors) */
__u8 pad1[128-124]; /* set to 0 when written */ __u8 pad1[128-124]; /* set to 0 when written */
/* constant this-device information - 64 bytes */ /* constant this-device information - 64 bytes */
......