Commit c3ae1f33 authored by Linus Torvalds

Merge branch 'for-linus' of git://neil.brown.name/md

* 'for-linus' of git://neil.brown.name/md: (34 commits)
  md: Fix some bugs in recovery_disabled handling.
  md/raid5: fix bug that could result in reads from a failed device.
  lib/raid6: Fix filename emitted in generated code
  md.c: trivial comment fix
  MD: Allow restarting an interrupted incremental recovery.
  md: clear In_sync bit on devices added to an active array.
  md: add proper write-congestion reporting to RAID1 and RAID10.
  md: rename "mdk_personality" to "md_personality"
  md/bitmap remove fault injection options.
  md/raid5: typedef removal: raid5_conf_t -> struct r5conf
  md/raid1: typedef removal: conf_t -> struct r1conf
  md/raid10: typedef removal: conf_t -> struct r10conf
  md/raid0: typedef removal: raid0_conf_t -> struct r0conf
  md/multipath: typedef removal: multipath_conf_t -> struct mpconf
  md/linear: typedef removal: linear_conf_t -> struct linear_conf
  md/faulty: remove typedef: conf_t -> struct faulty_conf
  md/linear: remove typedefs: dev_info_t -> struct dev_info
  md: remove typedefs: mirror_info_t -> struct mirror_info
  md: remove typedefs: r10bio_t -> struct r10bio and r1bio_t -> struct r1bio
  md: remove typedefs: mdk_thread_t -> struct md_thread
  ...
parents c28cfd60 d890fa2b
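
Most of the series is a mechanical clean-up of the md core's naming: the opaque typedefs (mddev_t, mdk_rdev_t, mdk_personality, and the per-level *_conf_t names) are dropped in favour of plain struct tags, with no intended change in behaviour. Below is a minimal, self-contained sketch of the pattern; the field names and example_run() are illustrative placeholders, not the real kernel definitions.

```c
/*
 * Sketch of the typedef-removal pattern in this series.
 *
 * Before:  typedef struct mddev_s { ... } mddev_t;
 *          static int run(mddev_t *mddev);
 *
 * After:   the struct tag itself is spelled out at every use site,
 *          so each type has exactly one obvious name.
 */
#include <stdio.h>

struct md_rdev {
	int raid_disk;          /* placeholder field, not the kernel layout */
};

struct mddev {
	struct md_rdev *rdev;   /* placeholder field, not the kernel layout */
	void *private;
};

/* Personality hooks now take "struct mddev *" directly. */
static int example_run(struct mddev *mddev)
{
	return mddev->private ? 0 : -1;
}

int main(void)
{
	struct md_rdev rdev = { .raid_disk = 0 };
	struct mddev mddev = { .rdev = &rdev, .private = &rdev };

	printf("example_run() = %d\n", example_run(&mddev));
	return 0;
}
```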
drivers/md/bitmap.h
@@ -193,7 +193,7 @@ struct bitmap {
 unsigned long pages; /* total number of pages in the bitmap */
 unsigned long missing_pages; /* number of pages not yet allocated */
-mddev_t *mddev; /* the md device that the bitmap is for */
+struct mddev *mddev; /* the md device that the bitmap is for */
 /* bitmap chunksize -- how much data does each bit represent? */
 unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
@@ -238,10 +238,10 @@ struct bitmap {
 /* the bitmap API */
 /* these are used only by md/bitmap */
-int bitmap_create(mddev_t *mddev);
-int bitmap_load(mddev_t *mddev);
-void bitmap_flush(mddev_t *mddev);
-void bitmap_destroy(mddev_t *mddev);
+int bitmap_create(struct mddev *mddev);
+int bitmap_load(struct mddev *mddev);
+void bitmap_flush(struct mddev *mddev);
+void bitmap_destroy(struct mddev *mddev);
 void bitmap_print_sb(struct bitmap *bitmap);
 void bitmap_update_sb(struct bitmap *bitmap);
@@ -262,7 +262,7 @@ void bitmap_close_sync(struct bitmap *bitmap);
 void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
 void bitmap_unplug(struct bitmap *bitmap);
-void bitmap_daemon_work(mddev_t *mddev);
+void bitmap_daemon_work(struct mddev *mddev);
 #endif
 #endif
drivers/md/dm-raid.c
@@ -37,7 +37,7 @@ struct raid_dev {
 */
 struct dm_dev *meta_dev;
 struct dm_dev *data_dev;
-struct mdk_rdev_s rdev;
+struct md_rdev rdev;
 };
 /*
@@ -57,7 +57,7 @@ struct raid_set {
 uint64_t print_flags;
-struct mddev_s md;
+struct mddev md;
 struct raid_type *raid_type;
 struct dm_target_callbacks callbacks;
@@ -594,7 +594,7 @@ struct dm_raid_superblock {
 /* Always set to 0 when writing. */
 } __packed;
-static int read_disk_sb(mdk_rdev_t *rdev, int size)
+static int read_disk_sb(struct md_rdev *rdev, int size)
 {
 BUG_ON(!rdev->sb_page);
@@ -611,9 +611,9 @@ static int read_disk_sb(mdk_rdev_t *rdev, int size)
 return 0;
 }
-static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
+static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
 {
-mdk_rdev_t *r, *t;
+struct md_rdev *r, *t;
 uint64_t failed_devices;
 struct dm_raid_superblock *sb;
@@ -651,7 +651,7 @@ static void super_sync(mddev_t *mddev, mdk_rdev_t *rdev)
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
-static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
+static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
 {
 int ret;
 struct dm_raid_superblock *sb;
@@ -689,7 +689,7 @@ static int super_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev)
 return (events_sb > events_refsb) ? 1 : 0;
 }
-static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
+static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
 {
 int role;
 struct raid_set *rs = container_of(mddev, struct raid_set, md);
@@ -698,7 +698,7 @@ static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
 struct dm_raid_superblock *sb;
 uint32_t new_devs = 0;
 uint32_t rebuilds = 0;
-mdk_rdev_t *r, *t;
+struct md_rdev *r, *t;
 struct dm_raid_superblock *sb2;
 sb = page_address(rdev->sb_page);
@@ -809,7 +809,7 @@ static int super_init_validation(mddev_t *mddev, mdk_rdev_t *rdev)
 return 0;
 }
-static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
+static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
 {
 struct dm_raid_superblock *sb = page_address(rdev->sb_page);
@@ -849,8 +849,8 @@ static int super_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 {
 int ret;
-mdk_rdev_t *rdev, *freshest, *tmp;
-mddev_t *mddev = &rs->md;
+struct md_rdev *rdev, *freshest, *tmp;
+struct mddev *mddev = &rs->md;
 freshest = NULL;
 rdev_for_each(rdev, tmp, mddev) {
@@ -1004,7 +1004,7 @@ static void raid_dtr(struct dm_target *ti)
 static int raid_map(struct dm_target *ti, struct bio *bio, union map_info *map_context)
 {
 struct raid_set *rs = ti->private;
-mddev_t *mddev = &rs->md;
+struct mddev *mddev = &rs->md;
 mddev->pers->make_request(mddev, bio);
@@ -1097,7 +1097,7 @@ static int raid_status(struct dm_target *ti, status_type_t type,
 rs->md.bitmap_info.max_write_behind);
 if (rs->print_flags & DMPF_STRIPE_CACHE) {
-raid5_conf_t *conf = rs->md.private;
+struct r5conf *conf = rs->md.private;
 /* convert from kiB to sectors */
 DMEMIT(" stripe_cache %d",
@@ -1146,7 +1146,7 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 struct raid_set *rs = ti->private;
 unsigned chunk_size = rs->md.chunk_sectors << 9;
-raid5_conf_t *conf = rs->md.private;
+struct r5conf *conf = rs->md.private;
 blk_limits_io_min(limits, chunk_size);
 blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
drivers/md/faulty.c
@@ -81,16 +81,16 @@ static void faulty_fail(struct bio *bio, int error)
 bio_io_error(b);
 }
-typedef struct faulty_conf {
+struct faulty_conf {
 int period[Modes];
 atomic_t counters[Modes];
 sector_t faults[MaxFault];
 int modes[MaxFault];
 int nfaults;
-mdk_rdev_t *rdev;
-} conf_t;
+struct md_rdev *rdev;
+};
-static int check_mode(conf_t *conf, int mode)
+static int check_mode(struct faulty_conf *conf, int mode)
 {
 if (conf->period[mode] == 0 &&
 atomic_read(&conf->counters[mode]) <= 0)
@@ -105,7 +105,7 @@ static int check_mode(conf_t *conf, int mode)
 return 0;
 }
-static int check_sector(conf_t *conf, sector_t start, sector_t end, int dir)
+static int check_sector(struct faulty_conf *conf, sector_t start, sector_t end, int dir)
 {
 /* If we find a ReadFixable sector, we fix it ... */
 int i;
@@ -129,7 +129,7 @@ static int check_sector(conf_t *conf, sector_t start, sector_t end, int dir)
 return 0;
 }
-static void add_sector(conf_t *conf, sector_t start, int mode)
+static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
 {
 int i;
 int n = conf->nfaults;
@@ -169,9 +169,9 @@ static void add_sector(conf_t *conf, sector_t start, int mode)
 conf->nfaults = n+1;
 }
-static int make_request(mddev_t *mddev, struct bio *bio)
+static int make_request(struct mddev *mddev, struct bio *bio)
 {
-conf_t *conf = mddev->private;
+struct faulty_conf *conf = mddev->private;
 int failit = 0;
 if (bio_data_dir(bio) == WRITE) {
@@ -222,9 +222,9 @@ static int make_request(mddev_t *mddev, struct bio *bio)
 }
 }
-static void status(struct seq_file *seq, mddev_t *mddev)
+static void status(struct seq_file *seq, struct mddev *mddev)
 {
-conf_t *conf = mddev->private;
+struct faulty_conf *conf = mddev->private;
 int n;
 if ((n=atomic_read(&conf->counters[WriteTransient])) != 0)
@@ -255,11 +255,11 @@ static void status(struct seq_file *seq, mddev_t *mddev)
 }
-static int reshape(mddev_t *mddev)
+static int reshape(struct mddev *mddev)
 {
 int mode = mddev->new_layout & ModeMask;
 int count = mddev->new_layout >> ModeShift;
-conf_t *conf = mddev->private;
+struct faulty_conf *conf = mddev->private;
 if (mddev->new_layout < 0)
 return 0;
@@ -284,7 +284,7 @@ static int reshape(mddev_t *mddev)
 return 0;
 }
-static sector_t faulty_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
 WARN_ONCE(raid_disks,
 "%s does not support generic reshape\n", __func__);
@@ -295,11 +295,11 @@ static sector_t faulty_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 return sectors;
 }
-static int run(mddev_t *mddev)
+static int run(struct mddev *mddev)
 {
-mdk_rdev_t *rdev;
+struct md_rdev *rdev;
 int i;
-conf_t *conf;
+struct faulty_conf *conf;
 if (md_check_no_bitmap(mddev))
 return -EINVAL;
@@ -325,16 +325,16 @@ static int run(mddev_t *mddev)
 return 0;
 }
-static int stop(mddev_t *mddev)
+static int stop(struct mddev *mddev)
 {
-conf_t *conf = mddev->private;
+struct faulty_conf *conf = mddev->private;
 kfree(conf);
 mddev->private = NULL;
 return 0;
 }
-static struct mdk_personality faulty_personality =
+static struct md_personality faulty_personality =
 {
 .name = "faulty",
 .level = LEVEL_FAULTY,
drivers/md/linear.c
@@ -26,10 +26,10 @@
 /*
 * find which device holds a particular offset
 */
-static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
+static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
 {
 int lo, mid, hi;
-linear_conf_t *conf;
+struct linear_conf *conf;
 lo = 0;
 hi = mddev->raid_disks - 1;
@@ -63,8 +63,8 @@ static int linear_mergeable_bvec(struct request_queue *q,
 struct bvec_merge_data *bvm,
 struct bio_vec *biovec)
 {
-mddev_t *mddev = q->queuedata;
-dev_info_t *dev0;
+struct mddev *mddev = q->queuedata;
+struct dev_info *dev0;
 unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
@@ -89,8 +89,8 @@ static int linear_mergeable_bvec(struct request_queue *q,
 static int linear_congested(void *data, int bits)
 {
-mddev_t *mddev = data;
-linear_conf_t *conf;
+struct mddev *mddev = data;
+struct linear_conf *conf;
 int i, ret = 0;
 if (mddev_congested(mddev, bits))
@@ -108,9 +108,9 @@ static int linear_congested(void *data, int bits)
 return ret;
 }
-static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
-linear_conf_t *conf;
+struct linear_conf *conf;
 sector_t array_sectors;
 rcu_read_lock();
@@ -123,13 +123,13 @@ static sector_t linear_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 return array_sectors;
 }
-static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
+static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 {
-linear_conf_t *conf;
-mdk_rdev_t *rdev;
+struct linear_conf *conf;
+struct md_rdev *rdev;
 int i, cnt;
-conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
+conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(struct dev_info),
 GFP_KERNEL);
 if (!conf)
 return NULL;
@@ -139,7 +139,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 list_for_each_entry(rdev, &mddev->disks, same_set) {
 int j = rdev->raid_disk;
-dev_info_t *disk = conf->disks + j;
+struct dev_info *disk = conf->disks + j;
 sector_t sectors;
 if (j < 0 || j >= raid_disks || disk->rdev) {
@@ -194,9 +194,9 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 return NULL;
 }
-static int linear_run (mddev_t *mddev)
+static int linear_run (struct mddev *mddev)
 {
-linear_conf_t *conf;
+struct linear_conf *conf;
 if (md_check_no_bitmap(mddev))
 return -EINVAL;
@@ -213,7 +213,7 @@ static int linear_run (mddev_t *mddev)
 return md_integrity_register(mddev);
 }
-static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
+static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
 {
 /* Adding a drive to a linear array allows the array to grow.
 * It is permitted if the new drive has a matching superblock
@@ -223,7 +223,7 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
 * The current one is never freed until the array is stopped.
 * This avoids races.
 */
-linear_conf_t *newconf, *oldconf;
+struct linear_conf *newconf, *oldconf;
 if (rdev->saved_raid_disk != mddev->raid_disks)
 return -EINVAL;
@@ -245,9 +245,9 @@ static int linear_add(mddev_t *mddev, mdk_rdev_t *rdev)
 return 0;
 }
-static int linear_stop (mddev_t *mddev)
+static int linear_stop (struct mddev *mddev)
 {
-linear_conf_t *conf = mddev->private;
+struct linear_conf *conf = mddev->private;
 /*
 * We do not require rcu protection here since
@@ -264,9 +264,9 @@ static int linear_stop (mddev_t *mddev)
 return 0;
 }
-static int linear_make_request (mddev_t *mddev, struct bio *bio)
+static int linear_make_request (struct mddev *mddev, struct bio *bio)
 {
-dev_info_t *tmp_dev;
+struct dev_info *tmp_dev;
 sector_t start_sector;
 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
@@ -323,14 +323,14 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
 return 1;
 }
-static void linear_status (struct seq_file *seq, mddev_t *mddev)
+static void linear_status (struct seq_file *seq, struct mddev *mddev)
 {
 seq_printf(seq, " %dk rounding", mddev->chunk_sectors / 2);
 }
-static struct mdk_personality linear_personality =
+static struct md_personality linear_personality =
 {
 .name = "linear",
 .level = LEVEL_LINEAR,
drivers/md/linear.h
@@ -2,20 +2,14 @@
 #define _LINEAR_H
 struct dev_info {
-mdk_rdev_t *rdev;
+struct md_rdev *rdev;
 sector_t end_sector;
 };
-typedef struct dev_info dev_info_t;
-struct linear_private_data
+struct linear_conf
 {
 struct rcu_head rcu;
 sector_t array_sectors;
-dev_info_t disks[0];
+struct dev_info disks[0];
 };
-typedef struct linear_private_data linear_conf_t;
 #endif
drivers/md/multipath.c
@@ -31,7 +31,7 @@
 #define NR_RESERVED_BUFS 32
-static int multipath_map (multipath_conf_t *conf)
+static int multipath_map (struct mpconf *conf)
 {
 int i, disks = conf->raid_disks;
@@ -42,7 +42,7 @@ static int multipath_map (multipath_conf_t *conf)
 rcu_read_lock();
 for (i = 0; i < disks; i++) {
-mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
+struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
 if (rdev && test_bit(In_sync, &rdev->flags)) {
 atomic_inc(&rdev->nr_pending);
 rcu_read_unlock();
@@ -58,8 +58,8 @@ static int multipath_map (multipath_conf_t *conf)
 static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
 {
 unsigned long flags;
-mddev_t *mddev = mp_bh->mddev;
-multipath_conf_t *conf = mddev->private;
+struct mddev *mddev = mp_bh->mddev;
+struct mpconf *conf = mddev->private;
 spin_lock_irqsave(&conf->device_lock, flags);
 list_add(&mp_bh->retry_list, &conf->retry_list);
@@ -76,7 +76,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
 static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
 {
 struct bio *bio = mp_bh->master_bio;
-multipath_conf_t *conf = mp_bh->mddev->private;
+struct mpconf *conf = mp_bh->mddev->private;
 bio_endio(bio, err);
 mempool_free(mp_bh, conf->pool);
@@ -86,8 +86,8 @@ static void multipath_end_request(struct bio *bio, int error)
 {
 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 struct multipath_bh *mp_bh = bio->bi_private;
-multipath_conf_t *conf = mp_bh->mddev->private;
-mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;
+struct mpconf *conf = mp_bh->mddev->private;
+struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
 if (uptodate)
 multipath_end_bh_io(mp_bh, 0);
@@ -106,9 +106,9 @@ static void multipath_end_request(struct bio *bio, int error)
 rdev_dec_pending(rdev, conf->mddev);
 }
-static int multipath_make_request(mddev_t *mddev, struct bio * bio)
+static int multipath_make_request(struct mddev *mddev, struct bio * bio)
 {
-multipath_conf_t *conf = mddev->private;
+struct mpconf *conf = mddev->private;
 struct multipath_bh * mp_bh;
 struct multipath_info *multipath;
@@ -140,9 +140,9 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
 return 0;
 }
-static void multipath_status (struct seq_file *seq, mddev_t *mddev)
+static void multipath_status (struct seq_file *seq, struct mddev *mddev)
 {
-multipath_conf_t *conf = mddev->private;
+struct mpconf *conf = mddev->private;
 int i;
 seq_printf (seq, " [%d/%d] [", conf->raid_disks,
@@ -156,8 +156,8 @@ static void multipath_status (struct seq_file *seq, mddev_t *mddev)
 static int multipath_congested(void *data, int bits)
 {
-mddev_t *mddev = data;
-multipath_conf_t *conf = mddev->private;
+struct mddev *mddev = data;
+struct mpconf *conf = mddev->private;
 int i, ret = 0;
 if (mddev_congested(mddev, bits))
@@ -165,7 +165,7 @@ static int multipath_congested(void *data, int bits)
 rcu_read_lock();
 for (i = 0; i < mddev->raid_disks ; i++) {
-mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
+struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
 if (rdev && !test_bit(Faulty, &rdev->flags)) {
 struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -183,9 +183,9 @@ static int multipath_congested(void *data, int bits)
 /*
 * Careful, this can execute in IRQ contexts as well!
 */
-static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
+static void multipath_error (struct mddev *mddev, struct md_rdev *rdev)
 {
-multipath_conf_t *conf = mddev->private;
+struct mpconf *conf = mddev->private;
 char b[BDEVNAME_SIZE];
 if (conf->raid_disks - mddev->degraded <= 1) {
@@ -218,7 +218,7 @@ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
 conf->raid_disks - mddev->degraded);
 }
-static void print_multipath_conf (multipath_conf_t *conf)
+static void print_multipath_conf (struct mpconf *conf)
 {
 int i;
 struct multipath_info *tmp;
@@ -242,9 +242,9 @@ static void print_multipath_conf (multipath_conf_t *conf)
 }
-static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
+static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 {
-multipath_conf_t *conf = mddev->private;
+struct mpconf *conf = mddev->private;
 struct request_queue *q;
 int err = -EEXIST;
 int path;
@@ -291,11 +291,11 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 return err;
 }
-static int multipath_remove_disk(mddev_t *mddev, int number)
+static int multipath_remove_disk(struct mddev *mddev, int number)
 {
-multipath_conf_t *conf = mddev->private;
+struct mpconf *conf = mddev->private;
 int err = 0;
-mdk_rdev_t *rdev;
+struct md_rdev *rdev;
 struct multipath_info *p = conf->multipaths + number;
 print_multipath_conf(conf);
@@ -335,12 +335,12 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
 * 3. Performs writes following reads for array syncronising.
 */
-static void multipathd (mddev_t *mddev)
+static void multipathd (struct mddev *mddev)
 {
 struct multipath_bh *mp_bh;
 struct bio *bio;
 unsigned long flags;
-multipath_conf_t *conf = mddev->private;
+struct mpconf *conf = mddev->private;
 struct list_head *head = &conf->retry_list;
 md_check_recovery(mddev);
@@ -379,7 +379,7 @@ static void multipathd (mddev_t *mddev)
 spin_unlock_irqrestore(&conf->device_lock, flags);
 }
-static sector_t multipath_size(mddev_t *mddev, sector_t sectors, int raid_disks)
+static sector_t multipath_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
 WARN_ONCE(sectors || raid_disks,
 "%s does not support generic reshape\n", __func__);
@@ -387,12 +387,12 @@ static sector_t multipath_size(mddev_t *mddev, sector_t sectors, int raid_disks)
 return mddev->dev_sectors;
 }
-static int multipath_run (mddev_t *mddev)
+static int multipath_run (struct mddev *mddev)
 {
-multipath_conf_t *conf;
+struct mpconf *conf;
 int disk_idx;
 struct multipath_info *disk;
-mdk_rdev_t *rdev;
+struct md_rdev *rdev;
 int working_disks;
 if (md_check_no_bitmap(mddev))
@@ -409,7 +409,7 @@ static int multipath_run (mddev_t *mddev)
 * should be freed in multipath_stop()]
 */
-conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
+conf = kzalloc(sizeof(struct mpconf), GFP_KERNEL);
 mddev->private = conf;
 if (!conf) {
 printk(KERN_ERR
@@ -510,9 +510,9 @@ static int multipath_run (mddev_t *mddev)
 }
-static int multipath_stop (mddev_t *mddev)
+static int multipath_stop (struct mddev *mddev)
 {
-multipath_conf_t *conf = mddev->private;
+struct mpconf *conf = mddev->private;
 md_unregister_thread(&mddev->thread);
 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
@@ -523,7 +523,7 @@ static int multipath_stop (mddev_t *mddev)
 return 0;
 }
-static struct mdk_personality multipath_personality =
+static struct md_personality multipath_personality =
 {
 .name = "multipath",
 .level = LEVEL_MULTIPATH,
drivers/md/multipath.h
@@ -2,11 +2,11 @@
 #define _MULTIPATH_H
 struct multipath_info {
-mdk_rdev_t *rdev;
+struct md_rdev *rdev;
 };
-struct multipath_private_data {
-mddev_t *mddev;
+struct mpconf {
+struct mddev *mddev;
 struct multipath_info *multipaths;
 int raid_disks;
 spinlock_t device_lock;
@@ -15,8 +15,6 @@ struct multipath_private_data {
 mempool_t *pool;
 };
-typedef struct multipath_private_data multipath_conf_t;
 /*
 * this is our 'private' 'collective' MULTIPATH buffer head.
 * it contains information about what kind of IO operations were started
@@ -24,7 +22,7 @@ typedef struct multipath_private_data multipath_conf_t;
 */
 struct multipath_bh {
-mddev_t *mddev;
+struct mddev *mddev;
 struct bio *master_bio;
 struct bio bio;
 int path;
drivers/md/raid0.h
 #ifndef _RAID0_H
 #define _RAID0_H
-struct strip_zone
-{
+struct strip_zone {
 sector_t zone_end; /* Start of the next zone (in sectors) */
 sector_t dev_start; /* Zone offset in real dev (in sectors) */
 int nb_dev; /* # of devices attached to the zone */
 };
-struct raid0_private_data
-{
+struct r0conf {
 struct strip_zone *strip_zone;
-mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
+struct md_rdev **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
 int nr_strip_zones;
 };
-typedef struct raid0_private_data raid0_conf_t;
 #endif
drivers/md/raid1.h
 #ifndef _RAID1_H
 #define _RAID1_H
-typedef struct mirror_info mirror_info_t;
 struct mirror_info {
-mdk_rdev_t *rdev;
+struct md_rdev *rdev;
 sector_t head_position;
 };
@@ -17,61 +15,82 @@ struct mirror_info {
 */
 struct pool_info {
-mddev_t *mddev;
+struct mddev *mddev;
 int raid_disks;
 };
-typedef struct r1bio_s r1bio_t;
-struct r1_private_data_s {
-mddev_t *mddev;
-mirror_info_t *mirrors;
+struct r1conf {
+struct mddev *mddev;
+struct mirror_info *mirrors;
 int raid_disks;
+/* When choose the best device for a read (read_balance())
+ * we try to keep sequential reads one the same device
+ * using 'last_used' and 'next_seq_sect'
+ */
 int last_used;
 sector_t next_seq_sect;
+/* During resync, read_balancing is only allowed on the part
+ * of the array that has been resynced. 'next_resync' tells us
+ * where that is.
+ */
+sector_t next_resync;
 spinlock_t device_lock;
+/* list of 'struct r1bio' that need to be processed by raid1d,
+ * whether to retry a read, writeout a resync or recovery
+ * block, or anything else.
+ */
 struct list_head retry_list;
-/* queue pending writes and submit them on unplug */
-struct bio_list pending_bio_list;
-/* for use when syncing mirrors: */
+/* queue pending writes to be submitted on unplug */
+struct bio_list pending_bio_list;
+int pending_count;
+/* for use when syncing mirrors:
+ * We don't allow both normal IO and resync/recovery IO at
+ * the same time - resync/recovery can only happen when there
+ * is no other IO. So when either is active, the other has to wait.
+ * See more details description in raid1.c near raise_barrier().
+ */
+wait_queue_head_t wait_barrier;
 spinlock_t resync_lock;
 int nr_pending;
 int nr_waiting;
 int nr_queued;
 int barrier;
-sector_t next_resync;
-int fullsync; /* set to 1 if a full sync is needed,
- * (fresh device added).
- * Cleared when a sync completes.
- */
-int recovery_disabled; /* when the same as
- * mddev->recovery_disabled
- * we don't allow recovery
- * to be attempted as we
- * expect a read error
- */
-wait_queue_head_t wait_barrier;
+/* Set to 1 if a full sync is needed, (fresh device added).
+ * Cleared when a sync completes.
+ */
+int fullsync;
+/* When the same as mddev->recovery_disabled we don't allow
+ * recovery to be attempted as we expect a read error.
+ */
+int recovery_disabled;
+/* poolinfo contains information about the content of the
+ * mempools - it changes when the array grows or shrinks
+ */
 struct pool_info *poolinfo;
+mempool_t *r1bio_pool;
+mempool_t *r1buf_pool;
+/* temporary buffer to synchronous IO when attempting to repair
+ * a read error.
+ */
 struct page *tmppage;
-mempool_t *r1bio_pool;
-mempool_t *r1buf_pool;
 /* When taking over an array from a different personality, we store
 * the new thread here until we fully activate the array.
 */
-struct mdk_thread_s *thread;
+struct md_thread *thread;
 };
-typedef struct r1_private_data_s conf_t;
 /*
 * this is our 'private' RAID1 bio.
 *
@@ -79,7 +98,7 @@ typedef struct r1_private_data_s conf_t;
 * for this RAID1 operation, and about their status:
 */
-struct r1bio_s {
+struct r1bio {
 atomic_t remaining; /* 'have we finished' count,
 * used from IRQ handlers
 */
@@ -89,7 +108,7 @@ struct r1bio_s {
 sector_t sector;
 int sectors;
 unsigned long state;
-mddev_t *mddev;
+struct mddev *mddev;
 /*
 * original bio going to /dev/mdx
 */
@@ -148,6 +167,6 @@ struct r1bio_s {
 #define R1BIO_MadeGood 7
 #define R1BIO_WriteError 8
-extern int md_raid1_congested(mddev_t *mddev, int bits);
+extern int md_raid1_congested(struct mddev *mddev, int bits);
 #endif
drivers/md/raid10.h
 #ifndef _RAID10_H
 #define _RAID10_H
-typedef struct mirror_info mirror_info_t;
 struct mirror_info {
-mdk_rdev_t *rdev;
+struct md_rdev *rdev;
 sector_t head_position;
 int recovery_disabled; /* matches
 * mddev->recovery_disabled
@@ -13,11 +11,9 @@ struct mirror_info {
 */
 };
-typedef struct r10bio_s r10bio_t;
-struct r10_private_data_s {
-mddev_t *mddev;
-mirror_info_t *mirrors;
+struct r10conf {
+struct mddev *mddev;
+struct mirror_info *mirrors;
 int raid_disks;
 spinlock_t device_lock;
@@ -46,7 +42,7 @@ struct r10_private_data_s {
 struct list_head retry_list;
 /* queue pending writes and submit them on unplug */
 struct bio_list pending_bio_list;
+int pending_count;
 spinlock_t resync_lock;
 int nr_pending;
@@ -68,11 +64,9 @@ struct r10_private_data_s {
 /* When taking over an array from a different personality, we store
 * the new thread here until we fully activate the array.
 */
-struct mdk_thread_s *thread;
+struct md_thread *thread;
 };
-typedef struct r10_private_data_s conf_t;
 /*
 * this is our 'private' RAID10 bio.
 *
@@ -80,14 +74,14 @@ typedef struct r10_private_data_s conf_t;
 * for this RAID10 operation, and about their status:
 */
-struct r10bio_s {
+struct r10bio {
 atomic_t remaining; /* 'have we finished' count,
 * used from IRQ handlers
 */
 sector_t sector; /* virtual sector number */
 int sectors;
 unsigned long state;
-mddev_t *mddev;
+struct mddev *mddev;
 /*
 * original bio going to /dev/mdx
 */
lib/raid6/int.uc
@@ -11,7 +11,7 @@
 * ----------------------------------------------------------------------- */
 /*
- * raid6int$#.c
+ * int$#.c
 *
 * $#-way unrolled portable integer math RAID-6 instruction set
 *