Commit a0edc56a authored by Neil Brown's avatar Neil Brown Committed by Linus Torvalds

[PATCH] md 2 of 22 - Make device plugging work for md/raid5

We embed a request_queue_t in the mddev structure and so
have a separate one for each mddev.
This is used for plugging (in raid5).

Given this embedded request_queue_t, md_make_request no longer
needs to map from device number to mddev, but can map from
the queue to the mddev instead.
parent 9beeab32
...@@ -172,7 +172,7 @@ void del_mddev_mapping(mddev_t * mddev, kdev_t dev) ...@@ -172,7 +172,7 @@ void del_mddev_mapping(mddev_t * mddev, kdev_t dev)
static int md_make_request (request_queue_t *q, struct bio *bio) static int md_make_request (request_queue_t *q, struct bio *bio)
{ {
mddev_t *mddev = kdev_to_mddev(to_kdev_t(bio->bi_bdev->bd_dev)); mddev_t *mddev = q->queuedata;
if (mddev && mddev->pers) if (mddev && mddev->pers)
return mddev->pers->make_request(mddev, bio_rw(bio), bio); return mddev->pers->make_request(mddev, bio_rw(bio), bio);
...@@ -182,6 +182,12 @@ static int md_make_request (request_queue_t *q, struct bio *bio) ...@@ -182,6 +182,12 @@ static int md_make_request (request_queue_t *q, struct bio *bio)
} }
} }
/*
 * Request handler installed on the default md queue: any bio arriving
 * for a device that has not been initialised is simply failed.
 */
static int md_fail_request (request_queue_t *q, struct bio *bio)
{
	/* Complete the bio immediately with an I/O error. */
	bio_io_error(bio);

	return 0;
}
static mddev_t * alloc_mddev(kdev_t dev) static mddev_t * alloc_mddev(kdev_t dev)
{ {
mddev_t *mddev; mddev_t *mddev;
...@@ -1711,6 +1717,9 @@ static int do_md_run(mddev_t * mddev) ...@@ -1711,6 +1717,9 @@ static int do_md_run(mddev_t * mddev)
} }
mddev->pers = pers[pnum]; mddev->pers = pers[pnum];
blk_queue_make_request(&mddev->queue, md_make_request);
mddev->queue.queuedata = mddev;
err = mddev->pers->run(mddev); err = mddev->pers->run(mddev);
if (err) { if (err) {
printk(KERN_ERR "md: pers->run() failed ...\n"); printk(KERN_ERR "md: pers->run() failed ...\n");
...@@ -3616,6 +3625,15 @@ static void md_geninit(void) ...@@ -3616,6 +3625,15 @@ static void md_geninit(void)
#endif #endif
} }
/*
 * Map a device number to its request queue.  Devices with an active
 * mddev use the queue embedded in that mddev; anything else falls back
 * to the block layer's default queue for the md major.
 */
request_queue_t * md_queue_proc(kdev_t dev)
{
	mddev_t *md = kdev_to_mddev(dev);

	if (md != NULL)
		return &md->queue;

	/* No mddev for this device: hand back the shared default queue. */
	return BLK_DEFAULT_QUEUE(MAJOR_NR);
}
int __init md_init(void) int __init md_init(void)
{ {
static char * name = "mdrecoveryd"; static char * name = "mdrecoveryd";
...@@ -3640,8 +3658,9 @@ int __init md_init(void) ...@@ -3640,8 +3658,9 @@ int __init md_init(void)
S_IFBLK | S_IRUSR | S_IWUSR, &md_fops, NULL); S_IFBLK | S_IRUSR | S_IWUSR, &md_fops, NULL);
} }
/* forward all md request to md_make_request */ /* all requests on an uninitialised device get failed... */
blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), md_make_request); blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), md_fail_request);
blk_dev[MAJOR_NR].queue = md_queue_proc;
add_gendisk(&md_gendisk); add_gendisk(&md_gendisk);
......
...@@ -1225,14 +1225,15 @@ static inline void raid5_activate_delayed(raid5_conf_t *conf) ...@@ -1225,14 +1225,15 @@ static inline void raid5_activate_delayed(raid5_conf_t *conf)
} }
static void raid5_unplug_device(void *data) static void raid5_unplug_device(void *data)
{ {
raid5_conf_t *conf = (raid5_conf_t *)data; request_queue_t *q = data;
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&conf->device_lock, flags); spin_lock_irqsave(&conf->device_lock, flags);
raid5_activate_delayed(conf); if (blk_remove_plug(q))
raid5_activate_delayed(conf);
conf->plugged = 0;
md_wakeup_thread(conf->thread); md_wakeup_thread(conf->thread);
spin_unlock_irqrestore(&conf->device_lock, flags); spin_unlock_irqrestore(&conf->device_lock, flags);
...@@ -1241,17 +1242,13 @@ static void raid5_unplug_device(void *data) ...@@ -1241,17 +1242,13 @@ static void raid5_unplug_device(void *data)
static inline void raid5_plug_device(raid5_conf_t *conf) static inline void raid5_plug_device(raid5_conf_t *conf)
{ {
spin_lock_irq(&conf->device_lock); spin_lock_irq(&conf->device_lock);
if (list_empty(&conf->delayed_list)) blk_plug_device(&conf->mddev->queue);
if (!conf->plugged) {
conf->plugged = 1;
queue_task(&conf->plug_tq, &tq_disk);
}
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
} }
static int make_request (mddev_t *mddev, int rw, struct bio * bi) static int make_request (mddev_t *mddev, int rw, struct bio * bi)
{ {
raid5_conf_t *conf = (raid5_conf_t *) mddev->private; raid5_conf_t *conf = mddev_to_conf(mddev);
const unsigned int raid_disks = conf->raid_disks; const unsigned int raid_disks = conf->raid_disks;
const unsigned int data_disks = raid_disks - 1; const unsigned int data_disks = raid_disks - 1;
unsigned int dd_idx, pd_idx; unsigned int dd_idx, pd_idx;
...@@ -1352,7 +1349,7 @@ static void raid5d (void *data) ...@@ -1352,7 +1349,7 @@ static void raid5d (void *data)
if (list_empty(&conf->handle_list) && if (list_empty(&conf->handle_list) &&
atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD && atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
!conf->plugged && !blk_queue_plugged(&mddev->queue) &&
!list_empty(&conf->delayed_list)) !list_empty(&conf->delayed_list))
raid5_activate_delayed(conf); raid5_activate_delayed(conf);
...@@ -1443,10 +1440,7 @@ static int run (mddev_t *mddev) ...@@ -1443,10 +1440,7 @@ static int run (mddev_t *mddev)
atomic_set(&conf->active_stripes, 0); atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0); atomic_set(&conf->preread_active_stripes, 0);
conf->plugged = 0; mddev->queue.unplug_fn = raid5_unplug_device;
conf->plug_tq.sync = 0;
conf->plug_tq.routine = &raid5_unplug_device;
conf->plug_tq.data = conf;
PRINTK("raid5: run(md%d) called.\n", mdidx(mddev)); PRINTK("raid5: run(md%d) called.\n", mdidx(mddev));
......
...@@ -214,6 +214,8 @@ struct mddev_s ...@@ -214,6 +214,8 @@ struct mddev_s
atomic_t recovery_active; /* blocks scheduled, but not written */ atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait; wait_queue_head_t recovery_wait;
request_queue_t queue; /* for plugging ... */
struct list_head all_mddevs; struct list_head all_mddevs;
}; };
......
...@@ -176,7 +176,7 @@ struct stripe_head { ...@@ -176,7 +176,7 @@ struct stripe_head {
* is put on a "delayed" queue until there are no stripes currently * is put on a "delayed" queue until there are no stripes currently
* in a pre-read phase. Further, if the "delayed" queue is empty when * in a pre-read phase. Further, if the "delayed" queue is empty when
* a stripe is put on it then we "plug" the queue and do not process it * a stripe is put on it then we "plug" the queue and do not process it
* until an unplg call is made. (the tq_disk list is run). * until an unplug call is made. (blk_run_queues is run).
* *
* When preread is initiated on a stripe, we set PREREAD_ACTIVE and add * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
* it to the count of prereading stripes. * it to the count of prereading stripes.
...@@ -228,9 +228,6 @@ struct raid5_private_data { ...@@ -228,9 +228,6 @@ struct raid5_private_data {
* waiting for 25% to be free * waiting for 25% to be free
*/ */
spinlock_t device_lock; spinlock_t device_lock;
int plugged;
struct tq_struct plug_tq;
}; };
typedef struct raid5_private_data raid5_conf_t; typedef struct raid5_private_data raid5_conf_t;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment