Commit e54f77dd authored by Chandra Seetharaman, committed by Alasdair G Kergon

dm mpath: call activate fn for each path in pg_init

Fixed a problem affecting reinstatement of passive paths.

Before we moved the hardware handler from dm to SCSI, it performed pg_init
for a path group and did not maintain any per-path state in the hardware
handler code.

But in SCSI dh, such state is now maintained, as we want to fail I/O early on a
path if it is not the active path.

All the hardware handlers now maintain a per-path state, set to active or
some form of inactive. They also provide a prep_fn(), which uses this state
to fail I/O early, without it ever being sent to the device.
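
As a rough sketch of that pattern (illustrative only, not part of this
patch: the handler name, my_dh_data, my_dh_get_data and path_state are
hypothetical stand-ins, modeled on the hp_sw device handler), a prep_fn()
consults the cached path state and kills I/O aimed at a passive path:

/* Illustrative sketch of a scsi_dh prep_fn(); the my_dh_* names are
 * hypothetical.  Only a path marked active lets I/O through. */
static int my_dh_prep_fn(struct scsi_device *sdev, struct request *req)
{
        struct my_dh_data *h = my_dh_get_data(sdev);

        if (h->path_state != MY_DH_PATH_ACTIVE) {
                req->cmd_flags |= REQ_QUIET;    /* fail without noise */
                return BLKPREP_KILL;    /* I/O never reaches the device */
        }

        return BLKPREP_OK;
}

This is why only a path that actually received an "activate" can carry I/O.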

So, in effect, when dm-multipath calls scsi_dh_activate(), the activate is
sent to only one path, and the "state" of that path alone is changed to
"active", while the other paths in the same path group are never updated,
because they never receive an "activate".

To make sure all the paths in a path group get their state set properly
when a pg_init happens, we need to call scsi_dh_activate() on every path in
the group.

Doing this at the hardware handler layer is not a good option, as we want
the multipath layer, not the hardware handler, to define the relationship
between paths and path groups.

This patch sends an "activate" to each path in a path group when the path
group is switched. It also sends an "activate" when a path is reinstated.
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent a0cf7ea9
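
For readability, here is the heart of the change with the diff markers
stripped (reassembled from the hunks below, nothing new): pg_init now
queues one activate_path work item for every path in the selected priority
group, and each work item activates its own path and reports back.

/* In process_queued_ios(): queue an "activate" for each path in the
 * group.  pg_init_in_progress counts outstanding activations and is
 * decremented again in pg_init_done(). */
list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
        if (queue_work(kmpath_handlerd, &tmp->activate_path))
                m->pg_init_in_progress++;
}

/* Per-path work function: activate this one path, then report in. */
static void activate_path(struct work_struct *work)
{
        int ret;
        struct pgpath *pgpath =
                container_of(work, struct pgpath, activate_path);

        ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
        pg_init_done(&pgpath->path, ret);
}

Because queue_work() returns nonzero only when the work item was not
already queued, pg_init_in_progress ends up counting exactly the
activations in flight; pg_init_done() decrements it and re-kicks
process_queued_ios() only when the last one completes.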
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -35,6 +35,7 @@ struct pgpath {
 
         struct dm_path path;
         struct work_struct deactivate_path;
+        struct work_struct activate_path;
 };
 
 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
@@ -64,8 +65,6 @@ struct multipath {
         spinlock_t lock;
 
         const char *hw_handler_name;
-        struct work_struct activate_path;
-        struct pgpath *pgpath_to_activate;
         unsigned nr_priority_groups;
         struct list_head priority_groups;
         unsigned pg_init_required;      /* pg_init needs calling? */
@@ -128,6 +127,7 @@ static struct pgpath *alloc_pgpath(void)
         if (pgpath) {
                 pgpath->is_active = 1;
                 INIT_WORK(&pgpath->deactivate_path, deactivate_path);
+                INIT_WORK(&pgpath->activate_path, activate_path);
         }
 
         return pgpath;
@@ -160,7 +160,6 @@ static struct priority_group *alloc_priority_group(void)
 
 static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
 {
-        unsigned long flags;
         struct pgpath *pgpath, *tmp;
         struct multipath *m = ti->private;
 
@@ -169,10 +168,6 @@ static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
                 if (m->hw_handler_name)
                         scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
                 dm_put_device(ti, pgpath->path.dev);
-                spin_lock_irqsave(&m->lock, flags);
-                if (m->pgpath_to_activate == pgpath)
-                        m->pgpath_to_activate = NULL;
-                spin_unlock_irqrestore(&m->lock, flags);
                 free_pgpath(pgpath);
         }
 }
@@ -202,7 +197,6 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
                 m->queue_io = 1;
                 INIT_WORK(&m->process_queued_ios, process_queued_ios);
                 INIT_WORK(&m->trigger_event, trigger_event);
-                INIT_WORK(&m->activate_path, activate_path);
                 m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
                 if (!m->mpio_pool) {
                         kfree(m);
@@ -427,8 +421,8 @@ static void process_queued_ios(struct work_struct *work)
 {
         struct multipath *m =
                 container_of(work, struct multipath, process_queued_ios);
-        struct pgpath *pgpath = NULL;
-        unsigned init_required = 0, must_queue = 1;
+        struct pgpath *pgpath = NULL, *tmp;
+        unsigned must_queue = 1;
         unsigned long flags;
 
         spin_lock_irqsave(&m->lock, flags);
@@ -446,19 +440,15 @@ static void process_queued_ios(struct work_struct *work)
                 must_queue = 0;
 
         if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
-                m->pgpath_to_activate = pgpath;
                 m->pg_init_count++;
                 m->pg_init_required = 0;
-                m->pg_init_in_progress = 1;
-                init_required = 1;
+                list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
+                        if (queue_work(kmpath_handlerd, &tmp->activate_path))
+                                m->pg_init_in_progress++;
+                }
         }
 
 out:
         spin_unlock_irqrestore(&m->lock, flags);
-        if (init_required)
-                queue_work(kmpath_handlerd, &m->activate_path);
-
         if (!must_queue)
                 dispatch_queued_ios(m);
 }
@@ -946,9 +936,13 @@ static int reinstate_path(struct pgpath *pgpath)
 
         pgpath->is_active = 1;
 
-        m->current_pgpath = NULL;
-        if (!m->nr_valid_paths++ && m->queue_size)
+        if (!m->nr_valid_paths++ && m->queue_size) {
+                m->current_pgpath = NULL;
                 queue_work(kmultipathd, &m->process_queued_ios);
+        } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
+                if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+                        m->pg_init_in_progress++;
+        }
 
         dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
                        pgpath->path.dev->name, m->nr_valid_paths);
@@ -1124,15 +1118,18 @@ static void pg_init_done(struct dm_path *path, int errors)
 
         spin_lock_irqsave(&m->lock, flags);
         if (errors) {
-                DMERR("Could not failover device. Error %d.", errors);
-                m->current_pgpath = NULL;
-                m->current_pg = NULL;
+                if (pgpath == m->current_pgpath) {
+                        DMERR("Could not failover device. Error %d.", errors);
+                        m->current_pgpath = NULL;
+                        m->current_pg = NULL;
+                }
         } else if (!m->pg_init_required) {
                 m->queue_io = 0;
                 pg->bypassed = 0;
         }
 
-        m->pg_init_in_progress = 0;
-        queue_work(kmultipathd, &m->process_queued_ios);
+        m->pg_init_in_progress--;
+        if (!m->pg_init_in_progress)
+                queue_work(kmultipathd, &m->process_queued_ios);
         spin_unlock_irqrestore(&m->lock, flags);
 }
 
@@ -1140,19 +1137,11 @@ static void pg_init_done(struct dm_path *path, int errors)
 static void activate_path(struct work_struct *work)
 {
         int ret;
-        struct multipath *m =
-                container_of(work, struct multipath, activate_path);
-        struct dm_path *path;
-        unsigned long flags;
-
-        spin_lock_irqsave(&m->lock, flags);
-        path = &m->pgpath_to_activate->path;
-        m->pgpath_to_activate = NULL;
-        spin_unlock_irqrestore(&m->lock, flags);
-        if (!path)
-                return;
-        ret = scsi_dh_activate(bdev_get_queue(path->dev->bdev));
-        pg_init_done(path, ret);
+        struct pgpath *pgpath =
+                container_of(work, struct pgpath, activate_path);
+
+        ret = scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev));
+        pg_init_done(&pgpath->path, ret);
 }
 
 /*