Commit 518257b1 authored by Mike Snitzer

dm mpath: switch to using bitops for state flags

Mechanical change that doesn't make any real effort to reduce the use of
m->lock; that will come later (once atomics are used for counters, etc).
Suggested-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 813923b1
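The diff below converts the per-field "bool ...:1" state bits in struct multipath into a single "unsigned long flags" word whose bits are set, cleared and read with the kernel's atomic bitops (set_bit(), clear_bit(), test_bit()), adding smp_mb__after_atomic() where the new code needs ordering. For readers unfamiliar with the idiom, here is a minimal user-space sketch of the same pattern, assuming GCC/Clang __atomic builtins as stand-ins for the kernel helpers; every name prefixed demo_ or DEMO_ is illustrative only and not part of the patch.

/* Minimal user-space sketch of the flag-word idiom used by this patch.
 * The kernel's set_bit()/clear_bit()/test_bit() are approximated with
 * __atomic builtins; all demo_ and DEMO_ names are illustrative only. */
#include <stdio.h>

#define DEMO_QUEUE_IO		0	/* bit numbers, like the MPATHF_* flags */
#define DEMO_QUEUE_IF_NO_PATH	1

static void demo_set_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

static void demo_clear_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_and(addr, ~(1UL << nr), __ATOMIC_RELAXED);
}

static int demo_test_bit(int nr, const unsigned long *addr)
{
	return (__atomic_load_n(addr, __ATOMIC_RELAXED) >> nr) & 1;
}

int main(void)
{
	unsigned long flags = 0;	/* one word replaces several bool bitfields */

	demo_set_bit(DEMO_QUEUE_IO, &flags);	/* was: m->queue_io = true */
	printf("queue_io=%d\n", demo_test_bit(DEMO_QUEUE_IO, &flags));
	demo_clear_bit(DEMO_QUEUE_IO, &flags);	/* was: m->queue_io = false */
	printf("queue_io=%d\n", demo_test_bit(DEMO_QUEUE_IO, &flags));
	return 0;
}

As the commit message notes, the conversion itself is mechanical: most call sites still take m->lock around the surrounding logic, and reducing that locking is deferred until the remaining counters become atomic.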
@@ -83,13 +83,7 @@ struct multipath {
 	struct priority_group *current_pg;
 	struct priority_group *next_pg;	/* Switch to this PG if set */
 
-	bool queue_io:1;		/* Must we queue all I/O? */
-	bool queue_if_no_path:1;	/* Queue I/O if last path fails? */
-	bool saved_queue_if_no_path:1;	/* Saved state during suspension */
-	bool retain_attached_hw_handler:1; /* If there's already a hw_handler present, don't change it. */
-	bool pg_init_disabled:1;	/* pg_init is not currently allowed */
-	bool pg_init_required:1;	/* pg_init needs calling? */
-	bool pg_init_delay_retry:1;	/* Delay pg_init retry? */
+	unsigned long flags;		/* Multipath state flags */
 
 	unsigned pg_init_retries;	/* Number of times to retry pg_init */
 	unsigned pg_init_count;		/* Number of times pg_init called */
@@ -122,6 +116,17 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
 static void trigger_event(struct work_struct *work);
 static void activate_path(struct work_struct *work);
 
+/*-----------------------------------------------
+ * Multipath state flags.
+ *-----------------------------------------------*/
+#define MPATHF_QUEUE_IO 0			/* Must we queue all I/O? */
+#define MPATHF_QUEUE_IF_NO_PATH 1		/* Queue I/O if last path fails? */
+#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2		/* Saved state during suspension */
+#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3	/* If there's already a hw_handler present, don't change it. */
+#define MPATHF_PG_INIT_DISABLED 4		/* pg_init is not currently allowed */
+#define MPATHF_PG_INIT_REQUIRED 5		/* pg_init needs calling? */
+#define MPATHF_PG_INIT_DELAY_RETRY 6		/* Delay pg_init retry? */
+
 /*-----------------------------------------------
  * Allocation routines
@@ -189,7 +194,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
 	if (m) {
 		INIT_LIST_HEAD(&m->priority_groups);
 		spin_lock_init(&m->lock);
-		m->queue_io = true;
+		set_bit(MPATHF_QUEUE_IO, &m->flags);
 		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
 		INIT_WORK(&m->trigger_event, trigger_event);
 		init_waitqueue_head(&m->pg_init_wait);
@@ -274,17 +279,17 @@ static int __pg_init_all_paths(struct multipath *m)
 	struct pgpath *pgpath;
 	unsigned long pg_init_delay = 0;
 
-	if (m->pg_init_in_progress || m->pg_init_disabled)
+	if (m->pg_init_in_progress || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
 		return 0;
 
 	m->pg_init_count++;
-	m->pg_init_required = false;
+	clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 
 	/* Check here to reset pg_init_required */
 	if (!m->current_pg)
 		return 0;
 
-	if (m->pg_init_delay_retry)
+	if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
 		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
 						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
 	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
@@ -304,11 +309,11 @@ static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
 
 	/* Must we initialise the PG first, and queue I/O till it's ready? */
 	if (m->hw_handler_name) {
-		m->pg_init_required = true;
-		m->queue_io = true;
+		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
+		set_bit(MPATHF_QUEUE_IO, &m->flags);
 	} else {
-		m->pg_init_required = false;
-		m->queue_io = false;
+		clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
+		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 	}
 
 	m->pg_init_count = 0;
@@ -337,7 +342,7 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
 	bool bypassed = true;
 
 	if (!m->nr_valid_paths) {
-		m->queue_io = false;
+		clear_bit(MPATHF_QUEUE_IO, &m->flags);
 		goto failed;
 	}
@@ -365,7 +370,7 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
 				continue;
 			if (!__choose_path_in_pg(m, pg, nr_bytes)) {
 				if (!bypassed)
-					m->pg_init_delay_retry = true;
+					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
 				return;
 			}
 		}
@@ -389,8 +394,9 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
  */
 static int __must_push_back(struct multipath *m)
 {
-	return (m->queue_if_no_path ||
-		(m->queue_if_no_path != m->saved_queue_if_no_path &&
+	return (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) ||
+		((test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) !=
+		  test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) &&
 		 dm_noflush_suspending(m->ti)));
 }
@@ -411,7 +417,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 	spin_lock_irq(&m->lock);
 
 	/* Do we need to select a new pgpath? */
-	if (!m->current_pgpath || !m->queue_io)
+	if (!m->current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
 		__choose_pgpath(m, nr_bytes);
 
 	pgpath = m->current_pgpath;
@@ -420,7 +426,8 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
 		if (!__must_push_back(m))
 			r = -EIO;	/* Failed */
 		goto out_unlock;
-	} else if (m->queue_io || m->pg_init_required) {
+	} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
+		   test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
 		__pg_init_all_paths(m);
 		goto out_unlock;
 	}
@@ -503,11 +510,22 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
 	spin_lock_irqsave(&m->lock, flags);
 
-	if (save_old_value)
-		m->saved_queue_if_no_path = m->queue_if_no_path;
-	else
-		m->saved_queue_if_no_path = queue_if_no_path;
-	m->queue_if_no_path = queue_if_no_path;
+	if (save_old_value) {
+		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
+			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+		else
+			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+	} else {
+		if (queue_if_no_path)
+			set_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+		else
+			clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
+	}
+	if (queue_if_no_path)
+		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+	else
+		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
 
 	spin_unlock_irqrestore(&m->lock, flags);
 
 	if (!queue_if_no_path)
@@ -600,10 +618,10 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 			goto bad;
 	}
 
-	if (m->retain_attached_hw_handler || m->hw_handler_name)
+	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) || m->hw_handler_name)
 		q = bdev_get_queue(p->path.dev->bdev);
 
-	if (m->retain_attached_hw_handler) {
+	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 retain:
 		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 		if (attached_handler_name) {
@@ -808,7 +826,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
 		}
 
 		if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
-			m->retain_attached_hw_handler = true;
+			set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
 			continue;
 		}
@@ -944,20 +962,16 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
 
 static void flush_multipath_work(struct multipath *m)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&m->lock, flags);
-	m->pg_init_disabled = true;
-	spin_unlock_irqrestore(&m->lock, flags);
+	set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+	smp_mb__after_atomic();
 
 	flush_workqueue(kmpath_handlerd);
 	multipath_wait_for_pg_init_completion(m);
 	flush_workqueue(kmultipathd);
 	flush_work(&m->trigger_event);
 
-	spin_lock_irqsave(&m->lock, flags);
-	m->pg_init_disabled = false;
-	spin_unlock_irqrestore(&m->lock, flags);
+	clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+	smp_mb__after_atomic();
 }
 
 static void multipath_dtr(struct dm_target *ti)
@@ -1152,8 +1166,8 @@ static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
 
 	spin_lock_irqsave(&m->lock, flags);
 
-	if (m->pg_init_count <= m->pg_init_retries && !m->pg_init_disabled)
-		m->pg_init_required = true;
+	if (m->pg_init_count <= m->pg_init_retries && !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
+		set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
 	else
 		limit_reached = true;
@@ -1219,19 +1233,23 @@ static void pg_init_done(void *data, int errors)
 			m->current_pgpath = NULL;
 			m->current_pg = NULL;
 		}
-	} else if (!m->pg_init_required)
+	} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
 		pg->bypassed = false;
 
 	if (--m->pg_init_in_progress)
 		/* Activations of other paths are still on going */
 		goto out;
 
-	if (m->pg_init_required) {
-		m->pg_init_delay_retry = delay_retry;
+	if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
+		if (delay_retry)
+			set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+		else
+			clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+
 		if (__pg_init_all_paths(m))
 			goto out;
 	}
-	m->queue_io = false;
+	clear_bit(MPATHF_QUEUE_IO, &m->flags);
 
 	/*
 	 * Wake up any thread waiting to suspend.
@@ -1300,7 +1318,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
 
 	spin_lock_irqsave(&m->lock, flags);
 	if (!m->nr_valid_paths) {
-		if (!m->queue_if_no_path) {
+		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
 			if (!__must_push_back(m))
 				r = -EIO;
 		} else {
@@ -1364,11 +1382,12 @@ static void multipath_postsuspend(struct dm_target *ti)
 static void multipath_resume(struct dm_target *ti)
 {
 	struct multipath *m = ti->private;
-	unsigned long flags;
 
-	spin_lock_irqsave(&m->lock, flags);
-	m->queue_if_no_path = m->saved_queue_if_no_path;
-	spin_unlock_irqrestore(&m->lock, flags);
+	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags))
+		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+	else
+		clear_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
+	smp_mb__after_atomic();
 }
 
 /*
@@ -1402,19 +1421,19 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 
 	/* Features */
 	if (type == STATUSTYPE_INFO)
-		DMEMIT("2 %u %u ", m->queue_io, m->pg_init_count);
+		DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags), m->pg_init_count);
 	else {
-		DMEMIT("%u ", m->queue_if_no_path +
+		DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
 			      (m->pg_init_retries > 0) * 2 +
 			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
-			      m->retain_attached_hw_handler);
-		if (m->queue_if_no_path)
+			      test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags));
+		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			DMEMIT("queue_if_no_path ");
 		if (m->pg_init_retries)
 			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
 		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
 			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
-		if (m->retain_attached_hw_handler)
+		if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
 			DMEMIT("retain_attached_hw_handler ");
 	}
@@ -1572,7 +1591,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 		__choose_pgpath(m, 0);
 
 	if (m->current_pgpath) {
-		if (!m->queue_io) {
+		if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
 			*bdev = m->current_pgpath->path.dev->bdev;
 			*mode = m->current_pgpath->path.dev->mode;
 			r = 0;
@@ -1582,7 +1601,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 		}
 	} else {
 		/* No path is available */
-		if (m->queue_if_no_path)
+		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			r = -ENOTCONN;
 		else
 			r = -EIO;
@@ -1596,7 +1615,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 			/* Path status changed, redo selection */
 			__choose_pgpath(m, 0);
 		}
-		if (m->pg_init_required)
+		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
 			__pg_init_all_paths(m);
 		spin_unlock_irqrestore(&m->lock, flags);
 		dm_table_run_md_queue_async(m->ti->table);
@@ -1657,7 +1676,7 @@ static int multipath_busy(struct dm_target *ti)
 
 	/* pg_init in progress or no paths available */
 	if (m->pg_init_in_progress ||
-	    (!m->nr_valid_paths && m->queue_if_no_path)) {
+	    (!m->nr_valid_paths && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
 		busy = true;
 		goto out;
 	}