Commit a271a89c authored by Mike Snitzer

dm mpath: take m->lock spinlock when testing QUEUE_IF_NO_PATH

Fix multipath_end_io, multipath_end_io_bio and multipath_busy to take
m->lock while testing if MPATHF_QUEUE_IF_NO_PATH bit is set.  These are
all slow-path cases when no paths are available so extra locking isn't a
performance hit.  Correctness matters most.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 69cea0d4
...@@ -1621,12 +1621,16 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone, ...@@ -1621,12 +1621,16 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
if (pgpath) if (pgpath)
fail_path(pgpath); fail_path(pgpath);
if (atomic_read(&m->nr_valid_paths) == 0 && if (!atomic_read(&m->nr_valid_paths)) {
!must_push_back_rq(m)) { unsigned long flags;
if (error == BLK_STS_IOERR) spin_lock_irqsave(&m->lock, flags);
dm_report_EIO(m); if (!must_push_back_rq(m)) {
/* complete with the original error */ if (error == BLK_STS_IOERR)
r = DM_ENDIO_DONE; dm_report_EIO(m);
/* complete with the original error */
r = DM_ENDIO_DONE;
}
spin_unlock_irqrestore(&m->lock, flags);
} }
} }
...@@ -1656,15 +1660,19 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, ...@@ -1656,15 +1660,19 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
if (pgpath) if (pgpath)
fail_path(pgpath); fail_path(pgpath);
if (atomic_read(&m->nr_valid_paths) == 0 && if (!atomic_read(&m->nr_valid_paths)) {
!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { spin_lock_irqsave(&m->lock, flags);
if (__must_push_back(m)) { if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
r = DM_ENDIO_REQUEUE; if (__must_push_back(m)) {
} else { r = DM_ENDIO_REQUEUE;
dm_report_EIO(m); } else {
*error = BLK_STS_IOERR; dm_report_EIO(m);
*error = BLK_STS_IOERR;
}
spin_unlock_irqrestore(&m->lock, flags);
goto done;
} }
goto done; spin_unlock_irqrestore(&m->lock, flags);
} }
spin_lock_irqsave(&m->lock, flags); spin_lock_irqsave(&m->lock, flags);
...@@ -1962,10 +1970,11 @@ static int multipath_prepare_ioctl(struct dm_target *ti, ...@@ -1962,10 +1970,11 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
} }
} else { } else {
/* No path is available */ /* No path is available */
r = -EIO;
spin_lock_irqsave(&m->lock, flags);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
r = -ENOTCONN; r = -ENOTCONN;
else spin_unlock_irqrestore(&m->lock, flags);
r = -EIO;
} }
if (r == -ENOTCONN) { if (r == -ENOTCONN) {
...@@ -2036,8 +2045,15 @@ static int multipath_busy(struct dm_target *ti) ...@@ -2036,8 +2045,15 @@ static int multipath_busy(struct dm_target *ti)
return true; return true;
/* no paths available, for blk-mq: rely on IO mapping to delay requeue */ /* no paths available, for blk-mq: rely on IO mapping to delay requeue */
if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) if (!atomic_read(&m->nr_valid_paths)) {
return (m->queue_mode != DM_TYPE_REQUEST_BASED); unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
spin_unlock_irqrestore(&m->lock, flags);
return (m->queue_mode != DM_TYPE_REQUEST_BASED);
}
spin_unlock_irqrestore(&m->lock, flags);
}
/* Guess which priority_group will be used at next mapping time */ /* Guess which priority_group will be used at next mapping time */
pg = READ_ONCE(m->current_pg); pg = READ_ONCE(m->current_pg);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment