Commit a271a89c authored by Mike Snitzer

dm mpath: take m->lock spinlock when testing QUEUE_IF_NO_PATH

Fix multipath_end_io, multipath_end_io_bio and multipath_busy to take
m->lock while testing whether the MPATHF_QUEUE_IF_NO_PATH bit is set.
These are all slow-path cases that are only reached when no paths are
available, so the extra locking isn't a performance hit.  Correctness
matters most.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent 69cea0d4
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1621,13 +1621,17 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
 		if (pgpath)
 			fail_path(pgpath);
 
-		if (atomic_read(&m->nr_valid_paths) == 0 &&
-		    !must_push_back_rq(m)) {
-			if (error == BLK_STS_IOERR)
-				dm_report_EIO(m);
-			/* complete with the original error */
-			r = DM_ENDIO_DONE;
+		if (!atomic_read(&m->nr_valid_paths)) {
+			unsigned long flags;
+			spin_lock_irqsave(&m->lock, flags);
+			if (!must_push_back_rq(m)) {
+				if (error == BLK_STS_IOERR)
+					dm_report_EIO(m);
+				/* complete with the original error */
+				r = DM_ENDIO_DONE;
+			}
+			spin_unlock_irqrestore(&m->lock, flags);
 		}
 	}
 
 	if (pgpath) {
@@ -1656,16 +1660,20 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
 	if (pgpath)
 		fail_path(pgpath);
 
-	if (atomic_read(&m->nr_valid_paths) == 0 &&
-	    !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
-		if (__must_push_back(m)) {
-			r = DM_ENDIO_REQUEUE;
-		} else {
-			dm_report_EIO(m);
-			*error = BLK_STS_IOERR;
+	if (!atomic_read(&m->nr_valid_paths)) {
+		spin_lock_irqsave(&m->lock, flags);
+		if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+			if (__must_push_back(m)) {
+				r = DM_ENDIO_REQUEUE;
+			} else {
+				dm_report_EIO(m);
+				*error = BLK_STS_IOERR;
+			}
+			spin_unlock_irqrestore(&m->lock, flags);
+			goto done;
 		}
-		goto done;
+		spin_unlock_irqrestore(&m->lock, flags);
 	}
 
 	spin_lock_irqsave(&m->lock, flags);
 	bio_list_add(&m->queued_bios, clone);
@@ -1962,10 +1970,11 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 		}
 	} else {
 		/* No path is available */
+		r = -EIO;
+		spin_lock_irqsave(&m->lock, flags);
 		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
 			r = -ENOTCONN;
-		else
-			r = -EIO;
+		spin_unlock_irqrestore(&m->lock, flags);
 	}
 
 	if (r == -ENOTCONN) {
@@ -2036,8 +2045,15 @@ static int multipath_busy(struct dm_target *ti)
 		return true;
 
 	/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
-	if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
-		return (m->queue_mode != DM_TYPE_REQUEST_BASED);
+	if (!atomic_read(&m->nr_valid_paths)) {
+		unsigned long flags;
+		spin_lock_irqsave(&m->lock, flags);
+		if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
+			spin_unlock_irqrestore(&m->lock, flags);
+			return (m->queue_mode != DM_TYPE_REQUEST_BASED);
+		}
+		spin_unlock_irqrestore(&m->lock, flags);
+	}
 
 	/* Guess which priority_group will be used at next mapping time */
 	pg = READ_ONCE(m->current_pg);
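The pattern applied in every hunk above is the same: keep the lockless atomic_read() check of nr_valid_paths as the fast path, and only take m->lock to re-test the MPATHF_QUEUE_IF_NO_PATH flag once that fast path reports that no paths are left. The following standalone userspace sketch illustrates the pattern with pthreads and C11 atomics; it is not dm-mpath code, and the names (mp_state, should_queue_io, queue_if_no_path) are illustrative only.

/*
 * Userspace analogue of the locking pattern applied by this commit:
 * a lock-free fast-path check of a counter, followed by a re-check of
 * a flag under the lock before acting on it.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mp_state {
	pthread_mutex_t lock;        /* stands in for m->lock */
	atomic_int nr_valid_paths;   /* read locklessly on the fast path */
	bool queue_if_no_path;       /* only read/written under lock */
};

/* Returns true if I/O should be queued rather than failed. */
static bool should_queue_io(struct mp_state *m)
{
	bool queue;

	/* Fast path: paths are available, no need to take the lock. */
	if (atomic_load(&m->nr_valid_paths) > 0)
		return false;

	/*
	 * Slow path: no paths left.  Re-check the flag under the lock so
	 * a concurrent toggle of queue_if_no_path cannot be missed.
	 */
	pthread_mutex_lock(&m->lock);
	queue = m->queue_if_no_path;
	pthread_mutex_unlock(&m->lock);

	return queue;
}

int main(void)
{
	struct mp_state m = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.nr_valid_paths = 0,
		.queue_if_no_path = true,
	};

	printf("queue I/O: %s\n", should_queue_io(&m) ? "yes" : "no");
	return 0;
}

Because the flag is only ever inspected under the lock on the slow path, this cannot race with a concurrent "queue_if_no_path" change, while the counter read stays lock-free so the common all-paths-healthy case pays no locking cost. That is the trade-off the commit message describes.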