Commit 6ffeba96 authored by Linus Torvalds

Merge tag 'dm-4.4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:
 "Two fixes for 4.4-rc1's DM ioctl changes that introduced the potential
  for infinite recursion on ioctl (with DM multipath).

  And four stable fixes:

   - A DM thin-provisioning fix to restore 'error_if_no_space' setting
     when a thin-pool is made writable again (after having been out of
     space).

   - A DM thin-provisioning fix to properly advertise discard support
     for thin volumes that are stacked on a thin-pool whose underlying
     data device doesn't support discards.

   - A DM ioctl fix to allow ctrl-c to break out of an ioctl retry loop
     when DM multipath is configured to 'queue_if_no_path'.

   - A DM crypt fix for a possible hang on dm-crypt device removal"

* tag 'dm-4.4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm thin: fix regression in advertised discard limits
  dm crypt: fix a possible hang due to race condition on exit
  dm mpath: fix infinite recursion in ioctl when no paths and !queue_if_no_path
  dm: do not reuse dm_blk_ioctl block_device input as local variable
  dm: fix ioctl retry termination with signal
  dm thin: restore requested 'error_if_no_space' setting on OODS to WRITE transition
parents 81b1a832 0fcb04d5
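
Two themes run through all six patches: an ioctl retry loop that must stay interruptible, and device lookups whose failure modes must not feed back into the caller. Before the diffs, here is a minimal userspace sketch (plain C, not kernel code; try_lookup and got_sigint are illustrative stand-ins for dm_get_live_table_for_ioctl() and fatal_signal_pending(current)) of the retry discipline the dm.c hunk below adopts:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigint;

static void on_sigint(int sig)
{
	(void)sig;
	got_sigint = 1;		/* stand-in for fatal_signal_pending(current) */
}

/* Stand-in for a lookup that fails transiently, like -ENOTCONN. */
static int try_lookup(int attempt)
{
	return attempt < 50 ? -1 : 0;
}

int main(void)
{
	int attempt = 0, r;

	signal(SIGINT, on_sigint);
retry:
	r = try_lookup(attempt++);
	if (r < 0 && !got_sigint) {	/* mirrors: r == -ENOTCONN && !fatal_signal_pending(current) */
		usleep(10 * 1000);	/* mirrors: msleep(10) */
		goto retry;
	}
	if (r < 0)
		puts("interrupted by signal");
	else
		printf("lookup succeeded after %d attempts\n", attempt);
	return 0;
}

Without the signal check, a condition that never clears (multipath with no paths and queue_if_no_path set) would spin here forever, immune even to ctrl-c; that is the behavior the 'fix ioctl retry termination with signal' patch removes.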
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -112,7 +112,8 @@ struct iv_tcw_private {
  * and encrypts / decrypts at the same time.
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
-	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
+	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
+	     DM_CRYPT_EXIT_THREAD};
 
 /*
  * The fields in here must be read only after initialization.
@@ -1203,20 +1204,18 @@ static int dmcrypt_write(void *data)
 		if (!RB_EMPTY_ROOT(&cc->write_tree))
 			goto pop_from_list;
 
+		if (unlikely(test_bit(DM_CRYPT_EXIT_THREAD, &cc->flags))) {
+			spin_unlock_irq(&cc->write_thread_wait.lock);
+			break;
+		}
+
 		__set_current_state(TASK_INTERRUPTIBLE);
 		__add_wait_queue(&cc->write_thread_wait, &wait);
 
 		spin_unlock_irq(&cc->write_thread_wait.lock);
 
-		if (unlikely(kthread_should_stop())) {
-			set_task_state(current, TASK_RUNNING);
-			remove_wait_queue(&cc->write_thread_wait, &wait);
-			break;
-		}
-
 		schedule();
 
-		set_task_state(current, TASK_RUNNING);
 		spin_lock_irq(&cc->write_thread_wait.lock);
 		__remove_wait_queue(&cc->write_thread_wait, &wait);
 		goto continue_locked;
@@ -1531,8 +1530,13 @@ static void crypt_dtr(struct dm_target *ti)
 	if (!cc)
 		return;
 
-	if (cc->write_thread)
+	if (cc->write_thread) {
+		spin_lock_irq(&cc->write_thread_wait.lock);
+		set_bit(DM_CRYPT_EXIT_THREAD, &cc->flags);
+		wake_up_locked(&cc->write_thread_wait);
+		spin_unlock_irq(&cc->write_thread_wait.lock);
 		kthread_stop(cc->write_thread);
+	}
 
 	if (cc->io_queue)
 		destroy_workqueue(cc->io_queue);
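
The three dm-crypt hunks above change how the write thread is told to exit: crypt_dtr() now sets DM_CRYPT_EXIT_THREAD and issues the wakeup while holding write_thread_wait.lock, and dmcrypt_write() tests the flag under that same lock before committing to sleep, instead of checking kthread_should_stop() after the lock has been dropped. A rough pthread analogue of that discipline (userspace sketch with illustrative names only; a condition variable plays the role of the wait queue):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static bool exit_thread;		/* plays the role of DM_CRYPT_EXIT_THREAD */

static void *writer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!exit_thread)		/* flag is only ever read under the lock */
		pthread_cond_wait(&wake, &lock);	/* unlocks and sleeps atomically */
	pthread_mutex_unlock(&lock);
	puts("writer thread exiting");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);

	/* Analogue of the new crypt_dtr(): flag and wakeup under one lock. */
	pthread_mutex_lock(&lock);
	exit_thread = true;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);		/* analogue of kthread_stop() */
	return 0;
}

Because the flag is written and the wakeup delivered under the same lock the sleeper holds while deciding to sleep, there is no window in which the shutdown request can slip between "check flag" and "go to sleep" and be lost.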
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1537,32 +1537,34 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 		struct block_device **bdev, fmode_t *mode)
 {
 	struct multipath *m = ti->private;
+	struct pgpath *pgpath;
 	unsigned long flags;
 	int r;
 
 	r = 0;
 
 	spin_lock_irqsave(&m->lock, flags);
 
 	if (!m->current_pgpath)
 		__choose_pgpath(m, 0);
 
-	if (m->current_pgpath) {
-		if (!m->queue_io) {
-			*bdev = m->current_pgpath->path.dev->bdev;
-			*mode = m->current_pgpath->path.dev->mode;
-			r = 0;
-		} else {
-			/* pg_init has not started or completed */
-			r = -ENOTCONN;
-		}
-	} else {
-		/* No path is available */
-		if (m->queue_if_no_path)
-			r = -ENOTCONN;
-		else
-			r = -EIO;
+	pgpath = m->current_pgpath;
+	if (pgpath) {
+		*bdev = pgpath->path.dev->bdev;
+		*mode = pgpath->path.dev->mode;
 	}
 
+	if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
+		r = -ENOTCONN;
+	else if (!*bdev)
+		r = -EIO;
+
 	spin_unlock_irqrestore(&m->lock, flags);
 
-	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
+	if (r == -ENOTCONN) {
 		spin_lock_irqsave(&m->lock, flags);
 		if (!m->current_pg) {
 			/* Path status changed, redo selection */
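
The rewritten multipath_prepare_ioctl() above collapses the nested branches into one three-way decision: -ENOTCONN means "transient, the caller may retry" (pg_init still in flight, or no path but queue_if_no_path queueing), -EIO means "no path and not queueing, fail now", and success means "a path is ready, forward the ioctl". A standalone sketch of just that decision table (illustrative function and parameter names, not the kernel's):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int prepare_ioctl_rc(bool have_pgpath, bool queue_io, bool queue_if_no_path)
{
	bool have_bdev = have_pgpath;	/* *bdev is only filled in when a pgpath exists */

	if ((have_pgpath && queue_io) || (!have_pgpath && queue_if_no_path))
		return -ENOTCONN;	/* retryable: pg_init pending, or queueing with no path */
	else if (!have_bdev)
		return -EIO;		/* no path and not queueing: hard failure */
	return 0;			/* path usable: safe to forward the ioctl */
}

int main(void)
{
	for (int p = 0; p <= 1; p++)
		for (int q = 0; q <= 1; q++)
			for (int n = 0; n <= 1; n++)
				printf("pgpath=%d queue_io=%d queue_if_no_path=%d -> %d\n",
				       p, q, n, prepare_ioctl_rc(p, q, n));
	return 0;
}

The crucial combination is no path and !queue_if_no_path: it must fail hard with -EIO. Per the pull message, getting this case wrong in 4.4-rc1 could leave dm_blk_ioctl() forwarding the ioctl back to the very device it was called on, hence the infinite recursion named in the shortlog; note also that the new `else if (!*bdev)` test only works because the caller now passes in a pointer initialized to NULL (see the dm.c hunks below).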
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2432,6 +2432,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 	case PM_WRITE:
 		if (old_mode != new_mode)
 			notify_of_pool_mode_change(pool, "write");
+		pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
 		dm_pool_metadata_read_write(pool->pmd);
 		pool->process_bio = process_bio;
 		pool->process_discard = process_discard_bio;
@@ -4249,10 +4250,9 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct thin_c *tc = ti->private;
 	struct pool *pool = tc->pool;
-	struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
 
-	if (!pool_limits->discard_granularity)
-		return; /* pool's discard support is disabled */
+	if (!pool->pf.discard_enabled)
+		return;
 
 	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
 	limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
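
For scale, the two limits set in the last lines of thin_io_hints() work out as follows; a quick standalone check, assuming 512-byte sectors (SECTOR_SHIFT == 9, as in the kernel) and a hypothetical 128-sector pool block:

#include <stdio.h>

#define SECTOR_SHIFT 9			/* 512-byte sectors, as in the kernel */

int main(void)
{
	unsigned long sectors_per_block = 128;	/* example: 64 KiB pool blocks */
	unsigned long granularity = sectors_per_block << SECTOR_SHIFT;
	unsigned long long max_discard_bytes =
		(unsigned long long)2048 * 1024 * 16 << SECTOR_SHIFT;

	printf("discard_granularity = %lu bytes\n", granularity);	/* 65536 */
	printf("max_discard_sectors = %llu bytes (%llu GiB)\n",
	       max_discard_bytes, max_discard_bytes >> 30);		/* 16 GiB */
	return 0;
}

So the granularity is exactly one thin-pool block, and 2048 * 1024 * 16 sectors is the advertised 16G cap from the comment.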
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -591,7 +591,7 @@ static int dm_get_live_table_for_ioctl(struct mapped_device *md,
 out:
 	dm_put_live_table(md, *srcu_idx);
 
-	if (r == -ENOTCONN) {
+	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
 		msleep(10);
 		goto retry;
 	}
@@ -603,9 +603,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
 	struct dm_target *tgt;
+	struct block_device *tgt_bdev = NULL;
 	int srcu_idx, r;
 
-	r = dm_get_live_table_for_ioctl(md, &tgt, &bdev, &mode, &srcu_idx);
+	r = dm_get_live_table_for_ioctl(md, &tgt, &tgt_bdev, &mode, &srcu_idx);
 	if (r < 0)
 		return r;
@@ -620,7 +621,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 		goto out;
 	}
 
-	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+	r = __blkdev_driver_ioctl(tgt_bdev, mode, cmd, arg);
 out:
 	dm_put_live_table(md, srcu_idx);
 	return r;
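
The last two hunks stop reusing dm_blk_ioctl()'s own bdev argument as the lookup's output. As the shortlog suggests, reusing it is what made the recursion possible: on paths where the lookup left the output untouched, bdev still named the dm device itself, so the ioctl was forwarded back into dm. A userspace sketch of the aliasing shape (hypothetical types and names, not the kernel's):

#include <stdio.h>

struct blockdev { const char *name; };

/* Stand-in lookup: on failure it leaves *out untouched, like the buggy path. */
static int lookup_target_dev(struct blockdev **out, int have_path,
			     struct blockdev *target)
{
	if (!have_path)
		return -1;
	*out = target;
	return 0;
}

int main(void)
{
	struct blockdev dm_dev = { "dm-0" }, real_dev = { "sda" };

	/* Buggy shape: output aliases the ioctl's own input; the return code
	 * is ignored here to mimic the "success with output unset" case. */
	struct blockdev *bdev = &dm_dev;
	lookup_target_dev(&bdev, /*have_path=*/0, &real_dev);
	printf("aliased:  ioctl would be forwarded to %s (itself!)\n", bdev->name);

	/* Fixed shape: a dedicated local, initialized to NULL, so the input
	 * survives and an unset output is detectable as NULL. */
	struct blockdev *tgt_bdev = NULL;
	if (lookup_target_dev(&tgt_bdev, /*have_path=*/1, &real_dev) == 0)
		printf("separate: ioctl forwarded to %s\n", tgt_bdev->name);
	return 0;
}

Keeping input and output distinct is also what lets the multipath hunk's `else if (!*bdev)` test detect "no device was chosen" and turn it into -EIO instead of a self-referential forward.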