Commit d0db3a39 authored by Linus Torvalds

Merge tag 'dm-4.2-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - fix DM thinp to consistently return -ENOSPC when out of data space

 - fix a logic bug in the DM cache smq policy's creation error path

 - revert a DM cache 4.2-rc3 change that reduced writeback efficiency

 - fix a hang on DM cache device destruction due to improper
   prealloc_used accounting introduced in 4.2-rc3

 - update URL for dm-crypt wiki page

* tag 'dm-4.2-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache: fix device destroy hang due to improper prealloc_used accounting
  Revert "dm cache: do not wake_worker() in free_migration()"
  dm crypt: update wiki page URL
  dm cache policy smq: fix alloc_bitset check that always evaluates as false
  dm thin: return -ENOSPC when erroring retry list due to out of data space
parents 86ea07ca 795e633a
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -259,7 +259,7 @@ config DM_CRYPT
 	  the ciphers you're going to use in the cryptoapi configuration.
 
 	  For further information on dm-crypt and userspace tools see:
-	  <http://code.google.com/p/cryptsetup/wiki/DMCrypt>
+	  <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt>
 
 	  To compile this code as a module, choose M here: the module will
 	  be called dm-crypt.
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1686,7 +1686,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
 
 	if (from_cblock(cache_size)) {
 		mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
-		if (!mq->cache_hit_bits && mq->cache_hit_bits) {
+		if (!mq->cache_hit_bits) {
 			DMERR("couldn't allocate cache hit bitset");
 			goto bad_cache_hit_bits;
 		}
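The condition removed above is a contradiction: "!mq->cache_hit_bits && mq->cache_hit_bits" is false for every possible pointer value, so a failed alloc_bitset() could never be detected and the NULL bitset escaped into later use. A minimal userspace sketch of the same pattern (stand-in names, not the kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		unsigned long *bits = NULL;	/* pretend the allocation failed */

		/* Buggy form: (!bits && bits) cannot be true for any value of
		 * bits, so this error path is dead code. */
		if (!bits && bits)
			fprintf(stderr, "never printed, even though bits == NULL\n");

		/* Fixed form: catches the failed allocation. */
		if (!bits) {
			fprintf(stderr, "couldn't allocate bitset\n");
			return EXIT_FAILURE;
		}

		free(bits);
		return 0;
	}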
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -424,6 +424,7 @@ static void free_migration(struct dm_cache_migration *mg)
 		wake_up(&cache->migration_wait);
 
 	mempool_free(mg, cache->migration_pool);
+	wake_worker(cache);
 }
 
 static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
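Context for the revert: free_migration() returns a migration structure to the pool, and the restored wake_worker() nudges the worker thread so that work blocked on that resource is retried promptly; without the wake, progress could wait on the next unrelated event, which matches the writeback-efficiency regression noted in the summary. A rough userspace analogue of the free-then-wake pattern, using a pthread condition variable as a stand-in for wake_worker() (illustrative only):

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static int free_slots;	/* toy migration pool: starts empty */

	static void *worker(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&lock);
		while (free_slots == 0)
			pthread_cond_wait(&cond, &lock);	/* blocked until woken */
		free_slots--;
		pthread_mutex_unlock(&lock);
		printf("worker resumed after a structure was freed\n");
		return NULL;
	}

	static void toy_free_migration(void)
	{
		pthread_mutex_lock(&lock);
		free_slots++;
		pthread_mutex_unlock(&lock);
		pthread_cond_signal(&cond);	/* the wake_worker() analogue */
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);
		sleep(1);		/* let the worker block on the empty pool */
		toy_free_migration();	/* without this signal the worker stalls */
		pthread_join(&t, NULL);
		return 0;
	}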
@@ -1966,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
 		 * this bio might require one, we pause until there are some
 		 * prepared mappings to process.
 		 */
+		prealloc_used = true;
 		if (prealloc_data_structs(cache, &structs)) {
 			spin_lock_irqsave(&cache->lock, flags);
 			bio_list_merge(&cache->deferred_bios, &bios);
@@ -1981,7 +1983,6 @@ static void process_deferred_bios(struct cache *cache)
 			process_discard_bio(cache, &structs, bio);
 		else
 			process_bio(cache, &structs, bio);
-		prealloc_used = true;
 	}
 
 	if (prealloc_used)
@@ -2010,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache)
 		 * this bio might require one, we pause until there are some
 		 * prepared mappings to process.
 		 */
+		prealloc_used = true;
 		if (prealloc_data_structs(cache, &structs)) {
 			spin_lock_irqsave(&cache->lock, flags);
 			list_splice(&cells, &cache->deferred_cells);
@@ -2018,7 +2020,6 @@ static void process_deferred_cells(struct cache *cache)
 		}
 
 		process_cell(cache, &structs, cell);
-		prealloc_used = true;
 	}
 
 	if (prealloc_used)
@@ -2080,6 +2081,7 @@ static void writeback_some_dirty_blocks(struct cache *cache)
 		if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
 			break; /* no work to do */
 
+		prealloc_used = true;
 		if (prealloc_data_structs(cache, &structs) ||
 		    get_cell(cache, oblock, &structs, &old_ocell)) {
 			policy_set_dirty(cache->policy, oblock);
@@ -2087,7 +2089,6 @@ static void writeback_some_dirty_blocks(struct cache *cache)
 		}
 
 		writeback(cache, &structs, oblock, cblock, old_ocell);
-		prealloc_used = true;
 	}
 
 	if (prealloc_used)
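Why moving the assignment fixes the hang: prealloc_data_structs() can fail after taking some elements out of the cache's mempools, and prealloc_free_structs() (the cleanup these loops run when prealloc_used is set) is what returns them. Setting the flag only at the bottom of a fully successful pass meant a failing first attempt skipped cleanup, and the unreturned elements left mempool_destroy() blocked forever when the device was destroyed. Setting the flag before each attempt guarantees cleanup on every path that may have consumed resources. A condensed sketch of the corrected control flow, with hypothetical stand-ins for the prealloc helpers:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for prealloc_data_structs(): may consume
	 * pool elements even when it ultimately reports failure. */
	static int grab_resources(int iteration)
	{
		return iteration == 1 ? -1 : 0;	/* fail on the second pass */
	}

	/* Hypothetical stand-in for prealloc_free_structs(). */
	static void release_resources(void)
	{
		printf("unused elements returned to the pool\n");
	}

	int main(void)
	{
		bool prealloc_used = false;
		int i;

		for (i = 0; i < 4; i++) {
			/* The fix: mark before the attempt, because a failing
			 * grab may still have taken something. */
			prealloc_used = true;
			if (grab_resources(i))
				break;
			/* ... process one unit of work ... */
		}

		if (prealloc_used)
			release_resources();	/* now runs on the failure path too */

		return 0;
	}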
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -666,16 +666,21 @@ static void requeue_io(struct thin_c *tc)
 	requeue_deferred_cells(tc);
 }
 
-static void error_retry_list(struct pool *pool)
+static void error_retry_list_with_code(struct pool *pool, int error)
 {
 	struct thin_c *tc;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(tc, &pool->active_thins, list)
-		error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO);
+		error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
 	rcu_read_unlock();
 }
 
+static void error_retry_list(struct pool *pool)
+{
+	return error_retry_list_with_code(pool, -EIO);
+}
+
 /*
  * This section of code contains the logic for processing a thin device's IO.
  * Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -2297,7 +2302,7 @@ static void do_no_space_timeout(struct work_struct *ws)
 	if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
 		pool->pf.error_if_no_space = true;
 		notify_of_pool_mode_change_to_oods(pool);
-		error_retry_list(pool);
+		error_retry_list_with_code(pool, -ENOSPC);
 	}
 }
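With the new helper in place, error_retry_list() keeps its historical behaviour by delegating with -EIO, while do_no_space_timeout() hands -ENOSPC to the bios parked on the retry list, which is the error userspace expects once the pool's data space is exhausted. A stripped-down sketch of the wrapper pattern (simplified signatures, not the kernel code):

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical stand-in for error_thin_bio_list(). */
	static void fail_queued_bios(int error)
	{
		printf("failing retry list with error %d\n", error);
	}

	static void error_retry_list_with_code(int error)
	{
		fail_queued_bios(error);
	}

	/* Old entry point keeps its historical -EIO behaviour. */
	static void error_retry_list(void)
	{
		error_retry_list_with_code(-EIO);
	}

	int main(void)
	{
		error_retry_list();			/* generic failure path */
		error_retry_list_with_code(-ENOSPC);	/* out of data space */
		return 0;
	}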