Commit 44fa816b authored by Anssi Hannula, committed by Mike Snitzer

dm cache: fix race affecting dirty block count

nr_dirty is updated without locking, causing it to drift so that it is
non-zero (either a small positive integer, or a very large one when an
underflow occurs) even when there are no actual dirty blocks.  This was
due to a race between the workqueue and map function accessing nr_dirty
in parallel without proper protection.

People were seeing underruns due to a race on increment/decrement of
nr_dirty, see: https://lkml.org/lkml/2014/6/3/648

Fix this by using an atomic_t for nr_dirty.

Reported-by: roma1390@gmail.com
Signed-off-by: Anssi Hannula <anssi.hannula@iki.fi>
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org
parent d8c712ea
...@@ -231,7 +231,7 @@ struct cache { ...@@ -231,7 +231,7 @@ struct cache {
/* /*
* cache_size entries, dirty if set * cache_size entries, dirty if set
*/ */
dm_cblock_t nr_dirty; atomic_t nr_dirty;
unsigned long *dirty_bitset; unsigned long *dirty_bitset;
/* /*
...@@ -492,7 +492,7 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b) ...@@ -492,7 +492,7 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b)
/*
 * Mark @cblock dirty and notify the policy of the dirtied origin block.
 *
 * test_and_set_bit() is atomic, so only the caller that actually flips
 * the bit from 0 to 1 bumps the count — nr_dirty stays in sync with the
 * bitset.  nr_dirty is an atomic_t because set_dirty()/clear_dirty()
 * race between the workqueue and the map function; a plain increment
 * here caused the count to drift (see commit message).
 */
static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
		atomic_inc(&cache->nr_dirty);
		policy_set_dirty(cache->policy, oblock);
	}
}
...@@ -501,8 +501,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl ...@@ -501,8 +501,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
{ {
if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
policy_clear_dirty(cache->policy, oblock); policy_clear_dirty(cache->policy, oblock);
cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1); if (atomic_dec_return(&cache->nr_dirty) == 0)
if (!from_cblock(cache->nr_dirty))
dm_table_event(cache->ti->table); dm_table_event(cache->ti->table);
} }
} }
...@@ -2269,7 +2268,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) ...@@ -2269,7 +2268,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
atomic_set(&cache->quiescing_ack, 0); atomic_set(&cache->quiescing_ack, 0);
r = -ENOMEM; r = -ENOMEM;
cache->nr_dirty = 0; atomic_set(&cache->nr_dirty, 0);
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
if (!cache->dirty_bitset) { if (!cache->dirty_bitset) {
*error = "could not allocate dirty bitset"; *error = "could not allocate dirty bitset";
...@@ -2808,7 +2807,7 @@ static void cache_status(struct dm_target *ti, status_type_t type, ...@@ -2808,7 +2807,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
residency = policy_residency(cache->policy); residency = policy_residency(cache->policy);
DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ", DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
(unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT), (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
(unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata), (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
(unsigned long long)nr_blocks_metadata, (unsigned long long)nr_blocks_metadata,
...@@ -2821,7 +2820,7 @@ static void cache_status(struct dm_target *ti, status_type_t type, ...@@ -2821,7 +2820,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
(unsigned) atomic_read(&cache->stats.write_miss), (unsigned) atomic_read(&cache->stats.write_miss),
(unsigned) atomic_read(&cache->stats.demotion), (unsigned) atomic_read(&cache->stats.demotion),
(unsigned) atomic_read(&cache->stats.promotion), (unsigned) atomic_read(&cache->stats.promotion),
(unsigned long long) from_cblock(cache->nr_dirty)); (unsigned long) atomic_read(&cache->nr_dirty));
if (writethrough_mode(&cache->features)) if (writethrough_mode(&cache->features))
DMEMIT("1 writethrough "); DMEMIT("1 writethrough ");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment