Commit 16961b04 authored by Mike Snitzer

dm thin: initialize dm_thin_new_mapping returned by get_next_mapping

As additional members are added to the dm_thin_new_mapping structure,
care should be taken to make sure they get initialized before use.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
Cc: stable@vger.kernel.org
parent 319e2e3f
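
The pattern the patch adopts can be illustrated in isolation: instead of assigning every field at each call site, the allocator zeroes the whole structure, so any member added later starts in a known state. A minimal sketch, assuming a simplified mapping structure and pool (the example_* names below are illustrative, not the driver's actual layout):

#include <stddef.h>
#include <string.h>

/* Illustrative stand-ins only; the real dm_thin_new_mapping has more members. */
struct example_mapping {
	int quiesced;
	int prepared;
	int err;
	void *bio;
};

struct example_pool {
	struct example_mapping *next_mapping;
};

/*
 * Zero the mapping when handing it out so callers only set the fields
 * they actually care about; any member added later is implicitly
 * initialized to zero/NULL.
 */
struct example_mapping *example_get_next_mapping(struct example_pool *pool)
{
	struct example_mapping *m = pool->next_mapping;

	memset(m, 0, sizeof(*m));
	pool->next_mapping = NULL;

	return m;
}

Centralizing the initialization this way is what lets the patch below delete the per-field assignments from the individual call sites.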
@@ -751,13 +751,17 @@ static int ensure_next_mapping(struct pool *pool)
 
 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 {
-	struct dm_thin_new_mapping *r = pool->next_mapping;
+	struct dm_thin_new_mapping *m = pool->next_mapping;
 
 	BUG_ON(!pool->next_mapping);
 
+	memset(m, 0, sizeof(struct dm_thin_new_mapping));
+	INIT_LIST_HEAD(&m->list);
+	m->bio = NULL;
+
 	pool->next_mapping = NULL;
 
-	return r;
+	return m;
 }
 
 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
@@ -769,15 +773,10 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-	INIT_LIST_HEAD(&m->list);
-	m->quiesced = 0;
-	m->prepared = 0;
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_dest;
 	m->cell = cell;
-	m->err = 0;
-	m->bio = NULL;
 
 	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
 		m->quiesced = 1;
@@ -840,15 +839,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 	struct pool *pool = tc->pool;
 	struct dm_thin_new_mapping *m = get_next_mapping(pool);
 
-	INIT_LIST_HEAD(&m->list);
 	m->quiesced = 1;
 	m->prepared = 0;
 	m->tc = tc;
 	m->virt_block = virt_block;
 	m->data_block = data_block;
 	m->cell = cell;
-	m->err = 0;
-	m->bio = NULL;
 
 	/*
 	 * If the whole block of data is being overwritten or we are not
@@ -1045,7 +1041,6 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 			m->data_block = lookup_result.block;
 			m->cell = cell;
 			m->cell2 = cell2;
-			m->err = 0;
 			m->bio = bio;
 
 			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {