Commit b7ca9c92 authored by Joe Thornber, committed by Alasdair G Kergon

dm thin: replace dm_cell_release_singleton with cell_defer_except

Change existing users of the function dm_cell_release_singleton to share
cell_defer_except instead, and then remove the now-unused function.

Everywhere that calls dm_cell_release_singleton, the bio in question
is the holder of the cell.

If there are no non-holder entries in the cell then cell_defer_except
behaves exactly like dm_cell_release_singleton.  Conversely, if there
*are* non-holder entries then dm_cell_release_singleton must not be used
because those entries would need to be deferred.

Consequently, it is safe to replace use of dm_cell_release_singleton
with cell_defer_except.
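
For context, a minimal sketch of what a cell_defer_except()-style helper looks like when built on dm_cell_release_no_holder() (the bio-prison function declared in the header hunk below). Only that call and the thin_c / dm_bio_prison_cell types are taken from this patch; the struct pool, pool->lock, pool->deferred_bios and wake_worker() names are illustrative assumptions:

	/*
	 * Sketch only: move every bio detained in the cell, except the
	 * holder, onto the pool's deferred list, then wake the worker
	 * thread so it reissues them.
	 */
	static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
	{
		struct pool *pool = tc->pool;		/* 'struct pool' layout assumed */
		unsigned long flags;

		spin_lock_irqsave(&pool->lock, flags);	/* pool->lock assumed */
		dm_cell_release_no_holder(cell, &pool->deferred_bios);
		spin_unlock_irqrestore(&pool->lock, flags);

		wake_worker(pool);			/* assumed worker wake-up helper */
	}

When the cell contains only its holder, nothing is deferred and the cell is simply released, which is why such a helper can stand in for every dm_cell_release_singleton() call replaced below.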

This patch is a pre-requisite for "dm thin: fix race between
simultaneous io and discards to same block".
Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent c1a94672
@@ -207,31 +207,6 @@ void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 }
 EXPORT_SYMBOL_GPL(dm_cell_release);
 
-/*
- * There are a couple of places where we put a bio into a cell briefly
- * before taking it out again. In these situations we know that no other
- * bio may be in the cell. This function releases the cell, and also does
- * a sanity check.
- */
-static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
-	BUG_ON(cell->holder != bio);
-	BUG_ON(!bio_list_empty(&cell->bios));
-
-	__cell_release(cell, NULL);
-}
-
-void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
-	unsigned long flags;
-	struct dm_bio_prison *prison = cell->prison;
-
-	spin_lock_irqsave(&prison->lock, flags);
-	__cell_release_singleton(cell, bio);
-	spin_unlock_irqrestore(&prison->lock, flags);
-}
-EXPORT_SYMBOL_GPL(dm_cell_release_singleton);
-
 /*
  * Sometimes we don't want the holder, just the additional bios.
  */
...
@@ -44,7 +44,6 @@ int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
 		  struct bio *inmate, struct dm_bio_prison_cell **ref);
 
 void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
-void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio); // FIXME: bio arg not needed
 void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
 void dm_cell_error(struct dm_bio_prison_cell *cell);
 
...
@@ -513,8 +513,7 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
 }
 
 /*
- * Same as cell_defer above, except it omits one particular detainee,
- * a write bio that covers the block and has already been processed.
+ * Same as cell_defer except it omits the original holder of the cell.
  */
 static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
@@ -936,7 +935,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 		 */
 		build_data_key(tc->td, lookup_result.block, &key2);
 		if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
-			dm_cell_release_singleton(cell, bio);
+			cell_defer_except(tc, cell);
 			break;
 		}
 
@@ -967,8 +966,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
			 * a block boundary. So we submit the discard of a
			 * partial block appropriately.
			 */
-			dm_cell_release_singleton(cell, bio);
-			dm_cell_release_singleton(cell2, bio);
+			cell_defer_except(tc, cell);
+			cell_defer_except(tc, cell2);
 			if ((!lookup_result.shared) && pool->pf.discard_passdown)
 				remap_and_issue(tc, bio, lookup_result.block);
 			else
@@ -980,13 +979,13 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
 		/*
 		 * It isn't provisioned, just forget it.
 		 */
-		dm_cell_release_singleton(cell, bio);
+		cell_defer_except(tc, cell);
 		bio_endio(bio, 0);
 		break;
 
 	default:
 		DMERR("discard: find block unexpectedly returned %d", r);
-		dm_cell_release_singleton(cell, bio);
+		cell_defer_except(tc, cell);
 		bio_io_error(bio);
 		break;
 	}
@@ -1041,7 +1040,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 
 		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
 
-		dm_cell_release_singleton(cell, bio);
+		cell_defer_except(tc, cell);
 		remap_and_issue(tc, bio, lookup_result->block);
 	}
 }
@@ -1056,7 +1055,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	 * Remap empty bios (flushes) immediately, without provisioning.
 	 */
 	if (!bio->bi_size) {
-		dm_cell_release_singleton(cell, bio);
+		cell_defer_except(tc, cell);
 		remap_and_issue(tc, bio, 0);
 		return;
 	}
@@ -1066,7 +1065,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	 */
 	if (bio_data_dir(bio) == READ) {
 		zero_fill_bio(bio);
-		dm_cell_release_singleton(cell, bio);
+		cell_defer_except(tc, cell);
 		bio_endio(bio, 0);
 		return;
 	}
@@ -1120,7 +1119,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 		 * TODO: this will probably have to change when discard goes
 		 * back in.
 		 */
-		dm_cell_release_singleton(cell, bio);
+		cell_defer_except(tc, cell);
 
 		if (lookup_result.shared)
 			process_shared_bio(tc, bio, block, &lookup_result);
@@ -1130,7 +1129,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 
 	case -ENODATA:
 		if (bio_data_dir(bio) == READ && tc->origin_dev) {
-			dm_cell_release_singleton(cell, bio);
+			cell_defer_except(tc, cell);
 			remap_to_origin_and_issue(tc, bio);
 		} else
 			provision_block(tc, bio, block, cell);
@@ -1138,7 +1137,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 
 	default:
 		DMERR("dm_thin_find_block() failed, error = %d", r);
-		dm_cell_release_singleton(cell, bio);
+		cell_defer_except(tc, cell);
 		bio_io_error(bio);
 		break;
 	}
...