Commit fe696909 authored by Linus Torvalds

Merge tag 'dm-3.9-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm

Pull device-mapper fixes from Alasdair Kergon:
 "A pair of patches to fix the writethrough mode of the device-mapper
  cache target when the device being cached is not itself wrapped with
  device-mapper."

* tag 'dm-3.9-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm:
  dm cache: reduce bio front_pad size in writeback mode
  dm cache: fix writes to cache device in writethrough mode
parents b196553a 19b0092e
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include "dm.h" #include "dm.h"
#include "dm-bio-prison.h" #include "dm-bio-prison.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h" #include "dm-cache-metadata.h"
#include <linux/dm-io.h> #include <linux/dm-io.h>
...@@ -201,10 +202,15 @@ struct per_bio_data { ...@@ -201,10 +202,15 @@ struct per_bio_data {
unsigned req_nr:2; unsigned req_nr:2;
struct dm_deferred_entry *all_io_entry; struct dm_deferred_entry *all_io_entry;
/* writethrough fields */ /*
* writethrough fields. These MUST remain at the end of this
* structure and the 'cache' member must be the first as it
* is used to determine the offsetof the writethrough fields.
*/
struct cache *cache; struct cache *cache;
dm_cblock_t cblock; dm_cblock_t cblock;
bio_end_io_t *saved_bi_end_io; bio_end_io_t *saved_bi_end_io;
struct dm_bio_details bio_details;
}; };
struct dm_cache_migration { struct dm_cache_migration {
...@@ -513,16 +519,28 @@ static void save_stats(struct cache *cache) ...@@ -513,16 +519,28 @@ static void save_stats(struct cache *cache)
/*---------------------------------------------------------------- /*----------------------------------------------------------------
* Per bio data * Per bio data
*--------------------------------------------------------------*/ *--------------------------------------------------------------*/
static struct per_bio_data *get_per_bio_data(struct bio *bio)
/*
* If using writeback, leave out struct per_bio_data's writethrough fields.
*/
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
/*
 * Per-bio data size depends on the cache mode: writethrough needs the
 * trailing writethrough fields of struct per_bio_data, writeback omits
 * them (PB_DATA_SIZE_WB is offsetof the 'cache' member).
 */
static size_t get_per_bio_data_size(struct cache *cache)
{
return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}
static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{ {
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
BUG_ON(!pb); BUG_ON(!pb);
return pb; return pb;
} }
static struct per_bio_data *init_per_bio_data(struct bio *bio) static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{ {
struct per_bio_data *pb = get_per_bio_data(bio); struct per_bio_data *pb = get_per_bio_data(bio, data_size);
pb->tick = false; pb->tick = false;
pb->req_nr = dm_bio_get_target_bio_nr(bio); pb->req_nr = dm_bio_get_target_bio_nr(bio);
...@@ -556,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio, ...@@ -556,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{ {
unsigned long flags; unsigned long flags;
struct per_bio_data *pb = get_per_bio_data(bio); size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
spin_lock_irqsave(&cache->lock, flags); spin_lock_irqsave(&cache->lock, flags);
if (cache->need_tick_bio && if (cache->need_tick_bio &&
...@@ -635,7 +654,7 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio) ...@@ -635,7 +654,7 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
static void writethrough_endio(struct bio *bio, int err) static void writethrough_endio(struct bio *bio, int err)
{ {
struct per_bio_data *pb = get_per_bio_data(bio); struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
bio->bi_end_io = pb->saved_bi_end_io; bio->bi_end_io = pb->saved_bi_end_io;
if (err) { if (err) {
...@@ -643,6 +662,7 @@ static void writethrough_endio(struct bio *bio, int err) ...@@ -643,6 +662,7 @@ static void writethrough_endio(struct bio *bio, int err)
return; return;
} }
dm_bio_restore(&pb->bio_details, bio);
remap_to_cache(pb->cache, bio, pb->cblock); remap_to_cache(pb->cache, bio, pb->cblock);
/* /*
...@@ -662,11 +682,12 @@ static void writethrough_endio(struct bio *bio, int err) ...@@ -662,11 +682,12 @@ static void writethrough_endio(struct bio *bio, int err)
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock) dm_oblock_t oblock, dm_cblock_t cblock)
{ {
struct per_bio_data *pb = get_per_bio_data(bio); struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
pb->cache = cache; pb->cache = cache;
pb->cblock = cblock; pb->cblock = cblock;
pb->saved_bi_end_io = bio->bi_end_io; pb->saved_bi_end_io = bio->bi_end_io;
dm_bio_record(&pb->bio_details, bio);
bio->bi_end_io = writethrough_endio; bio->bi_end_io = writethrough_endio;
remap_to_origin_clear_discard(pb->cache, bio, oblock); remap_to_origin_clear_discard(pb->cache, bio, oblock);
...@@ -1035,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio) ...@@ -1035,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio)
static void process_flush_bio(struct cache *cache, struct bio *bio) static void process_flush_bio(struct cache *cache, struct bio *bio)
{ {
struct per_bio_data *pb = get_per_bio_data(bio); size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
BUG_ON(bio->bi_size); BUG_ON(bio->bi_size);
if (!pb->req_nr) if (!pb->req_nr)
...@@ -1107,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs, ...@@ -1107,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
dm_oblock_t block = get_bio_block(cache, bio); dm_oblock_t block = get_bio_block(cache, bio);
struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
struct policy_result lookup_result; struct policy_result lookup_result;
struct per_bio_data *pb = get_per_bio_data(bio); size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
bool discarded_block = is_discarded_oblock(cache, block); bool discarded_block = is_discarded_oblock(cache, block);
bool can_migrate = discarded_block || spare_migration_bandwidth(cache); bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
...@@ -1881,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) ...@@ -1881,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
cache->ti = ca->ti; cache->ti = ca->ti;
ti->private = cache; ti->private = cache;
ti->per_bio_data_size = sizeof(struct per_bio_data);
ti->num_flush_bios = 2; ti->num_flush_bios = 2;
ti->flush_supported = true; ti->flush_supported = true;
...@@ -1890,6 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) ...@@ -1890,6 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->discard_zeroes_data_unsupported = true; ti->discard_zeroes_data_unsupported = true;
memcpy(&cache->features, &ca->features, sizeof(cache->features)); memcpy(&cache->features, &ca->features, sizeof(cache->features));
ti->per_bio_data_size = get_per_bio_data_size(cache);
cache->callbacks.congested_fn = cache_is_congested; cache->callbacks.congested_fn = cache_is_congested;
dm_table_add_target_callbacks(ti->table, &cache->callbacks); dm_table_add_target_callbacks(ti->table, &cache->callbacks);
...@@ -2092,6 +2115,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) ...@@ -2092,6 +2115,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
int r; int r;
dm_oblock_t block = get_bio_block(cache, bio); dm_oblock_t block = get_bio_block(cache, bio);
size_t pb_data_size = get_per_bio_data_size(cache);
bool can_migrate = false; bool can_migrate = false;
bool discarded_block; bool discarded_block;
struct dm_bio_prison_cell *cell; struct dm_bio_prison_cell *cell;
...@@ -2108,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) ...@@ -2108,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
} }
pb = init_per_bio_data(bio); pb = init_per_bio_data(bio, pb_data_size);
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
defer_bio(cache, bio); defer_bio(cache, bio);
...@@ -2193,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) ...@@ -2193,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{ {
struct cache *cache = ti->private; struct cache *cache = ti->private;
unsigned long flags; unsigned long flags;
struct per_bio_data *pb = get_per_bio_data(bio); size_t pb_data_size = get_per_bio_data_size(cache);
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
if (pb->tick) { if (pb->tick) {
policy_tick(cache->policy); policy_tick(cache->policy);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment