Commit 8e3c3827 authored by Mike Snitzer

dm cache: pass cache structure to mode functions

No functional changes, just a bit cleaner than passing the
cache_features structure.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent d1260e2a
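
The shape of the cleanup, reduced to a self-contained sketch (the enum and
struct definitions below are trimmed to the fields relevant here, and the
kernel's unlikely() annotation is omitted so the snippet stands alone; the
real definitions live in the dm-cache target source):

#include <stdbool.h>

enum cache_io_mode {
	CM_IO_WRITEBACK,
	CM_IO_WRITETHROUGH,
	CM_IO_PASSTHROUGH,
};

struct cache_features {
	enum cache_io_mode io_mode;
};

struct cache {
	struct cache_features features;
	/* ... many other fields in the real target ... */
};

/* Before: the helper took the features struct, so every call site had
 * to reach into the cache: passthrough_mode(&cache->features). */
static bool passthrough_mode_old(struct cache_features *f)
{
	return f->io_mode == CM_IO_PASSTHROUGH;
}

/* After: the helper takes the cache itself and dereferences internally,
 * so call sites shrink to passthrough_mode(cache). */
static bool passthrough_mode(struct cache *cache)
{
	return cache->features.io_mode == CM_IO_PASSTHROUGH;
}

Every caller already holds a struct cache *, so the &cache->features noise
simply disappears from the call sites in the diff below.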
@@ -515,19 +515,19 @@ struct dm_cache_migration {

 /*----------------------------------------------------------------*/

-static bool writethrough_mode(struct cache_features *f)
+static bool writethrough_mode(struct cache *cache)
 {
-	return f->io_mode == CM_IO_WRITETHROUGH;
+	return cache->features.io_mode == CM_IO_WRITETHROUGH;
 }

-static bool writeback_mode(struct cache_features *f)
+static bool writeback_mode(struct cache *cache)
 {
-	return f->io_mode == CM_IO_WRITEBACK;
+	return cache->features.io_mode == CM_IO_WRITEBACK;
 }

-static inline bool passthrough_mode(struct cache_features *f)
+static inline bool passthrough_mode(struct cache *cache)
 {
-	return unlikely(f->io_mode == CM_IO_PASSTHROUGH);
+	return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH);
 }

 /*----------------------------------------------------------------*/
@@ -544,7 +544,7 @@ static void wake_deferred_writethrough_worker(struct cache *cache)

 static void wake_migration_worker(struct cache *cache)
 {
-	if (passthrough_mode(&cache->features))
+	if (passthrough_mode(cache))
 		return;

 	queue_work(cache->wq, &cache->migration_worker);
@@ -626,7 +626,7 @@ static unsigned lock_level(struct bio *bio)

 static size_t get_per_bio_data_size(struct cache *cache)
 {
-	return writethrough_mode(&cache->features) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
+	return writethrough_mode(cache) ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
 }

 static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
@@ -1209,7 +1209,7 @@ static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)

 static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
 {
-	return writeback_mode(&cache->features) &&
+	return writeback_mode(cache) &&
 		(is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
 }

@@ -1862,7 +1862,7 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
 	 * Passthrough always maps to the origin, invalidating any
 	 * cache blocks that are written to.
 	 */
-	if (passthrough_mode(&cache->features)) {
+	if (passthrough_mode(cache)) {
 		if (bio_data_dir(bio) == WRITE) {
 			bio_drop_shared_lock(cache, bio);
 			atomic_inc(&cache->stats.demotion);
@@ -1871,7 +1871,7 @@ static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
 			remap_to_origin_clear_discard(cache, bio, block);
 	} else {
-		if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
+		if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
 		    !is_dirty(cache, cblock)) {
 			remap_to_origin_then_cache(cache, bio, block, cblock);
 			accounted_begin(cache, bio);
@@ -2638,7 +2638,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 		goto bad;
 	}

-	if (passthrough_mode(&cache->features)) {
+	if (passthrough_mode(cache)) {
 		bool all_clean;

 		r = dm_cache_metadata_all_clean(cache->cmd, &all_clean);
@@ -3263,13 +3263,13 @@ static void cache_status(struct dm_target *ti, status_type_t type,
 		else
 			DMEMIT("1 ");

-		if (writethrough_mode(&cache->features))
+		if (writethrough_mode(cache))
 			DMEMIT("writethrough ");

-		else if (passthrough_mode(&cache->features))
+		else if (passthrough_mode(cache))
 			DMEMIT("passthrough ");

-		else if (writeback_mode(&cache->features))
+		else if (writeback_mode(cache))
 			DMEMIT("writeback ");

 		else {
@@ -3435,7 +3435,7 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
 	unsigned i;
 	struct cblock_range range;

-	if (!passthrough_mode(&cache->features)) {
+	if (!passthrough_mode(cache)) {
 		DMERR("%s: cache has to be in passthrough mode for invalidation",
 		      cache_device_name(cache));
 		return -EPERM;