Commit ec667158 authored by Linus Torvalds

Merge tag 'dm-3.10-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm

Pull device-mapper updates from Alasdair Kergon:
 "Allow devices that hold metadata for the device-mapper thin
  provisioning target to be extended easily; allow WRITE SAME on
  multipath devices; an assortment of little fixes and clean-ups."

* tag 'dm-3.10-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm: (21 commits)
  dm cache: set config value
  dm cache: move config fns
  dm thin: generate event when metadata threshold passed
  dm persistent metadata: add space map threshold callback
  dm persistent data: add threshold callback to space map
  dm thin: detect metadata device resizing
  dm persistent data: support space map resizing
  dm thin: open dev read only when possible
  dm thin: refactor data dev resize
  dm cache: replace memcpy with struct assignment
  dm cache: fix typos in comments
  dm cache policy: fix description of lookup fn
  dm: document iterate_devices
  dm persistent data: fix error message typos
  dm cache: tune migration throttling
  dm mpath: enable WRITE SAME support
  dm table: fix write same support
  dm bufio: avoid a possible __vmalloc deadlock
  dm snapshot: fix error return code in snapshot_ctr
  dm cache: fix error return code in cache_create
  ...
parents f755407d 2f14f4b5
@@ -319,6 +319,9 @@ static void __cache_size_refresh(void)
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
 {
+	unsigned noio_flag;
+	void *ptr;
+
 	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
 		*data_mode = DATA_MODE_SLAB;
 		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
@@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
 	}

 	*data_mode = DATA_MODE_VMALLOC;
-	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+	/*
+	 * __vmalloc allocates the data pages and auxiliary structures with
+	 * gfp_flags that were specified, but pagetables are always allocated
+	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
+	 *
+	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
+	 * all allocations done by this process (including pagetables) are done
+	 * as if GFP_NOIO was specified.
+	 */
+	if (gfp_mask & __GFP_NORETRY)
+		noio_flag = memalloc_noio_save();
+
+	ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
+
+	if (gfp_mask & __GFP_NORETRY)
+		memalloc_noio_restore(noio_flag);
+
+	return ptr;
 }

 /*
...
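The new comment in dm-bufio captures the subtlety being fixed: __vmalloc() applies the caller's gfp_mask only to the data pages, while page tables are always allocated with GFP_KERNEL, so a GFP_NOIO caller can still recurse into I/O. A minimal sketch of the same pattern outside dm-bufio, using a hypothetical helper name (vmalloc_noio and the flag choice are illustrative, not part of this commit):

#include <linux/sched.h>	/* memalloc_noio_save()/memalloc_noio_restore() */
#include <linux/vmalloc.h>

/* Hypothetical helper: vmalloc a buffer without triggering further I/O. */
static void *vmalloc_noio(size_t size)
{
	unsigned int noio_flag;
	void *ptr;

	/* PF_MEMALLOC_NOIO makes page-table allocations honour NOIO too. */
	noio_flag = memalloc_noio_save();
	ptr = __vmalloc(size, GFP_NOIO | __GFP_NORETRY, PAGE_KERNEL);
	memalloc_noio_restore(noio_flag);

	return ptr;
}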
@@ -1044,7 +1044,7 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats)
 {
 	down_read(&cmd->root_lock);
-	memcpy(stats, &cmd->stats, sizeof(*stats));
+	*stats = cmd->stats;
 	up_read(&cmd->root_lock);
 }

@@ -1052,7 +1052,7 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats)
 {
 	down_write(&cmd->root_lock);
-	memcpy(&cmd->stats, stats, sizeof(*stats));
+	cmd->stats = *stats;
 	up_write(&cmd->root_lock);
 }
...
@@ -130,8 +130,8 @@ struct dm_cache_policy {
	 *
	 * Must not block.
	 *
-	 * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK
-	 * would be typical).
+	 * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
+	 * (-EWOULDBLOCK would be typical).
	 */
	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
...
@@ -205,7 +205,7 @@ struct per_bio_data {
	/*
	 * writethrough fields. These MUST remain at the end of this
	 * structure and the 'cache' member must be the first as it
-	 * is used to determine the offsetof the writethrough fields.
+	 * is used to determine the offset of the writethrough fields.
	 */
	struct cache *cache;
	dm_cblock_t cblock;
@@ -393,7 +393,7 @@ static int get_cell(struct cache *cache,
	return r;
 }

 /*----------------------------------------------------------------*/

 static bool is_dirty(struct cache *cache, dm_cblock_t b)
 {
@@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
 }

 /*----------------------------------------------------------------*/

 static bool block_size_is_power_of_two(struct cache *cache)
 {
	return cache->sectors_per_block_shift >= 0;
@@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err)
	/*
	 * We can't issue this bio directly, since we're in interrupt
-	 * context. So it get's put on a bio list for processing by the
+	 * context. So it gets put on a bio list for processing by the
	 * worker thread.
	 */
	defer_writethrough_bio(pb->cache, bio);
@@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws)
 static void do_waker(struct work_struct *ws)
 {
	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
+	policy_tick(cache->policy);
	wake_worker(cache);
	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
 }
@@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
 static struct kmem_cache *migration_cache;

-static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
+#define NOT_CORE_OPTION 1
+
+static int process_config_option(struct cache *cache, const char *key, const char *value)
+{
+	unsigned long tmp;
+
+	if (!strcasecmp(key, "migration_threshold")) {
+		if (kstrtoul(value, 10, &tmp))
+			return -EINVAL;
+
+		cache->migration_threshold = tmp;
+		return 0;
+	}
+
+	return NOT_CORE_OPTION;
+}
+
+static int set_config_value(struct cache *cache, const char *key, const char *value)
+{
+	int r = process_config_option(cache, key, value);
+
+	if (r == NOT_CORE_OPTION)
+		r = policy_set_config_value(cache->policy, key, value);
+
+	if (r)
+		DMWARN("bad config value for %s: %s", key, value);
+
+	return r;
+}
+
+static int set_config_values(struct cache *cache, int argc, const char **argv)
 {
	int r = 0;
@@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
	}

	while (argc) {
-		r = policy_set_config_value(p, argv[0], argv[1]);
-		if (r) {
-			DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
-			       argv[0], argv[1]);
-			return r;
-		}
+		r = set_config_value(cache, argv[0], argv[1]);
+		if (r)
+			break;

		argc -= 2;
		argv += 2;
@@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a
 static int create_cache_policy(struct cache *cache, struct cache_args *ca,
			       char **error)
 {
-	int r;
-
	cache->policy = dm_cache_policy_create(ca->policy_name,
					       cache->cache_size,
					       cache->origin_sectors,
@@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
		return -ENOMEM;
	}

-	r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
-	if (r) {
-		*error = "Error setting cache policy's config values";
-		dm_cache_policy_destroy(cache->policy);
-		cache->policy = NULL;
-	}
-
-	return r;
+	return 0;
 }

 /*
@@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,
	return discard_block_size;
 }

-#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
+#define DEFAULT_MIGRATION_THRESHOLD 2048

 static int cache_create(struct cache_args *ca, struct cache **result)
 {
@@ -1911,7 +1931,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
	ti->discards_supported = true;
	ti->discard_zeroes_data_unsupported = true;

-	memcpy(&cache->features, &ca->features, sizeof(cache->features));
+	cache->features = ca->features;

	ti->per_bio_data_size = get_per_bio_data_size(cache);

	cache->callbacks.congested_fn = cache_is_congested;
@@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result)
	r = create_cache_policy(cache, ca, error);
	if (r)
		goto bad;
+
	cache->policy_nr_args = ca->policy_argc;
+	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
+
+	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
+	if (r) {
+		*error = "Error setting cache policy's config values";
+		goto bad;
+	}

	cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
				     ca->block_size, may_format,
@@ -1967,10 +1995,10 @@ static int cache_create(struct cache_args *ca, struct cache **result)
	INIT_LIST_HEAD(&cache->quiesced_migrations);
	INIT_LIST_HEAD(&cache->completed_migrations);
	INIT_LIST_HEAD(&cache->need_commit_migrations);
-	cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
	atomic_set(&cache->nr_migrations, 0);
	init_waitqueue_head(&cache->migration_wait);

+	r = -ENOMEM;
	cache->nr_dirty = 0;
	cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
	if (!cache->dirty_bitset) {
@@ -2517,23 +2545,6 @@ static void cache_status(struct dm_target *ti, status_type_t type,
	DMEMIT("Error");
 }

-#define NOT_CORE_OPTION 1
-
-static int process_config_option(struct cache *cache, char **argv)
-{
-	unsigned long tmp;
-
-	if (!strcasecmp(argv[0], "migration_threshold")) {
-		if (kstrtoul(argv[1], 10, &tmp))
-			return -EINVAL;
-
-		cache->migration_threshold = tmp;
-		return 0;
-	}
-
-	return NOT_CORE_OPTION;
-}
-
 /*
  * Supports <key> <value>.
  *
@@ -2541,17 +2552,12 @@ static int process_config_option(struct cache *cache, char **argv)
  */
 static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
 {
-	int r;
	struct cache *cache = ti->private;

	if (argc != 2)
		return -EINVAL;

-	r = process_config_option(cache, argv);
-	if (r == NOT_CORE_OPTION)
-		return policy_set_config_value(cache->policy, argv[0], argv[1]);
-
-	return r;
+	return set_config_value(cache, argv[0], argv[1]);
 }

 static int cache_iterate_devices(struct dm_target *ti,
@@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
 static struct target_type cache_target = {
	.name = "cache",
-	.version = {1, 1, 0},
+	.version = {1, 1, 1},
	.module = THIS_MODULE,
	.ctr = cache_ctr,
	.dtr = cache_dtr,
...
@@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
+	ti->num_write_same_bios = 1;

	return 0;
...
@@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
+		r = -ENOMEM;
		goto bad_pending_pool;
	}
...
@@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
 static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
	struct stripe_c *sc;
-	sector_t width;
+	sector_t width, tmp_len;
	uint32_t stripes;
	uint32_t chunk_size;
	int r;
@@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	}

	width = ti->len;
-	if (sector_div(width, chunk_size)) {
+	if (sector_div(width, stripes)) {
		ti->error = "Target length not divisible by "
-		    "chunk size";
+		    "number of stripes";
		return -EINVAL;
	}

-	if (sector_div(width, stripes)) {
+	tmp_len = width;
+	if (sector_div(tmp_len, chunk_size)) {
		ti->error = "Target length not divisible by "
-		    "number of stripes";
+		    "chunk size";
		return -EINVAL;
	}
...
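The stripe_ctr change above swaps the order of the two divisibility checks and introduces tmp_len: sector_div() divides its first argument in place, so the old code destroyed width with the chunk-size check before the per-stripe length had been computed. A small user-space model of the corrected flow, with made-up values and plain division standing in for sector_div():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t len = 6144;		/* hypothetical target length in sectors */
	uint32_t stripes = 3;
	uint32_t chunk_size = 128;
	uint64_t width, tmp_len;

	width = len;
	if (width % stripes) {		/* check stripe-count divisibility first */
		fprintf(stderr, "Target length not divisible by number of stripes\n");
		return 1;
	}
	width /= stripes;		/* width is now the per-stripe length */

	tmp_len = width;		/* scratch copy keeps width intact */
	if (tmp_len % chunk_size) {
		fprintf(stderr, "Target length not divisible by chunk size\n");
		return 1;
	}

	printf("per-stripe width: %llu sectors\n", (unsigned long long)width);
	return 0;
}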
@@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
			return false;

		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
+		    ti->type->iterate_devices(ti, device_not_write_same_capable, NULL))
			return false;
	}
...
@@ -1645,12 +1645,12 @@ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
	return r;
 }

-static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count)
 {
	int r;
	dm_block_t old_count;

-	r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count);
+	r = dm_sm_get_nr_blocks(sm, &old_count);
	if (r)
		return r;
@@ -1658,11 +1658,11 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
		return 0;

	if (new_count < old_count) {
-		DMERR("cannot reduce size of data device");
+		DMERR("cannot reduce size of space map");
		return -EINVAL;
	}

-	return dm_sm_extend(pmd->data_sm, new_count - old_count);
+	return dm_sm_extend(sm, new_count - old_count);
 }

 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
@@ -1671,7 +1671,19 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
	down_write(&pmd->root_lock);
	if (!pmd->fail_io)
-		r = __resize_data_dev(pmd, new_count);
+		r = __resize_space_map(pmd->data_sm, new_count);
+
+	up_write(&pmd->root_lock);
+
+	return r;
+}
+
+int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
+{
+	int r = -EINVAL;
+
+	down_write(&pmd->root_lock);
+	if (!pmd->fail_io)
+		r = __resize_space_map(pmd->metadata_sm, new_count);
	up_write(&pmd->root_lock);

	return r;
@@ -1684,3 +1696,17 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
	dm_bm_set_read_only(pmd->bm);
	up_write(&pmd->root_lock);
 }
+
+int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+					dm_block_t threshold,
+					dm_sm_threshold_fn fn,
+					void *context)
+{
+	int r;
+
+	down_write(&pmd->root_lock);
+	r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context);
+	up_write(&pmd->root_lock);
+
+	return r;
+}
@@ -8,6 +8,7 @@
 #define DM_THIN_METADATA_H

 #include "persistent-data/dm-block-manager.h"
+#include "persistent-data/dm-space-map.h"

 #define THIN_METADATA_BLOCK_SIZE 4096
@@ -185,6 +186,7 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
 * blocks would be lost.
 */
 int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
+int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);

 /*
  * Flicks the underlying block manager into read only mode, so you know
@@ -192,6 +194,11 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size);
  */
 void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);

+int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
+					dm_block_t threshold,
+					dm_sm_threshold_fn fn,
+					void *context);
+
 /*----------------------------------------------------------------*/

 #endif
@@ -922,7 +922,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
		return r;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
-		DMWARN("%s: reached low water mark, sending event.",
+		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = 1;
@@ -1281,6 +1281,10 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio)
	bio_io_error(bio);
 }

+/*
+ * FIXME: should we also commit due to size of transaction, measured in
+ * metadata blocks?
+ */
 static int need_commit_due_to_time(struct pool *pool)
 {
	return jiffies < pool->last_commit_jiffies ||
@@ -1909,6 +1913,56 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
	return r;
 }

+static void metadata_low_callback(void *context)
+{
+	struct pool *pool = context;
+
+	DMWARN("%s: reached low water mark for metadata device: sending event.",
+	       dm_device_name(pool->pool_md));
+
+	dm_table_event(pool->ti->table);
+}
+
+static sector_t get_metadata_dev_size(struct block_device *bdev)
+{
+	sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+	char buffer[BDEVNAME_SIZE];
+
+	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
+		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
+		       bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
+		metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
+	}
+
+	return metadata_dev_size;
+}
+
+static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
+{
+	sector_t metadata_dev_size = get_metadata_dev_size(bdev);
+
+	sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+
+	return metadata_dev_size;
+}
+
+/*
+ * When a metadata threshold is crossed a dm event is triggered, and
+ * userland should respond by growing the metadata device. We could let
+ * userland set the threshold, like we do with the data threshold, but I'm
+ * not sure they know enough to do this well.
+ */
+static dm_block_t calc_metadata_threshold(struct pool_c *pt)
+{
+	/*
+	 * 4M is ample for all ops with the possible exception of thin
+	 * device deletion which is harmless if it fails (just retry the
+	 * delete after you've grown the device).
+	 */
+	dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
+	return min((dm_block_t)1024ULL /* 4M */, quarter);
+}
+
 /*
  * thin-pool <metadata dev> <data dev>
  *	     <data block size (sectors)>
@@ -1931,8 +1985,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
	unsigned long block_size;
	dm_block_t low_water_blocks;
	struct dm_dev *metadata_dev;
-	sector_t metadata_dev_size;
-	char b[BDEVNAME_SIZE];
+	fmode_t metadata_mode;

	/*
	 * FIXME Remove validation from scope of lock.
@@ -1944,19 +1997,32 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
		r = -EINVAL;
		goto out_unlock;
	}

	as.argc = argc;
	as.argv = argv;

-	r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
+	/*
+	 * Set default pool features.
+	 */
+	pool_features_init(&pf);
+
+	dm_consume_args(&as, 4);
+	r = parse_pool_features(&as, &pf, ti);
+	if (r)
+		goto out_unlock;
+
+	metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
+	r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
	if (r) {
		ti->error = "Error opening metadata block device";
		goto out_unlock;
	}

-	metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
-	if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
-		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
-		       bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
+	/*
+	 * Run for the side-effect of possibly issuing a warning if the
+	 * device is too big.
+	 */
+	(void) get_metadata_dev_size(metadata_dev->bdev);

	r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
	if (r) {
@@ -1979,16 +2045,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
		goto out;
	}

-	/*
-	 * Set default pool features.
-	 */
-	pool_features_init(&pf);
-
-	dm_consume_args(&as, 4);
-	r = parse_pool_features(&as, &pf, ti);
-	if (r)
-		goto out;
-
	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt) {
		r = -ENOMEM;
@@ -2040,6 +2096,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
	}
	ti->private = pt;

+	r = dm_pool_register_metadata_threshold(pt->pool->pmd,
+						calc_metadata_threshold(pt),
+						metadata_low_callback,
+						pool);
+	if (r)
+		goto out_free_pt;
+
	pt->callbacks.congested_fn = pool_is_congested;
	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
@@ -2079,18 +2142,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
	return r;
 }

-/*
- * Retrieves the number of blocks of the data device from
- * the superblock and compares it to the actual device size,
- * thus resizing the data device in case it has grown.
- *
- * This both copes with opening preallocated data devices in the ctr
- * being followed by a resume
- * -and-
- * calling the resume method individually after userspace has
- * grown the data device in reaction to a table event.
- */
-static int pool_preresume(struct dm_target *ti)
+static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
 {
	int r;
	struct pool_c *pt = ti->private;
@@ -2098,12 +2150,7 @@ static int pool_preresume(struct dm_target *ti)
	sector_t data_size = ti->len;
	dm_block_t sb_data_size;

-	/*
-	 * Take control of the pool object.
-	 */
-	r = bind_control_target(pool, ti);
-	if (r)
-		return r;
+	*need_commit = false;

	(void) sector_div(data_size, pool->sectors_per_block);
@@ -2114,7 +2161,7 @@ static int pool_preresume(struct dm_target *ti)
	}

	if (data_size < sb_data_size) {
-		DMERR("pool target too small, is %llu blocks (expected %llu)",
+		DMERR("pool target (%llu blocks) too small: expected %llu",
		      (unsigned long long)data_size, sb_data_size);
		return -EINVAL;
@@ -2122,17 +2169,90 @@ static int pool_preresume(struct dm_target *ti)
		r = dm_pool_resize_data_dev(pool->pmd, data_size);
		if (r) {
			DMERR("failed to resize data device");
-			/* FIXME Stricter than necessary: Rollback transaction instead here */
			set_pool_mode(pool, PM_READ_ONLY);
			return r;
		}

-		(void) commit_or_fallback(pool);
+		*need_commit = true;
	}

	return 0;
 }

+static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
+{
+	int r;
+	struct pool_c *pt = ti->private;
+	struct pool *pool = pt->pool;
+	dm_block_t metadata_dev_size, sb_metadata_dev_size;
+
+	*need_commit = false;
+
+	metadata_dev_size = get_metadata_dev_size(pool->md_dev);
+
+	r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
+	if (r) {
+		DMERR("failed to retrieve data device size");
+		return r;
+	}
+
+	if (metadata_dev_size < sb_metadata_dev_size) {
+		DMERR("metadata device (%llu sectors) too small: expected %llu",
+		      metadata_dev_size, sb_metadata_dev_size);
+		return -EINVAL;
+
+	} else if (metadata_dev_size > sb_metadata_dev_size) {
+		r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
+		if (r) {
+			DMERR("failed to resize metadata device");
+			return r;
+		}
+
+		*need_commit = true;
+	}
+
+	return 0;
+}
+
+/*
+ * Retrieves the number of blocks of the data device from
+ * the superblock and compares it to the actual device size,
+ * thus resizing the data device in case it has grown.
+ *
+ * This both copes with opening preallocated data devices in the ctr
+ * being followed by a resume
+ * -and-
+ * calling the resume method individually after userspace has
+ * grown the data device in reaction to a table event.
+ */
+static int pool_preresume(struct dm_target *ti)
+{
+	int r;
+	bool need_commit1, need_commit2;
+	struct pool_c *pt = ti->private;
+	struct pool *pool = pt->pool;
+
+	/*
+	 * Take control of the pool object.
+	 */
+	r = bind_control_target(pool, ti);
+	if (r)
+		return r;
+
+	r = maybe_resize_data_dev(ti, &need_commit1);
+	if (r)
+		return r;
+
+	r = maybe_resize_metadata_dev(ti, &need_commit2);
+	if (r)
+		return r;
+
+	if (need_commit1 || need_commit2)
+		(void) commit_or_fallback(pool);
+
+	return 0;
+}

 static void pool_resume(struct dm_target *ti)
 {
	struct pool_c *pt = ti->private;
@@ -2549,7 +2669,7 @@ static struct target_type pool_target = {
	.name = "thin-pool",
	.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
		    DM_TARGET_IMMUTABLE,
-	.version = {1, 7, 0},
+	.version = {1, 8, 0},
	.module = THIS_MODULE,
	.ctr = pool_ctr,
	.dtr = pool_dtr,
...
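For reference, calc_metadata_threshold() above caps the low-space event threshold at min(1024 metadata blocks, a quarter of the metadata device); with THIN_METADATA_BLOCK_SIZE of 4096 bytes, 1024 blocks is the 4M mentioned in the comment. A user-space sketch of that arithmetic with an assumed device size:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE		512ULL
#define METADATA_BLOCK_SECTORS	8ULL	/* 4096-byte metadata blocks */

int main(void)
{
	/* assumed: a 64 MiB metadata device */
	uint64_t dev_sectors = (64ULL << 20) / SECTOR_SIZE;
	uint64_t dev_blocks = dev_sectors / METADATA_BLOCK_SECTORS;	/* 16384 blocks */
	uint64_t quarter = dev_blocks / 4;				/* 4096 blocks */
	uint64_t threshold = quarter < 1024 ? quarter : 1024;		/* the 1024-block cap wins */

	printf("threshold = %llu blocks (~%llu MiB of free metadata space)\n",
	       (unsigned long long)threshold,
	       (unsigned long long)(threshold * METADATA_BLOCK_SECTORS * SECTOR_SIZE >> 20));
	return 0;
}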
@@ -248,7 +248,8 @@ static struct dm_space_map ops = {
	.new_block = sm_disk_new_block,
	.commit = sm_disk_commit,
	.root_size = sm_disk_root_size,
-	.copy_root = sm_disk_copy_root
+	.copy_root = sm_disk_copy_root,
+	.register_threshold_callback = NULL
 };

 struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
...
@@ -16,6 +16,55 @@

 /*----------------------------------------------------------------*/

+/*
+ * An edge triggered threshold.
+ */
+struct threshold {
+	bool threshold_set;
+	bool value_set;
+	dm_block_t threshold;
+	dm_block_t current_value;
+	dm_sm_threshold_fn fn;
+	void *context;
+};
+
+static void threshold_init(struct threshold *t)
+{
+	t->threshold_set = false;
+	t->value_set = false;
+}
+
+static void set_threshold(struct threshold *t, dm_block_t value,
+			  dm_sm_threshold_fn fn, void *context)
+{
+	t->threshold_set = true;
+	t->threshold = value;
+	t->fn = fn;
+	t->context = context;
+}
+
+static bool below_threshold(struct threshold *t, dm_block_t value)
+{
+	return t->threshold_set && value <= t->threshold;
+}
+
+static bool threshold_already_triggered(struct threshold *t)
+{
+	return t->value_set && below_threshold(t, t->current_value);
+}
+
+static void check_threshold(struct threshold *t, dm_block_t value)
+{
+	if (below_threshold(t, value) &&
+	    !threshold_already_triggered(t))
+		t->fn(t->context);
+
+	t->value_set = true;
+	t->current_value = value;
+}
+
+/*----------------------------------------------------------------*/
+
 /*
  * Space map interface.
  *
@@ -54,6 +103,8 @@ struct sm_metadata {
	unsigned allocated_this_transaction;
	unsigned nr_uncommitted;
	struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+
+	struct threshold threshold;
 };

 static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
@@ -144,12 +195,6 @@ static void sm_metadata_destroy(struct dm_space_map *sm)
	kfree(smm);
 }

-static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
-{
-	DMERR("doesn't support extend");
-	return -EINVAL;
-}
-
 static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
 {
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
@@ -335,9 +380,19 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
 static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
 {
+	dm_block_t count;
+	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
	int r = sm_metadata_new_block_(sm, b);
	if (r)
		DMERR("unable to allocate new metadata block");
+
+	r = sm_metadata_get_nr_free(sm, &count);
+	if (r)
+		DMERR("couldn't get free block count");
+
+	check_threshold(&smm->threshold, count);
+
	return r;
 }
@@ -357,6 +412,18 @@ static int sm_metadata_commit(struct dm_space_map *sm)
	return 0;
 }

+static int sm_metadata_register_threshold_callback(struct dm_space_map *sm,
+						    dm_block_t threshold,
+						    dm_sm_threshold_fn fn,
+						    void *context)
+{
+	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+
+	set_threshold(&smm->threshold, threshold, fn, context);
+
+	return 0;
+}
+
 static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
 {
	*result = sizeof(struct disk_sm_root);
@@ -382,6 +449,8 @@ static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t
	return 0;
 }

+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);
+
 static struct dm_space_map ops = {
	.destroy = sm_metadata_destroy,
	.extend = sm_metadata_extend,
@@ -395,7 +464,8 @@ static struct dm_space_map ops = {
	.new_block = sm_metadata_new_block,
	.commit = sm_metadata_commit,
	.root_size = sm_metadata_root_size,
-	.copy_root = sm_metadata_copy_root
+	.copy_root = sm_metadata_copy_root,
+	.register_threshold_callback = sm_metadata_register_threshold_callback
 };

 /*----------------------------------------------------------------*/
@@ -410,7 +480,7 @@ static void sm_bootstrap_destroy(struct dm_space_map *sm)
 static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
 {
-	DMERR("boostrap doesn't support extend");
+	DMERR("bootstrap doesn't support extend");
	return -EINVAL;
 }
@@ -450,7 +520,7 @@ static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
 static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
				  uint32_t count)
 {
-	DMERR("boostrap doesn't support set_count");
+	DMERR("bootstrap doesn't support set_count");
	return -EINVAL;
 }
@@ -491,7 +561,7 @@ static int sm_bootstrap_commit(struct dm_space_map *sm)
 static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
 {
-	DMERR("boostrap doesn't support root_size");
+	DMERR("bootstrap doesn't support root_size");
	return -EINVAL;
 }
@@ -499,7 +569,7 @@ static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
 static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
				  size_t max)
 {
-	DMERR("boostrap doesn't support copy_root");
+	DMERR("bootstrap doesn't support copy_root");
	return -EINVAL;
 }
@@ -517,11 +587,42 @@ static struct dm_space_map bootstrap_ops = {
	.new_block = sm_bootstrap_new_block,
	.commit = sm_bootstrap_commit,
	.root_size = sm_bootstrap_root_size,
-	.copy_root = sm_bootstrap_copy_root
+	.copy_root = sm_bootstrap_copy_root,
+	.register_threshold_callback = NULL
 };

 /*----------------------------------------------------------------*/

+static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+{
+	int r, i;
+	enum allocation_event ev;
+	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
+	dm_block_t old_len = smm->ll.nr_blocks;
+
+	/*
+	 * Flick into a mode where all blocks get allocated in the new area.
+	 */
+	smm->begin = old_len;
+	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+
+	/*
+	 * Extend.
+	 */
+	r = sm_ll_extend(&smm->ll, extra_blocks);
+
+	/*
+	 * Switch back to normal behaviour.
+	 */
+	memcpy(&smm->sm, &ops, sizeof(smm->sm));
+	for (i = old_len; !r && i < smm->begin; i++)
+		r = sm_ll_inc(&smm->ll, i, &ev);
+
+	return r;
+}
+
+/*----------------------------------------------------------------*/
+
 struct dm_space_map *dm_sm_metadata_init(void)
 {
	struct sm_metadata *smm;
@@ -549,6 +650,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
	smm->recursion_count = 0;
	smm->allocated_this_transaction = 0;
	smm->nr_uncommitted = 0;
+	threshold_init(&smm->threshold);

	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
@@ -590,6 +692,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
	smm->recursion_count = 0;
	smm->allocated_this_transaction = 0;
	smm->nr_uncommitted = 0;
+	threshold_init(&smm->threshold);

	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));

	return 0;
...
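The threshold added above is edge-triggered: the callback fires when the free-block count first drops to or below the registered threshold, and not again until the count has risen back above it. A standalone user-space model of that logic (same structure and checks as the kernel code, which calls check_threshold() from sm_metadata_new_block()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct threshold {
	bool threshold_set;
	bool value_set;
	uint64_t threshold;
	uint64_t current_value;
	void (*fn)(void *context);
	void *context;
};

static bool below_threshold(struct threshold *t, uint64_t value)
{
	return t->threshold_set && value <= t->threshold;
}

static void check_threshold(struct threshold *t, uint64_t value)
{
	/* Fire only on the transition from above to at-or-below the threshold. */
	if (below_threshold(t, value) &&
	    !(t->value_set && below_threshold(t, t->current_value)))
		t->fn(t->context);

	t->value_set = true;
	t->current_value = value;
}

static void low_space(void *context)
{
	printf("low space event\n");
}

int main(void)
{
	struct threshold t = { .threshold_set = true, .threshold = 100, .fn = low_space };

	check_threshold(&t, 150);	/* above threshold: no event */
	check_threshold(&t, 90);	/* crosses below: one event */
	check_threshold(&t, 80);	/* still below: suppressed */
	check_threshold(&t, 200);	/* free space recovered */
	check_threshold(&t, 50);	/* crosses again: second event */
	return 0;
}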
@@ -9,6 +9,8 @@

 #include "dm-block-manager.h"

+typedef void (*dm_sm_threshold_fn)(void *context);
+
 /*
  * struct dm_space_map keeps a record of how many times each block in a device
  * is referenced. It needs to be fixed on disk as part of the transaction.
@@ -59,6 +61,15 @@ struct dm_space_map {
	 */
	int (*root_size)(struct dm_space_map *sm, size_t *result);
	int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
+
+	/*
+	 * You can register one threshold callback which is edge-triggered
+	 * when the free space in the space map drops below the threshold.
+	 */
+	int (*register_threshold_callback)(struct dm_space_map *sm,
+					   dm_block_t threshold,
+					   dm_sm_threshold_fn fn,
+					   void *context);
 };

 /*----------------------------------------------------------------*/
@@ -131,4 +142,16 @@ static inline int dm_sm_copy_root(struct dm_space_map *sm, void *copy_to_here_le
	return sm->copy_root(sm, copy_to_here_le, len);
 }

+static inline int dm_sm_register_threshold_callback(struct dm_space_map *sm,
+						     dm_block_t threshold,
+						     dm_sm_threshold_fn fn,
+						     void *context)
+{
+	if (sm->register_threshold_callback)
+		return sm->register_threshold_callback(sm, threshold, fn, context);
+
+	return -EINVAL;
+}
+
 #endif	/* _LINUX_DM_SPACE_MAP_H */
@@ -79,11 +79,26 @@ typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
 typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

+/*
+ * These iteration functions are typically used to check (and combine)
+ * properties of underlying devices.
+ * E.g. Does at least one underlying device support flush?
+ *      Does any underlying device not support WRITE_SAME?
+ *
+ * The callout function is called once for each contiguous section of
+ * an underlying device. State can be maintained in *data.
+ * Return non-zero to stop iterating through any further devices.
+ */
 typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					    struct dm_dev *dev,
					    sector_t start, sector_t len,
					    void *data);

+/*
+ * This function must iterate through each section of device used by the
+ * target until it encounters a non-zero return code, which it then returns.
+ * Returns zero if no callout returned non-zero.
+ */
 typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				       iterate_devices_callout_fn fn,
				       void *data);
...
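These comments document the contract that the dm-table WRITE SAME fix earlier in this merge relies on: a callout returns non-zero to flag a property of one underlying device, and iterate_devices propagates the first non-zero result. A sketch of the pattern, modelled on dm-table.c (the wrapper name target_supports_write_same is illustrative, not an existing helper):

/* Callout: returns non-zero if this underlying device cannot do WRITE SAME. */
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
					 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_same_sectors;
}

/*
 * Illustrative wrapper: a target supports WRITE SAME only if it can iterate
 * its devices and no callout flagged an incapable one - which is why the
 * dm-table fix above drops the '!' from the iterate_devices() result.
 */
static bool target_supports_write_same(struct dm_target *ti)
{
	return ti->type->iterate_devices &&
	       !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL);
}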