Commit 802ea9d8 authored by Linus Torvalds

Merge tag 'dm-3.20-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper changes from Mike Snitzer:

 - The most significant change this cycle is that request-based DM now
   supports stacking on top of blk-mq devices.  This blk-mq support
   changes the model request-based DM uses for cloning a request: the
   clone is now obtained by calling blk_get_request() directly on the
   underlying blk-mq device (a sketch of this model follows the summary
   below).

   An early consumer of this code is Intel's emerging NVMe hardware;
   thanks to Keith Busch for working on, and pushing for, these changes.

 - A few other small fixes and cleanups across other DM targets.
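
A minimal sketch of that cloning model, built on the new .clone_and_map_rq /
.release_clone_rq hooks added by this series.  The target context
(example_ctx), its dev member, and the function names here are illustrative
only, not part of the patches:

	#include <linux/blkdev.h>
	#include <linux/device-mapper.h>

	struct example_ctx {
		struct dm_dev *dev;	/* underlying device, opened in the target's .ctr */
	};

	/* Instead of mapping a clone pre-allocated by DM core, the target asks
	 * the underlying (blk-mq) queue for the clone itself via blk_get_request(). */
	static int example_clone_and_map_rq(struct dm_target *ti, struct request *rq,
					    union map_info *map_context,
					    struct request **clone)
	{
		struct example_ctx *ctx = ti->private;
		struct request_queue *q = bdev_get_queue(ctx->dev->bdev);

		*clone = blk_get_request(q, rq_data_dir(rq), GFP_KERNEL);
		if (IS_ERR(*clone))
			return DM_MAPIO_REQUEUE;	/* allocation failed, retry later */

		(*clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
		return DM_MAPIO_REMAPPED;
	}

	/* DM core hands the clone back to the target once it is done with it. */
	static void example_release_clone_rq(struct request *clone)
	{
		blk_put_request(clone);
	}

The dm-mpath hunk below wires exactly this pair of hooks into its target_type.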

* tag 'dm-3.20-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm: inherit QUEUE_FLAG_SG_GAPS flags from underlying queues
  dm snapshot: remove unnecessary NULL checks before vfree() calls
  dm mpath: simplify failure path of dm_multipath_init()
  dm thin metadata: remove unused dm_pool_get_data_block_size()
  dm ioctl: fix stale comment above dm_get_inactive_table()
  dm crypt: update url in CONFIG_DM_CRYPT help text
  dm bufio: fix time comparison to use time_after_eq()
  dm: use time_in_range() and time_after()
  dm raid: fix a couple integer overflows
  dm table: train hybrid target type detection to select blk-mq if appropriate
  dm: allocate requests in target when stacking on blk-mq devices
  dm: prepare for allocating blk-mq clone requests in target
  dm: submit stacked requests in irq enabled context
  dm: split request structure out from dm_rq_target_io structure
  dm: remove exports for request-based interfaces without external callers
parents 8494bcf5 a4afe76b
@@ -231,9 +231,8 @@ config DM_CRYPT
 	  transparently encrypts the data on it. You'll need to activate
 	  the ciphers you're going to use in the cryptoapi configuration.
-	  Information on how to use dm-crypt can be found on
-	  <http://www.saout.de/misc/dm-crypt/>
+	  For further information on dm-crypt and userspace tools see:
+	  <http://code.google.com/p/cryptsetup/wiki/DMCrypt>
 	  To compile this code as a module, choose M here: the module will
 	  be called dm-crypt.
...
@@ -11,6 +11,7 @@
 #include <linux/device-mapper.h>
 #include <linux/dm-io.h>
 #include <linux/slab.h>
+#include <linux/jiffies.h>
 #include <linux/vmalloc.h>
 #include <linux/shrinker.h>
 #include <linux/module.h>
@@ -1739,7 +1740,7 @@ static unsigned get_max_age_hz(void)
 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
 {
-	return (jiffies - b->last_accessed) >= age_hz;
+	return time_after_eq(jiffies, b->last_accessed + age_hz);
 }

 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
...
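
The dm bufio change above swaps an open-coded comparison for time_after_eq().
The reason: jiffies is an unsigned long that wraps around, and the time_*()
macros in <linux/jiffies.h> compare through signed subtraction, roughly
time_after_eq(a, b) == ((long)((a) - (b)) >= 0), so the test stays correct
across a wrap.  A small illustrative helper (not from this series):

	#include <linux/jiffies.h>
	#include <linux/types.h>

	/* True once "deadline" (in jiffies) has passed, even if the
	 * jiffies counter wrapped in the meantime. */
	static inline bool deadline_passed(unsigned long deadline)
	{
		return time_after_eq(jiffies, deadline);
	}

In older_than() above, b->last_accessed + age_hz plays the role of the deadline.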
@@ -11,6 +11,7 @@
 #include <linux/dm-io.h>
 #include <linux/dm-kcopyd.h>
+#include <linux/jiffies.h>
 #include <linux/init.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
@@ -1562,8 +1563,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
 static int need_commit_due_to_time(struct cache *cache)
 {
-	return jiffies < cache->last_commit_jiffies ||
-	       jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
+	return !time_in_range(jiffies, cache->last_commit_jiffies,
+			      cache->last_commit_jiffies + COMMIT_PERIOD);
 }

 static int commit_if_needed(struct cache *cache)
...
@@ -639,8 +639,8 @@ static int check_name(const char *name)
 /*
  * On successful return, the caller must not attempt to acquire
- * _hash_lock without first calling dm_table_put, because dm_table_destroy
- * waits for this dm_table_put and could be called under this lock.
+ * _hash_lock without first calling dm_put_live_table, because dm_table_destroy
+ * waits for this dm_put_live_table and could be called under this lock.
  */
 static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx)
 {
...
@@ -6,6 +6,7 @@
 #include <linux/bio.h>
 #include <linux/slab.h>
+#include <linux/jiffies.h>
 #include <linux/dm-dirty-log.h>
 #include <linux/device-mapper.h>
 #include <linux/dm-log-userspace.h>
@@ -829,7 +830,7 @@ static int userspace_is_remote_recovering(struct dm_dirty_log *log,
 	int r;
 	uint64_t region64 = region;
 	struct log_c *lc = log->context;
-	static unsigned long long limit;
+	static unsigned long limit;
 	struct {
 		int64_t is_recovering;
 		uint64_t in_sync_hint;
@@ -845,7 +846,7 @@ static int userspace_is_remote_recovering(struct dm_dirty_log *log,
 	 */
 	if (region < lc->in_sync_hint)
 		return 0;
-	else if (jiffies < limit)
+	else if (time_after(limit, jiffies))
 		return 1;

 	limit = jiffies + (HZ / 4);
...
@@ -11,6 +11,7 @@
 #include "dm-path-selector.h"
 #include "dm-uevent.h"

+#include <linux/blkdev.h>
 #include <linux/ctype.h>
 #include <linux/init.h>
 #include <linux/mempool.h>
@@ -378,18 +379,18 @@ static int __must_push_back(struct multipath *m)
 /*
  * Map cloned requests
  */
-static int multipath_map(struct dm_target *ti, struct request *clone,
-			 union map_info *map_context)
+static int __multipath_map(struct dm_target *ti, struct request *clone,
+			   union map_info *map_context,
+			   struct request *rq, struct request **__clone)
 {
 	struct multipath *m = (struct multipath *) ti->private;
 	int r = DM_MAPIO_REQUEUE;
-	size_t nr_bytes = blk_rq_bytes(clone);
-	unsigned long flags;
+	size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
 	struct pgpath *pgpath;
 	struct block_device *bdev;
 	struct dm_mpath_io *mpio;

-	spin_lock_irqsave(&m->lock, flags);
+	spin_lock_irq(&m->lock);

 	/* Do we need to select a new pgpath? */
 	if (!m->current_pgpath ||
@@ -411,25 +412,61 @@ static int multipath_map(struct dm_target *ti, struct request *clone,
 		/* ENOMEM, requeue */
 		goto out_unlock;

-	bdev = pgpath->path.dev->bdev;
-	clone->q = bdev_get_queue(bdev);
-	clone->rq_disk = bdev->bd_disk;
-	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
 	mpio = map_context->ptr;
 	mpio->pgpath = pgpath;
 	mpio->nr_bytes = nr_bytes;

+	bdev = pgpath->path.dev->bdev;
+
+	spin_unlock_irq(&m->lock);
+
+	if (clone) {
+		/* Old request-based interface: allocated clone is passed in */
+		clone->q = bdev_get_queue(bdev);
+		clone->rq_disk = bdev->bd_disk;
+		clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+	} else {
+		/* blk-mq request-based interface */
+		*__clone = blk_get_request(bdev_get_queue(bdev),
+					   rq_data_dir(rq), GFP_KERNEL);
+		if (IS_ERR(*__clone))
+			/* ENOMEM, requeue */
+			return r;
+		(*__clone)->bio = (*__clone)->biotail = NULL;
+		(*__clone)->rq_disk = bdev->bd_disk;
+		(*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+	}
+
 	if (pgpath->pg->ps.type->start_io)
 		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
 					      &pgpath->path,
 					      nr_bytes);
-	r = DM_MAPIO_REMAPPED;
+	return DM_MAPIO_REMAPPED;

 out_unlock:
-	spin_unlock_irqrestore(&m->lock, flags);
+	spin_unlock_irq(&m->lock);

 	return r;
 }

+static int multipath_map(struct dm_target *ti, struct request *clone,
+			 union map_info *map_context)
+{
+	return __multipath_map(ti, clone, map_context, NULL, NULL);
+}
+
+static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
+				   union map_info *map_context,
+				   struct request **clone)
+{
+	return __multipath_map(ti, NULL, map_context, rq, clone);
+}
+
+static void multipath_release_clone(struct request *clone)
+{
+	blk_put_request(clone);
+}
+
 /*
  * If we run out of usable paths, should we queue I/O or error it?
  */
@@ -1666,11 +1703,13 @@ static int multipath_busy(struct dm_target *ti)
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
 	.name = "multipath",
-	.version = {1, 7, 0},
+	.version = {1, 8, 0},
 	.module = THIS_MODULE,
 	.ctr = multipath_ctr,
 	.dtr = multipath_dtr,
 	.map_rq = multipath_map,
+	.clone_and_map_rq = multipath_clone_and_map,
+	.release_clone_rq = multipath_release_clone,
 	.rq_end_io = multipath_end_io,
 	.presuspend = multipath_presuspend,
 	.postsuspend = multipath_postsuspend,
@@ -1694,16 +1733,15 @@ static int __init dm_multipath_init(void)
 	r = dm_register_target(&multipath_target);
 	if (r < 0) {
 		DMERR("register failed %d", r);
-		kmem_cache_destroy(_mpio_cache);
-		return -EINVAL;
+		r = -EINVAL;
+		goto bad_register_target;
 	}

 	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
 	if (!kmultipathd) {
 		DMERR("failed to create workqueue kmpathd");
-		dm_unregister_target(&multipath_target);
-		kmem_cache_destroy(_mpio_cache);
-		return -ENOMEM;
+		r = -ENOMEM;
+		goto bad_alloc_kmultipathd;
 	}

 	/*
@@ -1716,16 +1754,23 @@ static int __init dm_multipath_init(void)
 					  WQ_MEM_RECLAIM);
 	if (!kmpath_handlerd) {
 		DMERR("failed to create workqueue kmpath_handlerd");
-		destroy_workqueue(kmultipathd);
-		dm_unregister_target(&multipath_target);
-		kmem_cache_destroy(_mpio_cache);
-		return -ENOMEM;
+		r = -ENOMEM;
+		goto bad_alloc_kmpath_handlerd;
 	}

 	DMINFO("version %u.%u.%u loaded",
 	       multipath_target.version[0], multipath_target.version[1],
 	       multipath_target.version[2]);

+	return 0;
+
+bad_alloc_kmpath_handlerd:
+	destroy_workqueue(kmultipathd);
+bad_alloc_kmultipathd:
+	dm_unregister_target(&multipath_target);
+bad_register_target:
+	kmem_cache_destroy(_mpio_cache);
+
 	return r;
 }
...
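
The dm_multipath_init() rework above replaces duplicated cleanup in every
failure branch with a single unwind ladder: each step that can fail jumps to
a label named after itself, and the labels undo the earlier steps in reverse
order, so every exit path releases exactly what was set up.  A generic sketch
of the pattern (all names here are hypothetical, not from the patch):

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	static struct kmem_cache *example_cache;
	static struct workqueue_struct *example_wq;
	static struct workqueue_struct *example_handler_wq;

	static int __init example_init(void)
	{
		int r;

		example_cache = kmem_cache_create("example_io", 64, 0, 0, NULL);
		if (!example_cache)
			return -ENOMEM;

		example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
		if (!example_wq) {
			r = -ENOMEM;
			goto bad_alloc_wq;
		}

		example_handler_wq = alloc_ordered_workqueue("example_handler_wq",
							     WQ_MEM_RECLAIM);
		if (!example_handler_wq) {
			r = -ENOMEM;
			goto bad_alloc_handler_wq;
		}

		return 0;

	bad_alloc_handler_wq:
		destroy_workqueue(example_wq);
	bad_alloc_wq:
		kmem_cache_destroy(example_cache);

		return r;
	}

	static void __exit example_exit(void)
	{
		destroy_workqueue(example_handler_wq);
		destroy_workqueue(example_wq);
		kmem_cache_destroy(example_cache);
	}

	module_init(example_init);
	module_exit(example_exit);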
@@ -1237,7 +1237,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	argv++;

 	/* Skip over RAID params for now and find out # of devices */
-	if (num_raid_params + 1 > argc) {
+	if (num_raid_params >= argc) {
 		ti->error = "Arguments do not agree with counts given";
 		return -EINVAL;
 	}
@@ -1248,6 +1248,12 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 		return -EINVAL;
 	}

+	argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
+	if (argc != (num_raid_devs * 2)) {
+		ti->error = "Supplied RAID devices does not match the count given";
+		return -EINVAL;
+	}
+
 	rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
 	if (IS_ERR(rs))
 		return PTR_ERR(rs);
@@ -1256,16 +1262,8 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	if (ret)
 		goto bad;

-	ret = -EINVAL;
-
-	argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
 	argv += num_raid_params + 1;
-
-	if (argc != (num_raid_devs * 2)) {
-		ti->error = "Supplied RAID devices does not match the count given";
-		goto bad;
-	}
-
 	ret = dev_parms(rs, argv);
 	if (ret)
 		goto bad;
...
@@ -200,16 +200,11 @@ static int alloc_area(struct pstore *ps)

 static void free_area(struct pstore *ps)
 {
-	if (ps->area)
-		vfree(ps->area);
+	vfree(ps->area);
 	ps->area = NULL;

-	if (ps->zero_area)
-		vfree(ps->zero_area);
+	vfree(ps->zero_area);
 	ps->zero_area = NULL;

-	if (ps->header_area)
-		vfree(ps->header_area);
+	vfree(ps->header_area);
 	ps->header_area = NULL;
 }

@@ -605,8 +600,7 @@ static void persistent_dtr(struct dm_exception_store *store)
 	free_area(ps);

 	/* Allocated in persistent_read_metadata */
-	if (ps->callbacks)
-		vfree(ps->callbacks);
+	vfree(ps->callbacks);

 	kfree(ps);
 }
...
@@ -827,10 +827,11 @@ static int dm_table_set_type(struct dm_table *t)
 {
 	unsigned i;
 	unsigned bio_based = 0, request_based = 0, hybrid = 0;
+	bool use_blk_mq = false;
 	struct dm_target *tgt;
 	struct dm_dev_internal *dd;
 	struct list_head *devices;
-	unsigned live_md_type;
+	unsigned live_md_type = dm_get_md_type(t->md);

 	for (i = 0; i < t->num_targets; i++) {
 		tgt = t->targets + i;
@@ -854,8 +855,8 @@ static int dm_table_set_type(struct dm_table *t)
 		 * Determine the type from the live device.
 		 * Default to bio-based if device is new.
 		 */
-		live_md_type = dm_get_md_type(t->md);
-		if (live_md_type == DM_TYPE_REQUEST_BASED)
+		if (live_md_type == DM_TYPE_REQUEST_BASED ||
+		    live_md_type == DM_TYPE_MQ_REQUEST_BASED)
 			request_based = 1;
 		else
 			bio_based = 1;
@@ -869,16 +870,6 @@ static int dm_table_set_type(struct dm_table *t)

 	BUG_ON(!request_based); /* No targets in this table */

-	/* Non-request-stackable devices can't be used for request-based dm */
-	devices = dm_table_get_devices(t);
-	list_for_each_entry(dd, devices, list) {
-		if (!blk_queue_stackable(bdev_get_queue(dd->dm_dev->bdev))) {
-			DMWARN("table load rejected: including"
-			       " non-request-stackable devices");
-			return -EINVAL;
-		}
-	}
-
 	/*
 	 * Request-based dm supports only tables that have a single target now.
 	 * To support multiple targets, request splitting support is needed,
@@ -890,7 +881,37 @@ static int dm_table_set_type(struct dm_table *t)
 		return -EINVAL;
 	}

-	t->type = DM_TYPE_REQUEST_BASED;
+	/* Non-request-stackable devices can't be used for request-based dm */
+	devices = dm_table_get_devices(t);
+	list_for_each_entry(dd, devices, list) {
+		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
+
+		if (!blk_queue_stackable(q)) {
+			DMERR("table load rejected: including"
+			      " non-request-stackable devices");
+			return -EINVAL;
+		}
+
+		if (q->mq_ops)
+			use_blk_mq = true;
+	}
+
+	if (use_blk_mq) {
+		/* verify _all_ devices in the table are blk-mq devices */
+		list_for_each_entry(dd, devices, list)
+			if (!bdev_get_queue(dd->dm_dev->bdev)->mq_ops) {
+				DMERR("table load rejected: not all devices"
+				      " are blk-mq request-stackable");
+				return -EINVAL;
+			}
+		t->type = DM_TYPE_MQ_REQUEST_BASED;
+
+	} else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) {
+		/* inherit live MD type */
+		t->type = live_md_type;
+
+	} else
+		t->type = DM_TYPE_REQUEST_BASED;

 	return 0;
 }
@@ -907,7 +928,15 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)

 bool dm_table_request_based(struct dm_table *t)
 {
-	return dm_table_get_type(t) == DM_TYPE_REQUEST_BASED;
+	unsigned table_type = dm_table_get_type(t);
+
+	return (table_type == DM_TYPE_REQUEST_BASED ||
+		table_type == DM_TYPE_MQ_REQUEST_BASED);
+}
+
+bool dm_table_mq_request_based(struct dm_table *t)
+{
+	return dm_table_get_type(t) == DM_TYPE_MQ_REQUEST_BASED;
 }

 static int dm_table_alloc_md_mempools(struct dm_table *t)
@@ -1360,6 +1389,14 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
 	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
 }

+static int queue_supports_sg_gaps(struct dm_target *ti, struct dm_dev *dev,
+				  sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags);
+}
+
 static bool dm_table_all_devices_attribute(struct dm_table *t,
 					   iterate_devices_callout_fn func)
 {
@@ -1480,6 +1517,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

+	if (dm_table_all_devices_attribute(t, queue_supports_sg_gaps))
+		queue_flag_clear_unlocked(QUEUE_FLAG_SG_GAPS, q);
+	else
+		queue_flag_set_unlocked(QUEUE_FLAG_SG_GAPS, q);
+
 	dm_table_set_integrity(t);

 	/*
...
@@ -137,13 +137,26 @@ static int io_err_map_rq(struct dm_target *ti, struct request *clone,
 	return -EIO;
 }

+static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
+				   union map_info *map_context,
+				   struct request **clone)
+{
+	return -EIO;
+}
+
+static void io_err_release_clone_rq(struct request *clone)
+{
+}
+
 static struct target_type error_target = {
 	.name = "error",
-	.version = {1, 2, 0},
+	.version = {1, 3, 0},
 	.ctr = io_err_ctr,
 	.dtr = io_err_dtr,
 	.map = io_err_map,
 	.map_rq = io_err_map_rq,
+	.clone_and_map_rq = io_err_clone_and_map_rq,
+	.release_clone_rq = io_err_release_clone_rq,
 };

 int __init dm_target_init(void)
...
@@ -1635,15 +1635,6 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
 	return r;
 }

-int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result)
-{
-	down_read(&pmd->root_lock);
-	*result = pmd->data_block_size;
-	up_read(&pmd->root_lock);
-
-	return 0;
-}
-
 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
 {
 	int r = -EINVAL;
...
@@ -182,8 +182,6 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
 int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
 				  dm_block_t *result);

-int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result);
-
 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);

 int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
...
@@ -11,6 +11,7 @@
 #include <linux/device-mapper.h>
 #include <linux/dm-io.h>
 #include <linux/dm-kcopyd.h>
+#include <linux/jiffies.h>
 #include <linux/log2.h>
 #include <linux/list.h>
 #include <linux/rculist.h>
@@ -1700,8 +1701,8 @@ static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell
  */
 static int need_commit_due_to_time(struct pool *pool)
 {
-	return jiffies < pool->last_commit_jiffies ||
-	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
+	return !time_in_range(jiffies, pool->last_commit_jiffies,
+			      pool->last_commit_jiffies + COMMIT_PERIOD);
 }

 #define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
...
(The diff for one file is collapsed and not shown here.)
@@ -34,9 +34,10 @@
 /*
  * Type of table and mapped_device's mempool
  */
 #define DM_TYPE_NONE			0
 #define DM_TYPE_BIO_BASED		1
 #define DM_TYPE_REQUEST_BASED		2
+#define DM_TYPE_MQ_REQUEST_BASED	3

 /*
  * List of devices that a metadevice uses and should open/close.
@@ -73,6 +74,7 @@ int dm_table_any_busy_target(struct dm_table *t);
 unsigned dm_table_get_type(struct dm_table *t);
 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
+bool dm_table_mq_request_based(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
@@ -99,7 +101,8 @@ int dm_setup_md_queue(struct mapped_device *md);
 /*
  * To check whether the target type is request-based or not (bio-based).
  */
-#define dm_target_request_based(t) ((t)->type->map_rq != NULL)
+#define dm_target_request_based(t) (((t)->type->map_rq != NULL) || \
+				    ((t)->type->clone_and_map_rq != NULL))

 /*
  * To check whether the target type is a hybrid (capable of being
...
@@ -48,6 +48,11 @@ typedef void (*dm_dtr_fn) (struct dm_target *ti);
 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
 typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
 				  union map_info *map_context);
+typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
+					    struct request *rq,
+					    union map_info *map_context,
+					    struct request **clone);
+typedef void (*dm_release_clone_request_fn) (struct request *clone);

 /*
  * Returns:
@@ -143,6 +148,8 @@ struct target_type {
 	dm_dtr_fn dtr;
 	dm_map_fn map;
 	dm_map_request_fn map_rq;
+	dm_clone_and_map_request_fn clone_and_map_rq;
+	dm_release_clone_request_fn release_clone_rq;
 	dm_endio_fn end_io;
 	dm_request_endio_fn rq_end_io;
 	dm_presuspend_fn presuspend;
@@ -600,9 +607,6 @@ static inline unsigned long to_bytes(sector_t n)
 /*-----------------------------------------------------------------
  * Helper for block layer and dm core operations
  *---------------------------------------------------------------*/
-void dm_dispatch_request(struct request *rq);
-void dm_requeue_unmapped_request(struct request *rq);
-void dm_kill_unmapped_request(struct request *rq, int error);
 int dm_underlying_device_busy(struct request_queue *q);

 #endif	/* _LINUX_DEVICE_MAPPER_H */
...
@@ -267,9 +267,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)

 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	29
+#define DM_VERSION_MINOR	30
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2014-10-28)"
+#define DM_VERSION_EXTRA	"-ioctl (2014-12-22)"

 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
...