Commit a911dcdb authored by Linus Torvalds

Merge tag 'dm-3.20-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull more device mapper changes from Mike Snitzer:

- Significant dm-crypt CPU scalability performance improvements thanks
  to changes that enable effective use of an unbound workqueue across
  all available CPUs.  A large battery of tests was performed to
  validate these changes; a summary of the results is available here:
  https://www.redhat.com/archives/dm-devel/2015-February/msg00106.html
  (a brief sketch of the unbound-workqueue pattern appears after this list)

- A few additional stable fixes (to DM core, dm-snapshot and dm-mirror)
  and a small fix to dm-space-map-disk.
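
For readers less familiar with the workqueue API, below is a minimal, illustrative sketch of the unbound-workqueue pattern the dm-crypt scalability work relies on. It is not the dm-crypt code itself; the queue name, the flags shown and the crypt_work structure are assumptions made for the example.

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative stand-in for a per-request work item. */
struct crypt_work {
	struct work_struct work;
	/* ... per-request state would live here ... */
};

static struct workqueue_struct *crypt_wq;

static void crypt_do_work(struct work_struct *work)
{
	struct crypt_work *cw = container_of(work, struct crypt_work, work);

	/* Encryption would run here, on whichever CPU the unbound queue chose. */
	kfree(cw);
}

static void crypt_queue_work(struct crypt_work *cw)
{
	INIT_WORK(&cw->work, crypt_do_work);
	queue_work(crypt_wq, &cw->work);
}

static int __init crypt_wq_example_init(void)
{
	/*
	 * WQ_UNBOUND lets work items run on any online CPU instead of being
	 * pinned to the submitting CPU; the new same_cpu_crypt option restores
	 * the old per-CPU behaviour for workloads that prefer it.
	 */
	crypt_wq = alloc_workqueue("example_crypt",
				   WQ_UNBOUND | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
				   num_online_cpus());
	return crypt_wq ? 0 : -ENOMEM;
}

queue_work() then lets any idle CPU pick up pending encryption work, which is where the multi-core scalability improvement comes from.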

* tag 'dm-3.20-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm snapshot: fix a possible invalid memory access on unload
  dm: fix a race condition in dm_get_md
  dm crypt: sort writes
  dm crypt: add 'submit_from_crypt_cpus' option
  dm crypt: offload writes to thread
  dm crypt: remove unused io_pool and _crypt_io_pool
  dm crypt: avoid deadlock in mempools
  dm crypt: don't allocate pages for a partial request
  dm crypt: use unbound workqueue for request processing
  dm io: reject unsupported DISCARD requests with EOPNOTSUPP
  dm mirror: do not degrade the mirror on discard error
  dm space map disk: fix sm_disk_count_is_more_than_one()
parents e20d3ef5 22aa66a3
@@ -51,7 +51,7 @@ Parameters: <cipher> <key> <iv_offset> <device path> \
     Otherwise #opt_params is the number of following arguments.
 
     Example of optional parameters section:
-        1 allow_discards
+        3 allow_discards same_cpu_crypt submit_from_crypt_cpus
 
 allow_discards
     Block discard requests (a.k.a. TRIM) are passed through the crypt device.
@@ -63,6 +63,19 @@ allow_discards
     used space etc.) if the discarded blocks can be located easily on the
     device later.
 
+same_cpu_crypt
+    Perform encryption using the same cpu that IO was submitted on.
+    The default is to use an unbound workqueue so that encryption work
+    is automatically balanced between available CPUs.
+
+submit_from_crypt_cpus
+    Disable offloading writes to a separate thread after encryption.
+    There are some situations where offloading write bios from the
+    encryption threads to a single thread degrades performance
+    significantly.  The default is to offload write bios to the same
+    thread because it benefits CFQ to have writes submitted using the
+    same context.
+
 Example scripts
 ===============
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
...
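
To make the offload behaviour described above concrete, here is a heavily simplified, hypothetical sketch (names such as offload_write and write_thread_fn are invented for the example) of handing encrypted write bios to one dedicated thread for submission; the real dm-crypt implementation additionally sorts the queued writes.

#include <linux/bio.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static struct task_struct *write_thread;
static struct bio_list write_bios;
static DEFINE_SPINLOCK(write_lock);
static DECLARE_WAIT_QUEUE_HEAD(write_wait);

/* Called by the encryption workers once a write bio has been encrypted. */
static void offload_write(struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&write_lock, flags);
	bio_list_add(&write_bios, bio);
	spin_unlock_irqrestore(&write_lock, flags);

	wake_up(&write_wait);
}

/* Single thread that submits every queued write bio to the block layer. */
static int write_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		struct bio *bio;

		wait_event_interruptible(write_wait,
					 !bio_list_empty(&write_bios) ||
					 kthread_should_stop());

		spin_lock_irq(&write_lock);
		bio = bio_list_pop(&write_bios);
		spin_unlock_irq(&write_lock);

		if (bio)
			generic_make_request(bio);
	}
	return 0;
}

static int start_write_thread(void)
{
	bio_list_init(&write_bios);
	write_thread = kthread_run(write_thread_fn, NULL, "example_write");
	return PTR_ERR_OR_ZERO(write_thread);
}

Because every write reaches the block layer from this single task, an I/O scheduler such as CFQ sees one submission context; the submit_from_crypt_cpus option skips this hand-off and lets each encryption worker submit its own writes.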
This diff is collapsed.
@@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 	unsigned short logical_block_size = queue_logical_block_size(q);
 	sector_t num_sectors;
 
+	/* Reject unsupported discard requests */
+	if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
+		dec_count(io, region, -EOPNOTSUPP);
+		return;
+	}
+
 	/*
 	 * where->count may be zero if rw holds a flush and we need to
 	 * send a zero-sized flush.
...
@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
 		return;
 	}
 
+	/*
+	 * If the bio is discard, return an error, but do not
+	 * degrade the array.
+	 */
+	if (bio->bi_rw & REQ_DISCARD) {
+		bio_endio(bio, -EOPNOTSUPP);
+		return;
+	}
+
 	for (i = 0; i < ms->nr_mirrors; i++)
 		if (test_bit(i, &error))
 			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
...
@@ -1432,8 +1432,6 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 		full_bio->bi_private = pe->full_bio_private;
 		atomic_inc(&full_bio->bi_remaining);
 	}
-	free_pending_exception(pe);
-
 	increment_pending_exceptions_done_count();
 
 	up_write(&s->lock);
@@ -1450,6 +1448,8 @@ static void pending_complete(struct dm_snap_pending_exception *pe, int success)
 	}
 
 	retry_origin_bios(s, origin_bios);
+
+	free_pending_exception(pe);
 }
 
 static void commit_callback(void *context, int success)
...
@@ -2571,7 +2571,7 @@ int dm_setup_md_queue(struct mapped_device *md)
 	return 0;
 }
 
-static struct mapped_device *dm_find_md(dev_t dev)
+struct mapped_device *dm_get_md(dev_t dev)
 {
 	struct mapped_device *md;
 	unsigned minor = MINOR(dev);
@@ -2582,29 +2582,22 @@ static struct mapped_device *dm_find_md(dev_t dev)
 	spin_lock(&_minor_lock);
 
 	md = idr_find(&_minor_idr, minor);
-	if (md && (md == MINOR_ALLOCED ||
-		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
-		   dm_deleting_md(md) ||
-		   test_bit(DMF_FREEING, &md->flags))) {
-		md = NULL;
-		goto out;
-	}
-
+	if (md) {
+		if ((md == MINOR_ALLOCED ||
+		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
+		     dm_deleting_md(md) ||
+		     test_bit(DMF_FREEING, &md->flags))) {
+			md = NULL;
+			goto out;
+		}
+		dm_get(md);
+	}
 out:
 	spin_unlock(&_minor_lock);
 
 	return md;
 }
 
-struct mapped_device *dm_get_md(dev_t dev)
-{
-	struct mapped_device *md = dm_find_md(dev);
-
-	if (md)
-		dm_get(md);
-
-	return md;
-}
 EXPORT_SYMBOL_GPL(dm_get_md);
 
 void *dm_get_mdptr(struct mapped_device *md)
...
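
The hunk above closes a lookup-versus-free race by taking the reference while _minor_lock is still held, instead of looking the device up, dropping the lock and only then calling dm_get(). A minimal sketch of that general pattern, using hypothetical names (struct obj, obj_lock, obj_idr) rather than the dm code itself:

#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/spinlock.h>

/* Hypothetical refcounted object, for illustration only. */
struct obj {
	struct kref ref;
	/* ... */
};

static DEFINE_SPINLOCK(obj_lock);
static DEFINE_IDR(obj_idr);

/*
 * Racy variant: another CPU may tear the object down between the moment
 * the lock is dropped and the moment the caller takes its reference.
 */
static struct obj *obj_find_racy(int id)
{
	struct obj *o;

	spin_lock(&obj_lock);
	o = idr_find(&obj_idr, id);
	spin_unlock(&obj_lock);

	return o;	/* reference taken later, which is too late */
}

/*
 * Fixed variant, mirroring the dm_get_md() change: look up and take the
 * reference inside the same critical section.
 */
static struct obj *obj_get(int id)
{
	struct obj *o;

	spin_lock(&obj_lock);
	o = idr_find(&obj_idr, id);
	if (o)
		kref_get(&o->ref);
	spin_unlock(&obj_lock);

	return o;
}

The teardown path must correspondingly unpublish the object from the IDR and mark it as going away under the same lock before freeing it, which is essentially what dm.c does with _minor_lock and the DMF_FREEING flag.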
@@ -78,7 +78,9 @@ static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
 	if (r)
 		return r;
 
-	return count > 1;
+	*result = count > 1;
+
+	return 0;
 }
 
 static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
...