Commit f6007dce authored by Mikulas Patocka, committed by Mike Snitzer

dm: fix a race condition in retrieve_deps

There's a race condition in the multipath target when retrieve_deps
races with multipath_message calling dm_get_device and dm_put_device.
retrieve_deps walks the list of open devices without holding any lock,
but multipath may add devices to or remove devices from the list while
it is running. The end result may be memory corruption or a
use-after-free.

See this description of a UAF with multipath_message():
https://listman.redhat.com/archives/dm-devel/2022-October/052373.html
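To make the race concrete, here is a minimal userspace sketch of the
same pattern (hypothetical pthreads code, not the kernel source): one
thread walks a shared list with no lock held while another thread
unlinks and frees a node out from under it.

/*
 * Hypothetical sketch of the racy pattern. The reader stands in for
 * retrieve_deps (lockless walk); the writer stands in for
 * dm_put_device (unlink and free). The fault is probabilistic, but
 * the access is undefined either way. Build with: cc -pthread race.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int dev; };

static struct node *head;		/* shared list, unprotected */

static void *reader(void *arg)		/* plays retrieve_deps() */
{
	for (struct node *n = head; n; n = n->next)
		printf("dev %d\n", n->dev);	/* possible use-after-free */
	return NULL;
}

static void *writer(void *arg)		/* plays dm_put_device() */
{
	struct node *n = head;
	head = n->next;
	free(n);			/* reader may still hold n */
	return NULL;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->dev = i;
		n->next = head;
		head = n;
	}
	pthread_t r, w;
	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(r, NULL);
	pthread_join(w, NULL);
	return 0;
}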

Fix this bug by introducing a new rw semaphore "devices_lock". We grab
devices_lock for read in retrieve_deps and we grab it for write in
dm_get_device and dm_put_device.
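The locking discipline this adds can be pictured with a userspace
analogue (again hypothetical, with a pthread rwlock standing in for
the kernel rw_semaphore):

/*
 * Hypothetical analogue of the fix: walkers take devices_lock shared,
 * add/remove paths take it exclusive, so a node can only be unlinked
 * and freed while no walker holds a pointer into the list.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int dev; };

static struct node *head;
static pthread_rwlock_t devices_lock = PTHREAD_RWLOCK_INITIALIZER;

static void walk_devices(void)		/* retrieve_deps() side: shared */
{
	pthread_rwlock_rdlock(&devices_lock);
	for (struct node *n = head; n; n = n->next)
		printf("dev %d\n", n->dev);
	pthread_rwlock_unlock(&devices_lock);
}

static void put_device(void)		/* dm_put_device() side: exclusive */
{
	pthread_rwlock_wrlock(&devices_lock);
	struct node *n = head;
	if (n) {
		head = n->next;
		free(n);		/* safe: no reader is mid-walk */
	}
	pthread_rwlock_unlock(&devices_lock);
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));
	n->dev = 1;
	n->next = NULL;
	head = n;
	walk_devices();
	put_device();
	return 0;
}

A reader-writer lock fits here because walking the device list is the
common, read-only case and may run concurrently, while adding or
removing devices is rare and must be exclusive.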
Reported-by: Luo Meng <luomeng12@huawei.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Cc: stable@vger.kernel.org
Tested-by: Li Lingfeng <lilingfeng3@huawei.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 0bb80ecc
drivers/md/dm-core.h
@@ -214,6 +214,7 @@ struct dm_table {
 
 	/* a list of devices used by this table */
 	struct list_head devices;
+	struct rw_semaphore devices_lock;
 
 	/* events get handed up using this callback */
 	void (*event_fn)(void *data);
drivers/md/dm-ioctl.c
@@ -1630,6 +1630,8 @@ static void retrieve_deps(struct dm_table *table,
 	struct dm_dev_internal *dd;
 	struct dm_target_deps *deps;
 
+	down_read(&table->devices_lock);
+
 	deps = get_result_buffer(param, param_size, &len);
 
 	/*
@@ -1644,7 +1646,7 @@ static void retrieve_deps(struct dm_table *table,
 	needed = struct_size(deps, dev, count);
 	if (len < needed) {
 		param->flags |= DM_BUFFER_FULL_FLAG;
-		return;
+		goto out;
 	}
 
 	/*
@@ -1656,6 +1658,9 @@ static void retrieve_deps(struct dm_table *table,
 		deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
 
 	param->data_size = param->data_start + needed;
+
+out:
+	up_read(&table->devices_lock);
 }
 
 static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size)
drivers/md/dm-table.c
@@ -135,6 +135,7 @@ int dm_table_create(struct dm_table **result, blk_mode_t mode,
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&t->devices);
+	init_rwsem(&t->devices_lock);
 
 	if (!num_targets)
 		num_targets = KEYS_PER_NODE;
@@ -359,16 +360,20 @@ int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
 	if (dev == disk_devt(t->md->disk))
 		return -EINVAL;
 
+	down_write(&t->devices_lock);
+
 	dd = find_device(&t->devices, dev);
 	if (!dd) {
 		dd = kmalloc(sizeof(*dd), GFP_KERNEL);
-		if (!dd)
-			return -ENOMEM;
+		if (!dd) {
+			r = -ENOMEM;
+			goto unlock_ret_r;
+		}
 
 		r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
 		if (r) {
 			kfree(dd);
-			return r;
+			goto unlock_ret_r;
 		}
 
 		refcount_set(&dd->count, 1);
@@ -378,12 +383,17 @@ int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
 	} else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
 		r = upgrade_mode(dd, mode, t->md);
 		if (r)
-			return r;
+			goto unlock_ret_r;
 	}
 	refcount_inc(&dd->count);
 out:
+	up_write(&t->devices_lock);
 	*result = dd->dm_dev;
 	return 0;
+
+unlock_ret_r:
+	up_write(&t->devices_lock);
+	return r;
 }
 EXPORT_SYMBOL(dm_get_device);
 
@@ -419,9 +429,12 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 {
 	int found = 0;
-	struct list_head *devices = &ti->table->devices;
+	struct dm_table *t = ti->table;
+	struct list_head *devices = &t->devices;
 	struct dm_dev_internal *dd;
 
+	down_write(&t->devices_lock);
+
 	list_for_each_entry(dd, devices, list) {
 		if (dd->dm_dev == d) {
 			found = 1;
@@ -430,14 +443,17 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d)
 	}
 	if (!found) {
 		DMERR("%s: device %s not in table devices list",
-		      dm_device_name(ti->table->md), d->name);
-		return;
+		      dm_device_name(t->md), d->name);
+		goto unlock_ret;
 	}
 
 	if (refcount_dec_and_test(&dd->count)) {
-		dm_put_table_device(ti->table->md, d);
+		dm_put_table_device(t->md, d);
 		list_del(&dd->list);
 		kfree(dd);
 	}
+
+unlock_ret:
+	up_write(&t->devices_lock);
 }
 EXPORT_SYMBOL(dm_put_device);