Commit c3c4555e authored by Milan Broz, committed by Alasdair G Kergon

dm table: clear add_random unless all devices have it set

Always clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
have it set. Otherwise devices with predictable characteristics may
contribute entropy.

QUEUE_FLAG_ADD_RANDOM specifies whether or not a queue's I/O timings
contribute to the random pool.
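
For context, the block layer consults this flag when a request completes,
before feeding the completion timing into the entropy pool. A simplified
sketch of that gate (condensed, not the literal kernel code;
blk_queue_add_random() and add_disk_randomness() are the real interfaces):

	/*
	 * Sketch of the completion-path gate: request timings reach
	 * the entropy pool only if QUEUE_FLAG_ADD_RANDOM is set on
	 * the queue.  (Condensed from the block layer, not verbatim.)
	 */
	static void complete_request(struct request *req)
	{
		if (blk_queue_add_random(req->q))
			add_disk_randomness(req->rq_disk);
		/* ... normal completion handling ... */
	}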

For bio-based targets this flag is always 0 because such devices have no
real queue.

For request-based devices this flag was always set to 1 by default.

Now set it according to the flags on the underlying devices. If there is at
least one device which should not contribute, set the flag to zero: if a
device, such as fast SSD storage, is not suitable for supplying entropy,
a request-based queue stacked over it will not be either.
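
The result is observable from user space via the queue's add_random sysfs
attribute. A minimal sketch of such a check ("dm-0" is a placeholder for
whichever device-mapper node is being inspected):

	#include <stdio.h>

	int main(void)
	{
		/* "dm-0" is a placeholder; substitute the dm node to inspect. */
		FILE *f = fopen("/sys/block/dm-0/queue/add_random", "r");
		int val;

		if (!f) {
			perror("open add_random");
			return 1;
		}
		if (fscanf(f, "%d", &val) == 1)
			printf("add_random = %d\n", val);
		fclose(f);
		return 0;
	}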

Because the checking logic is exactly the same as for the rotational flag,
share the iteration function with device_is_nonrot().
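
This makes the table-wide check generic: any predicate matching
iterate_devices_callout_fn can be evaluated across every device in the
table. As a purely hypothetical illustration (device_is_quiet and
blk_queue_quiet() below are invented names, not kernel APIs), a future
flag check would only need a new predicate:

	/*
	 * Hypothetical illustration only: device_is_quiet() and
	 * blk_queue_quiet() are invented stand-ins for a real per-queue
	 * flag test.  The shared iterator,
	 * dm_table_all_devices_attribute(), needs no changes.
	 */
	static int device_is_quiet(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, sector_t len, void *data)
	{
		struct request_queue *q = bdev_get_queue(dev->bdev);

		return q && blk_queue_quiet(q);	/* invented flag test */
	}
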
Signed-off-by: Milan Broz <mbroz@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent ba1cbad9
drivers/md/dm-table.c
@@ -1354,17 +1354,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
 	return q && blk_queue_nonrot(q);
 }
 
-static bool dm_table_is_nonrot(struct dm_table *t)
+static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+				sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !blk_queue_add_random(q);
+}
+
+static bool dm_table_all_devices_attribute(struct dm_table *t,
+					   iterate_devices_callout_fn func)
 {
 	struct dm_target *ti;
 	unsigned i = 0;
 
-	/* Ensure that all underlying device are non-rotational. */
 	while (i < dm_table_get_num_targets(t)) {
 		ti = dm_table_get_target(t, i++);
 
 		if (!ti->type->iterate_devices ||
-		    !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
+		    !ti->type->iterate_devices(ti, func, NULL))
 			return 0;
 	}
 
@@ -1396,13 +1404,23 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	if (!dm_table_discard_zeroes_data(t))
 		q->limits.discard_zeroes_data = 0;
 
-	if (dm_table_is_nonrot(t))
+	/* Ensure that all underlying devices are non-rotational. */
+	if (dm_table_all_devices_attribute(t, device_is_nonrot))
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
 	else
 		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
 
 	dm_table_set_integrity(t);
 
+	/*
+	 * Determine whether or not this queue's I/O timings contribute
+	 * to the entropy pool.  Only request-based targets use this.
+	 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+	 * have it set.
+	 */
+	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+
 	/*
 	 * QUEUE_FLAG_STACKABLE must be set after all queue settings are
 	 * visible to other CPUs because, once the flag is set, incoming bios