Commit e2e1a148 authored by Jens Axboe's avatar Jens Axboe

block: add sysfs knob for turning off disk entropy contributions

There are two reasons for doing this:

- On SSD disks, the completion times aren't as random as they
  are for rotational drives. So it's questionable whether they
  should contribute to the random pool in the first place.

- Calling add_disk_randomness() has a lot of overhead.

This adds /sys/block/<dev>/queue/add_random, which allows you to
switch the entropy contribution off on a per-device basis. The default setting is on, so there
should be no functional changes from this patch.
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 841fdffd
...@@ -2111,7 +2111,8 @@ static bool blk_update_bidi_request(struct request *rq, int error, ...@@ -2111,7 +2111,8 @@ static bool blk_update_bidi_request(struct request *rq, int error,
blk_update_request(rq->next_rq, error, bidi_bytes)) blk_update_request(rq->next_rq, error, bidi_bytes))
return true; return true;
add_disk_randomness(rq->rq_disk); if (blk_queue_add_random(rq->q))
add_disk_randomness(rq->rq_disk);
return false; return false;
} }
......
...@@ -250,6 +250,27 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) ...@@ -250,6 +250,27 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
return ret; return ret;
} }
/*
 * Show handler for /sys/block/<dev>/queue/add_random: reports whether
 * this queue currently contributes I/O completion timing to the
 * kernel's entropy pool (QUEUE_FLAG_ADD_RANDOM).
 */
static ssize_t queue_random_show(struct request_queue *q, char *page)
{
	int add_random = blk_queue_add_random(q);

	return queue_var_show(add_random, page);
}
/*
 * Store handler for /sys/block/<dev>/queue/add_random.
 *
 * Parses a boolean value from @page and sets or clears
 * QUEUE_FLAG_ADD_RANDOM under the queue lock.  Returns the number of
 * bytes consumed, or a negative error if the input could not be parsed.
 */
static ssize_t queue_random_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned long val;
	ssize_t ret = queue_var_store(&val, page, count);

	/*
	 * Don't touch the flag if parsing failed; the original code read
	 * 'val' unconditionally, toggling the flag from an uninitialized
	 * value on bad input.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
	else
		queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}
static ssize_t queue_iostats_show(struct request_queue *q, char *page) static ssize_t queue_iostats_show(struct request_queue *q, char *page)
{ {
return queue_var_show(blk_queue_io_stat(q), page); return queue_var_show(blk_queue_io_stat(q), page);
...@@ -374,6 +395,12 @@ static struct queue_sysfs_entry queue_iostats_entry = { ...@@ -374,6 +395,12 @@ static struct queue_sysfs_entry queue_iostats_entry = {
.store = queue_iostats_store, .store = queue_iostats_store,
}; };
/*
 * Sysfs attribute "add_random": world-readable, owner-writable toggle
 * for the queue's entropy contribution (wired to the show/store
 * handlers above).
 */
static struct queue_sysfs_entry queue_random_entry = {
.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
.show = queue_random_show,
.store = queue_random_store,
};
static struct attribute *default_attrs[] = { static struct attribute *default_attrs[] = {
&queue_requests_entry.attr, &queue_requests_entry.attr,
&queue_ra_entry.attr, &queue_ra_entry.attr,
...@@ -394,6 +421,7 @@ static struct attribute *default_attrs[] = { ...@@ -394,6 +421,7 @@ static struct attribute *default_attrs[] = {
&queue_nomerges_entry.attr, &queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr, &queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr, &queue_iostats_entry.attr,
&queue_random_entry.attr,
NULL, NULL,
}; };
......
...@@ -467,11 +467,13 @@ struct request_queue ...@@ -467,11 +467,13 @@ struct request_queue
#define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_CLUSTER) | \ (1 << QUEUE_FLAG_CLUSTER) | \
(1 << QUEUE_FLAG_STACKABLE) | \ (1 << QUEUE_FLAG_STACKABLE) | \
(1 << QUEUE_FLAG_SAME_COMP)) (1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))
static inline int queue_is_locked(struct request_queue *q) static inline int queue_is_locked(struct request_queue *q)
{ {
...@@ -596,6 +598,7 @@ enum { ...@@ -596,6 +598,7 @@ enum {
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_flushing(q) ((q)->ordseq) #define blk_queue_flushing(q) ((q)->ordseq)
#define blk_queue_stackable(q) \ #define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment