Commit 775f4b29 authored by Theodore Ts'o

random: make 'add_interrupt_randomness()' do something sane

We've been moving away from add_interrupt_randomness() for various
reasons: it's too expensive to do on every interrupt, and flooding the
CPU with interrupts could theoretically cause bogus floods of entropy
from a somewhat externally controllable source.

This solves both problems by limiting the actual randomness addition
to just once a second or after 64 interrupts, whichever comes first.
During that time, the interrupt cycle data is buffered up in a per-cpu
pool. Also, we make sure that the nonblocking pool used by urandom is
initialized before we start feeding the normal input pool. This
ensures that /dev/urandom returns unpredictable data as soon as
possible. (A minimal sketch of this gating policy follows the commit
metadata below.)

(Based on an original patch by Linus, but significantly modified by
tytso.)
Tested-by: Eric Wustrow <ewust@umich.edu>
Reported-by: Eric Wustrow <ewust@umich.edu>
Reported-by: Nadia Heninger <nadiah@cs.ucsd.edu>
Reported-by: Zakir Durumeric <zakir@umich.edu>
Reported-by: J. Alex Halderman <jhalderm@umich.edu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Cc: stable@vger.kernel.org
parent 74feec5d
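
As a rough illustration of the gating policy described in the commit message, here is a minimal, self-contained user-space sketch in C: per-interrupt event data is folded into a small buffer and only flushed into the "real" pool once a second or after 64 events, whichever comes first. The names used here (struct fast_pool_sketch, add_event(), flush_to_input_pool(), the irq number 5) are illustrative only and are not the kernel's; the actual implementation is the per-cpu struct fast_pool, fast_mix(), mix_pool_bytes() and credit_entropy_bits() code in the diff below.

/*
 * Illustrative sketch only (not kernel code): buffer per-interrupt data
 * and flush it to the main pool once a second or after 64 events,
 * whichever comes first.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct fast_pool_sketch {
	uint32_t pool[4];	/* small buffer of recent event data */
	time_t last_flush;	/* when we last fed the main pool */
	unsigned count;		/* events mixed since the last flush */
};

/* stand-in for mixing the buffered data into the real input pool */
static void flush_to_input_pool(const struct fast_pool_sketch *f)
{
	printf("flush after %u buffered events\n", f->count);
}

/* called once per (simulated) interrupt with its timing data */
static void add_event(struct fast_pool_sketch *f, uint32_t cycles, uint32_t irq)
{
	time_t now = time(NULL);

	/* cheap mixing: fold the event into the small buffer */
	f->pool[f->count & 3] ^= cycles ^ (irq << 16);
	f->count++;

	/* rate limit: skip the expensive path most of the time */
	if (f->count < 64 && now < f->last_flush + 1)
		return;

	flush_to_input_pool(f);
	f->last_flush = now;
	f->count = 0;
}

int main(void)
{
	struct fast_pool_sketch f = { .last_flush = time(NULL) };
	int i;

	for (i = 0; i < 200; i++)
		add_event(&f, (uint32_t)clock(), 5 /* hypothetical irq */);
	return 0;
}

In the real patch the 64-interrupt limit is implicit in fast_pool->count (fast_mix() advances it by 16 per interrupt, so "count & 1023" rolls over every 64 interrupts), and the one-second check uses jiffies and time_after() rather than wall-clock time.
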
@@ -127,19 +127,15 @@
 *
 *	void add_input_randomness(unsigned int type, unsigned int code,
 *				unsigned int value);
-*	void add_interrupt_randomness(int irq);
+*	void add_interrupt_randomness(int irq, int irq_flags);
 *	void add_disk_randomness(struct gendisk *disk);
 *
 * add_input_randomness() uses the input layer interrupt timing, as well as
 * the event type information from the hardware.
 *
-* add_interrupt_randomness() uses the inter-interrupt timing as random
-* inputs to the entropy pool. Note that not all interrupts are good
-* sources of randomness! For example, the timer interrupts is not a
-* good choice, because the periodicity of the interrupts is too
-* regular, and hence predictable to an attacker. Network Interface
-* Controller interrupts are a better measure, since the timing of the
-* NIC interrupts are more unpredictable.
+* add_interrupt_randomness() uses the interrupt timing as random
+* inputs to the entropy pool. Using the cycle counters and the irq source
+* as inputs, it feeds the randomness roughly once a second.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
@@ -248,6 +244,7 @@
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
+#include <linux/ptrace.h>
 #ifdef CONFIG_GENERIC_HARDIRQS
 # include <linux/irq.h>
@@ -256,6 +253,7 @@
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/irq.h>
+#include <asm/irq_regs.h>
 #include <asm/io.h>
 /*
@@ -421,7 +419,9 @@ struct entropy_store {
 	spinlock_t lock;
 	unsigned add_ptr;
 	int entropy_count;
+	int entropy_total;
 	int input_rotate;
+	unsigned int initialized:1;
 	__u8 last_data[EXTRACT_SIZE];
 };
@@ -454,6 +454,10 @@ static struct entropy_store nonblocking_pool = {
 	.pool = nonblocking_pool_data
 };
+
+static __u32 const twist_table[8] = {
+	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
 /*
  * This function adds bytes into the entropy "pool". It does not
  * update the entropy estimate. The caller should call
@@ -467,9 +471,6 @@ static struct entropy_store nonblocking_pool = {
 static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
 				   int nbytes, __u8 out[64])
 {
-	static __u32 const twist_table[8] = {
-		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
-		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
 	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
 	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
@@ -528,6 +529,36 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
 	mix_pool_bytes_extract(r, in, bytes, NULL);
 }
+
+struct fast_pool {
+	__u32 pool[4];
+	unsigned long last;
+	unsigned short count;
+	unsigned char rotate;
+	unsigned char last_timer_intr;
+};
+
+/*
+ * This is a fast mixing routine used by the interrupt randomness
+ * collector. It's hardcoded for an 128 bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
+{
+	const char *bytes = in;
+	__u32 w;
+	unsigned i = f->count;
+	unsigned input_rotate = f->rotate;
+
+	while (nbytes--) {
+		w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
+			f->pool[(i + 1) & 3];
+		f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
+		input_rotate += (i++ & 3) ? 7 : 14;
+	}
+	f->count = i;
+	f->rotate = input_rotate;
+}
+
 /*
  * Credit (or debit) the entropy store with n bits of entropy
  */
@@ -551,6 +582,12 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
 		entropy_count = r->poolinfo->POOLBITS;
 	r->entropy_count = entropy_count;
+
+	if (!r->initialized && nbits > 0) {
+		r->entropy_total += nbits;
+		if (r->entropy_total > 128)
+			r->initialized = 1;
+	}
 	/* should we wake readers? */
 	if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
 		wake_up_interruptible(&random_read_wait);
@@ -700,17 +737,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
 }
 EXPORT_SYMBOL_GPL(add_input_randomness);
 
-void add_interrupt_randomness(int irq)
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+void add_interrupt_randomness(int irq, int irq_flags)
 {
-	struct timer_rand_state *state;
+	struct entropy_store *r;
+	struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+	struct pt_regs *regs = get_irq_regs();
+	unsigned long now = jiffies;
+	__u32 input[4], cycles = get_cycles();
+
+	input[0] = cycles ^ jiffies;
+	input[1] = irq;
+	if (regs) {
+		__u64 ip = instruction_pointer(regs);
+		input[2] = ip;
+		input[3] = ip >> 32;
+	}
 
-	state = get_timer_rand_state(irq);
+	fast_mix(fast_pool, input, sizeof(input));
 
-	if (state == NULL)
+	if ((fast_pool->count & 1023) &&
+	    !time_after(now, fast_pool->last + HZ))
 		return;
 
-	DEBUG_ENT("irq event %d\n", irq);
-	add_timer_randomness(state, 0x100 + irq);
+	fast_pool->last = now;
+
+	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+	mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
+	/*
+	 * If we don't have a valid cycle counter, and we see
+	 * back-to-back timer interrupts, then skip giving credit for
+	 * any entropy.
+	 */
+	if (cycles == 0) {
+		if (irq_flags & __IRQF_TIMER) {
+			if (fast_pool->last_timer_intr)
+				return;
+			fast_pool->last_timer_intr = 1;
+		} else
+			fast_pool->last_timer_intr = 0;
+	}
+	credit_entropy_bits(r, 1);
 }
 
 #ifdef CONFIG_BLOCK
@@ -971,6 +1039,7 @@ static void init_std_data(struct entropy_store *r)
 	spin_lock_irqsave(&r->lock, flags);
 	r->entropy_count = 0;
+	r->entropy_total = 0;
 	spin_unlock_irqrestore(&r->lock, flags);
 
 	now = ktime_get_real();
...
@@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
 	u32 fatevent;
 	int err;
 
-	add_interrupt_randomness(irq);
-
 	err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
 					event_regs, 3);
 	if (err)
...
@@ -52,7 +52,7 @@ extern void rand_initialize_irq(int irq);
 extern void add_input_randomness(unsigned int type, unsigned int code,
 				 unsigned int value);
-extern void add_interrupt_randomness(int irq);
+extern void add_interrupt_randomness(int irq, int irq_flags);
 extern void get_random_bytes(void *buf, int nbytes);
 void generate_random_uuid(unsigned char uuid_out[16]);
...
@@ -133,7 +133,7 @@ irqreturn_t
 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 {
 	irqreturn_t retval = IRQ_NONE;
-	unsigned int random = 0, irq = desc->irq_data.irq;
+	unsigned int flags = 0, irq = desc->irq_data.irq;
 
 	do {
 		irqreturn_t res;
@@ -161,7 +161,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 			/* Fall through to add to randomness */
 		case IRQ_HANDLED:
-			random |= action->flags;
+			flags |= action->flags;
 			break;
 
 		default:
@@ -172,8 +172,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 		action = action->next;
 	} while (action);
 
-	if (random & IRQF_SAMPLE_RANDOM)
-		add_interrupt_randomness(irq);
+	add_interrupt_randomness(irq, flags);
 
 	if (!noirqdebug)
 		note_interrupt(irq, desc, retval);