Commit fbf6dda7 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] random: SMP locking

From: Oliver Xymoron <oxymoron@waste.org>

This patch adds locking for SMP. Apparently Willy never managed to
revive his laptop with his version, so I revived mine.

The batch pool is copied as a block to avoid long lock hold times
while mixing it into the primary pool. 
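This is the usual snapshot-and-drain trick: hold the lock only long
enough to block-copy the ring buffer and advance the tail, then do the
expensive mixing on the private copy. A rough userspace sketch of the
same pattern (not the patch's code: a pthread mutex stands in for the
kernel spinlock, and ring/snap/drain/mix are made-up names):

    #include <pthread.h>
    #include <string.h>

    #define RING_SIZE 256                   /* must be a power of 2 */

    struct sample { unsigned data[2]; int credit; };

    static struct sample ring[RING_SIZE];   /* shared, lock-protected */
    static struct sample snap[RING_SIZE];   /* private copy, drained unlocked */
    static unsigned head, tail;
    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for the expensive add_entropy_words/credit_entropy_store step. */
    static void mix(const struct sample *s) { (void)s; }

    void drain(void)
    {
            unsigned h, t;

            /* Hold the lock only for a block copy and an index update... */
            pthread_mutex_lock(&ring_lock);
            memcpy(snap, ring, sizeof(ring));
            h = head;
            t = tail;
            tail = head;            /* everything up to head is consumed */
            pthread_mutex_unlock(&ring_lock);

            /* ...then do the slow mixing on the private copy, unlocked. */
            while (t != h) {
                    mix(&snap[t]);
                    t = (t + 1) & (RING_SIZE - 1);
            }
    }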

Two locks are added:

global batch_lock
   batch_entropy_store can be called from any context, and typically
   from interrupts -> spin_lock_irqsave

   batch_entropy_process is called via schedule_delayed_work and runs
   in process context -> spin_lock_irq

entropy_store.lock
   the mixing process is too expensive to be called from an interrupt
   context, and the basic worker function extract_entropy can sleep,
   so all this stuff can sit under a normal spin_lock
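The distinction matters because spin_unlock_irq re-enables interrupts
unconditionally, so the plain irq variant is only safe where interrupts
are known to be enabled (process context), while irqsave/irqrestore
preserves the caller's interrupt state and is safe in any context.
Condensed into a kernel-style sketch using the same primitives as the
patch (bodies elided; not a standalone program):

    static spinlock_t batch_lock = SPIN_LOCK_UNLOCKED;

    /* May run in any context, including hard interrupt:
     * save and restore the caller's interrupt state. */
    void batch_entropy_store(u32 a, u32 b, int num)
    {
            unsigned long flags;

            spin_lock_irqsave(&batch_lock, flags);
            /* ... append sample to the ring ... */
            spin_unlock_irqrestore(&batch_lock, flags);
    }

    /* Runs only in process context (workqueue), where interrupts are
     * known to be on, so plain irq disable/enable is sufficient. */
    static void batch_entropy_process(void *private_)
    {
            spin_lock_irq(&batch_lock);
            /* ... snapshot the ring and reset the tail ... */
            spin_unlock_irq(&batch_lock);
    }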
parent b0c15cba
@@ -486,6 +486,7 @@ struct entropy_store {
 	int extract_count;
 	struct poolinfo poolinfo;
 	__u32 *pool;
+	spinlock_t lock;
 };
 
 /*
@@ -524,6 +525,7 @@ static int create_entropy_store(int size, struct entropy_store **ret_bucket)
 		return -ENOMEM;
 	}
 	memset(r->pool, 0, POOLBYTES);
+	r->lock = SPIN_LOCK_UNLOCKED;
 	*ret_bucket = r;
 	return 0;
 }
@@ -565,6 +567,9 @@ static void add_entropy_words(struct entropy_store *r, const __u32 *in,
 	int new_rotate;
 	int wordmask = r->poolinfo.poolwords - 1;
 	__u32 w;
+	unsigned long flags;
 
+	spin_lock_irqsave(&r->lock, flags);
+
 	while (nwords--) {
 		w = rotate_left(r->input_rotate, *in++);
@@ -589,6 +594,8 @@ static void add_entropy_words(struct entropy_store *r, const __u32 *in,
 		w ^= r->pool[i];
 		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
 	}
+
+	spin_unlock_irqrestore(&r->lock, flags);
 }
 
 /*
@@ -596,6 +603,10 @@ static void add_entropy_words(struct entropy_store *r, const __u32 *in,
  */
 static void credit_entropy_store(struct entropy_store *r, int nbits)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&r->lock, flags);
+
 	if (r->entropy_count + nbits < 0) {
 		DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
 			  r->entropy_count, nbits);
......@@ -610,6 +621,8 @@ static void credit_entropy_store(struct entropy_store *r, int nbits)
r == random_state ? "primary" : "unknown",
nbits, r->entropy_count);
}
spin_unlock_irqrestore(&r->lock, flags);
}
/**********************************************************************
@@ -620,27 +633,33 @@ static void credit_entropy_store(struct entropy_store *r, int nbits)
  *
  **********************************************************************/
 
-static __u32	*batch_entropy_pool;
-static int	*batch_entropy_credit;
-static int	batch_max;
+struct sample {
+	__u32 data[2];
+	int credit;
+};
+
+static struct sample *batch_entropy_pool, *batch_entropy_copy;
 static int	batch_head, batch_tail;
+static spinlock_t batch_lock = SPIN_LOCK_UNLOCKED;
+
+static int	batch_max;
 static void batch_entropy_process(void *private_);
 static DECLARE_WORK(batch_work, batch_entropy_process, NULL);
 
 /* note: the size must be a power of 2 */
 static int __init batch_entropy_init(int size, struct entropy_store *r)
 {
-	batch_entropy_pool = kmalloc(2*size*sizeof(__u32), GFP_KERNEL);
+	batch_entropy_pool = kmalloc(size*sizeof(struct sample), GFP_KERNEL);
 	if (!batch_entropy_pool)
 		return -1;
-	batch_entropy_credit =kmalloc(size*sizeof(int), GFP_KERNEL);
-	if (!batch_entropy_credit) {
+	batch_entropy_copy = kmalloc(size*sizeof(struct sample), GFP_KERNEL);
+	if (!batch_entropy_copy) {
 		kfree(batch_entropy_pool);
 		return -1;
 	}
 	batch_head = batch_tail = 0;
-	batch_max = size;
 	batch_work.data = r;
+	batch_max = size;
 	return 0;
 }
@@ -653,26 +672,32 @@ static int __init batch_entropy_init(int size, struct entropy_store *r)
 void batch_entropy_store(u32 a, u32 b, int num)
 {
 	int new;
+	unsigned long flags;
 
 	if (!batch_max)
 		return;
 
-	batch_entropy_pool[2*batch_head] = a;
-	batch_entropy_pool[(2*batch_head) + 1] = b;
-	batch_entropy_credit[batch_head] = num;
+	spin_lock_irqsave(&batch_lock, flags);
+
+	batch_entropy_pool[batch_head].data[0] = a;
+	batch_entropy_pool[batch_head].data[1] = b;
+	batch_entropy_pool[batch_head].credit = num;
 
-	new = (batch_head+1) & (batch_max-1);
-	if ((unsigned)(new - batch_tail) >= (unsigned)(batch_max / 2)) {
+	if (((batch_head - batch_tail) & (batch_max-1)) >= (batch_max / 2)) {
 		/*
 		 * Schedule it for the next timer tick:
 		 */
 		schedule_delayed_work(&batch_work, 1);
-		batch_head = new;
-	} else if (new == batch_tail) {
+	}
+
+	new = (batch_head+1) & (batch_max-1);
+	if (new == batch_tail) {
 		DEBUG_ENT("batch entropy buffer full\n");
 	} else {
 		batch_head = new;
 	}
+
+	spin_unlock_irqrestore(&batch_lock, flags);
 }
 
 /*
@@ -684,20 +709,34 @@ static void batch_entropy_process(void *private_)
 {
 	struct entropy_store *r = (struct entropy_store *) private_, *p;
 	int max_entropy = r->poolinfo.POOLBITS;
+	unsigned head, tail;
 
 	if (!batch_max)
 		return;
 
+	/* Mixing into the pool is expensive, so copy over the batch
+	 * data and release the batch lock. The pool is at least half
+	 * full, so don't worry too much about copying only the used
+	 * part.
+	 */
+	spin_lock_irq(&batch_lock);
+	memcpy(batch_entropy_copy, batch_entropy_pool,
+	       batch_max*sizeof(struct sample));
+	head = batch_head;
+	tail = batch_tail;
+	batch_tail = batch_head;
+	spin_unlock_irq(&batch_lock);
+
 	p = r;
-	while (batch_head != batch_tail) {
+	while (head != tail) {
 		if (r->entropy_count >= max_entropy) {
 			r = (r == sec_random_state) ? random_state :
 				sec_random_state;
 			max_entropy = r->poolinfo.POOLBITS;
 		}
-		add_entropy_words(r, batch_entropy_pool + 2*batch_tail, 2);
-		credit_entropy_store(r, batch_entropy_credit[batch_tail]);
-		batch_tail = (batch_tail+1) & (batch_max-1);
+		add_entropy_words(r, batch_entropy_copy[tail].data, 2);
+		credit_entropy_store(r, batch_entropy_copy[tail].credit);
+		tail = (tail+1) & (batch_max-1);
 	}
 	if (p->entropy_count >= random_read_wakeup_thresh)
 		wake_up_interruptible(&random_read_wait);
@@ -1276,6 +1315,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void * buf,
 	ssize_t ret, i;
 	__u32 tmp[TMP_BUF_SIZE];
 	__u32 x;
+	unsigned long cpuflags;
 
 	add_timer_randomness(&extract_timer_state, nbytes);
......@@ -1286,6 +1326,9 @@ static ssize_t extract_entropy(struct entropy_store *r, void * buf,
if (flags & EXTRACT_ENTROPY_SECONDARY)
xfer_secondary_pool(r, nbytes, tmp);
/* Hold lock while accounting */
spin_lock_irqsave(&r->lock, cpuflags);
DEBUG_ENT("%s has %d bits, want %d bits\n",
r == sec_random_state ? "secondary" :
r == random_state ? "primary" : "unknown",
@@ -1301,6 +1344,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void * buf,
 	r->extract_count += nbytes;
 
+	spin_unlock_irqrestore(&r->lock, cpuflags);
+
 	ret = 0;
 	while (nbytes) {
 		/*
@@ -1595,8 +1640,9 @@ static int
 random_ioctl(struct inode * inode, struct file * file,
 	     unsigned int cmd, unsigned long arg)
 {
-	int *p, size, ent_count;
+	int *p, *tmp, size, ent_count;
 	int retval;
+	unsigned long flags;
 
 	switch (cmd) {
 	case RNDGETENTCNT:
@@ -1621,17 +1667,36 @@ random_ioctl(struct inode * inode, struct file * file,
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
 		p = (int *) arg;
-		ent_count = random_state->entropy_count;
-		if (put_user(ent_count, p++) ||
-		    get_user(size, p) ||
+		if (get_user(size, p) ||
 		    put_user(random_state->poolinfo.poolwords, p++))
 			return -EFAULT;
 		if (size < 0)
-			return -EINVAL;
+			return -EFAULT;
 		if (size > random_state->poolinfo.poolwords)
 			size = random_state->poolinfo.poolwords;
-		if (copy_to_user(p, random_state->pool, size * sizeof(__u32)))
+
+		/* prepare to atomically snapshot pool */
+
+		tmp = kmalloc(size * sizeof(__u32), GFP_KERNEL);
+		if (!tmp)
 			return -EFAULT;
+
+		spin_lock_irqsave(&random_state->lock, flags);
+		ent_count = random_state->entropy_count;
+		memcpy(tmp, random_state->pool, size * sizeof(__u32));
+		spin_unlock_irqrestore(&random_state->lock, flags);
+
+		if (!copy_to_user(p, tmp, size * sizeof(__u32))) {
+			kfree(tmp);
+			return -EFAULT;
+		}
+
+		kfree(tmp);
+
+		if(put_user(ent_count, p++))
+			return -EFAULT;
+
 		return 0;
 	case RNDADDENTROPY:
 		if (!capable(CAP_SYS_ADMIN))