Commit 0dd50d1b authored by Daniel Borkmann, committed by David S. Miller

random32: add prandom_seed_full_state helper

Factor out the full reseed handling code that populates the state
through get_random_bytes() and runs prandom_warmup(). The resulting
prandom_seed_full_state() helper will later be used by more than the
current __prandom_reseed() user. Also fix two minor whitespace issues
along the way.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c90aeb94
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -181,7 +181,7 @@ void prandom_seed(u32 entropy)
 	 * No locking on the CPUs, but then somewhat random results are, well,
 	 * expected.
 	 */
-	for_each_possible_cpu (i) {
+	for_each_possible_cpu(i) {
 		struct rnd_state *state = &per_cpu(net_rand_state, i);
 
 		state->s1 = __seed(state->s1 ^ entropy, 2U);
@@ -201,7 +201,7 @@ static int __init prandom_init(void)
 	prandom_state_selftest();
 
 	for_each_possible_cpu(i) {
-		struct rnd_state *state = &per_cpu(net_rand_state,i);
+		struct rnd_state *state = &per_cpu(net_rand_state, i);
 		u32 weak_seed = (i + jiffies) ^ random_get_entropy();
 
 		prandom_seed_early(state, weak_seed, true);
@@ -238,13 +238,30 @@ static void __init __prandom_start_seed_timer(void)
 	add_timer(&seed_timer);
 }
 
+static void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
+		u32 seeds[4];
+
+		get_random_bytes(&seeds, sizeof(seeds));
+		state->s1 = __seed(seeds[0], 2U);
+		state->s2 = __seed(seeds[1], 8U);
+		state->s3 = __seed(seeds[2], 16U);
+		state->s4 = __seed(seeds[3], 128U);
+
+		prandom_warmup(state);
+	}
+}
+
 /*
  * Generate better values after random number generator
  * is fully initialized.
  */
 static void __prandom_reseed(bool late)
 {
-	int i;
 	unsigned long flags;
 	static bool latch = false;
 	static DEFINE_SPINLOCK(lock);
@@ -266,19 +283,7 @@ static void __prandom_reseed(bool late)
 		goto out;
 
 	latch = true;
-
-	for_each_possible_cpu(i) {
-		struct rnd_state *state = &per_cpu(net_rand_state,i);
-		u32 seeds[4];
-
-		get_random_bytes(&seeds, sizeof(seeds));
-		state->s1 = __seed(seeds[0], 2U);
-		state->s2 = __seed(seeds[1], 8U);
-		state->s3 = __seed(seeds[2], 16U);
-		state->s4 = __seed(seeds[3], 128U);
-
-		prandom_warmup(state);
-	}
+	prandom_seed_full_state(&net_rand_state);
 out:
 	spin_unlock_irqrestore(&lock, flags);
 }
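The commit message anticipates additional callers of prandom_seed_full_state()
beyond __prandom_reseed(). As a rough, hypothetical sketch only, not part of
this patch: since the helper is static to lib/random32.c, a later user inside
that file could seed its own per-cpu state the same way. The example_rand_state
variable and example_seed_once() function below are invented for illustration.

/* Hypothetical additional per-cpu state, illustrative only. */
static DEFINE_PER_CPU(struct rnd_state, example_rand_state);

/* Hypothetical caller: fully seed every CPU's copy of example_rand_state. */
static void example_seed_once(void)
{
	/*
	 * Pulls four u32 seeds per CPU from get_random_bytes(), clamps them
	 * with __seed(), and runs prandom_warmup(), exactly as the new helper
	 * does for net_rand_state in __prandom_reseed().
	 */
	prandom_seed_full_state(&example_rand_state);
}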