Commit 2c03e16f authored by Jason A. Donenfeld

random: remove early archrandom abstraction

The arch_get_random*_early() abstraction is not completely useful and
adds complexity, because it's not a given that there will be no calls to
arch_get_random*() between random_init_early(), which uses
arch_get_random*_early(), and init_cpu_features(). During that gap,
crng_reseed() might be called, which uses arch_get_random*(), since it's
mostly not init code.

Instead we can test whether we're in the early phase in
arch_get_random*() itself, and in doing so avoid all ambiguity about
where we are. Fortunately, the only architecture that currently
implements arch_get_random*_early() also has an alternatives-based cpu
feature system, one flag of which determines whether the other flags
have been initialized. This makes it possible to do the early check with
zero cost once the system is initialized.
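
To illustrate the approach (an explanatory sketch, not part of the patch; it assumes the usual arm64 kernel context of <asm/cpufeature.h> and <linux/preempt.h>): the check described above boils down to the helper added in the diff below, __cpu_has_rng(), annotated here with the reasoning for each branch. All identifiers (system_capabilities_finalized(), this_cpu_has_cap(), cpus_have_const_cap(), ARM64_HAS_RNG) are the ones used in the patch itself.

	/* Sketch of the early-phase check; the real helper is in the hunk below. */
	static __always_inline bool __cpu_has_rng(void)
	{
		/*
		 * Early boot: the system-wide capability flags are not yet
		 * finalized, so fall back to a per-CPU query. That is only
		 * safe with preemption off, otherwise we could migrate to a
		 * CPU with a different feature set mid-check.
		 */
		if (unlikely(!system_capabilities_finalized() && !preemptible()))
			return this_cpu_has_cap(ARM64_HAS_RNG);
		/* After boot: a constant-folded capability check, effectively free. */
		return cpus_have_const_cap(ARM64_HAS_RNG);
	}

With this in place, generic code such as random_init_early() can call the ordinary arch_get_random_seed_longs()/arch_get_random_longs() at any point in boot, and the separate _early() variants can be dropped.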
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
parent b9b01a56
arch/arm64/include/asm/archrandom.h
@@ -5,6 +5,7 @@
 #include <linux/arm-smccc.h>
 #include <linux/bug.h>
 #include <linux/kernel.h>
+#include <linux/irqflags.h>
 #include <asm/cpufeature.h>
 #define ARM_SMCCC_TRNG_MIN_VERSION 0x10000UL
@@ -58,6 +59,13 @@ static inline bool __arm64_rndrrs(unsigned long *v)
 	return ok;
 }
+static __always_inline bool __cpu_has_rng(void)
+{
+	if (unlikely(!system_capabilities_finalized() && !preemptible()))
+		return this_cpu_has_cap(ARM64_HAS_RNG);
+	return cpus_have_const_cap(ARM64_HAS_RNG);
+}
 static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
 {
 	/*
@@ -66,7 +74,7 @@ static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
 	 * cpufeature code and with potential scheduling between CPUs
 	 * with and without the feature.
 	 */
-	if (max_longs && cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndr(v))
+	if (max_longs && __cpu_has_rng() && __arm64_rndr(v))
 		return 1;
 	return 0;
 }
@@ -108,7 +116,7 @@ static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
 	 * reseeded after each invocation. This is not a 100% fit but good
 	 * enough to implement this API if no other entropy source exists.
 	 */
-	if (cpus_have_const_cap(ARM64_HAS_RNG) && __arm64_rndrrs(v))
+	if (__cpu_has_rng() && __arm64_rndrrs(v))
 		return 1;
 	return 0;
@@ -121,40 +129,4 @@ static inline bool __init __early_cpu_has_rndr(void)
 	return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
 }
-static inline size_t __init __must_check
-arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
-{
-	WARN_ON(system_state != SYSTEM_BOOTING);
-	if (!max_longs)
-		return 0;
-	if (smccc_trng_available) {
-		struct arm_smccc_res res;
-		max_longs = min_t(size_t, 3, max_longs);
-		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
-		if ((int)res.a0 >= 0) {
-			switch (max_longs) {
-			case 3:
-				*v++ = res.a1;
-				fallthrough;
-			case 2:
-				*v++ = res.a2;
-				fallthrough;
-			case 1:
-				*v++ = res.a3;
-				break;
-			}
-			return max_longs;
-		}
-	}
-	if (__early_cpu_has_rndr() && __arm64_rndr(v))
-		return 1;
-	return 0;
-}
-#define arch_get_random_seed_longs_early arch_get_random_seed_longs_early
 #endif /* _ASM_ARCHRANDOM_H */
drivers/char/random.c
@@ -829,13 +829,13 @@ void __init random_init_early(const char *command_line)
 #endif
 	for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
-		longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+		longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
 		if (longs) {
 			_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
 			i += longs;
 			continue;
 		}
-		longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i);
+		longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
 		if (longs) {
 			_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
 			i += longs;
include/linux/random.h
@@ -154,26 +154,6 @@ declare_get_random_var_wait(long, unsigned long)
 #include <asm/archrandom.h>
-/*
- * Called from the boot CPU during startup; not valid to call once
- * secondary CPUs are up and preemption is possible.
- */
-#ifndef arch_get_random_seed_longs_early
-static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
-{
-	WARN_ON(system_state != SYSTEM_BOOTING);
-	return arch_get_random_seed_longs(v, max_longs);
-}
-#endif
-#ifndef arch_get_random_longs_early
-static inline bool __init arch_get_random_longs_early(unsigned long *v, size_t max_longs)
-{
-	WARN_ON(system_state != SYSTEM_BOOTING);
-	return arch_get_random_longs(v, max_longs);
-}
-#endif
 #ifdef CONFIG_SMP
 int random_prepare_cpu(unsigned int cpu);
 int random_online_cpu(unsigned int cpu);