Commit 8adc0486 authored by Linus Torvalds

Merge tag 'random-6.1-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random

Pull random number generator updates from Jason Donenfeld:

 - Huawei reported that when they updated their kernel from 4.4 to
   something much newer, some userspace code they had broke, the culprit
   being the accidental removal of O_NONBLOCK from /dev/random way back
   in 5.6. It's been gone for over 2 years now and this is the first
   we've heard of it, but userspace breakage is userspace breakage, so
   O_NONBLOCK is now back.

 - Use randomness from hardware RNGs much more often during early boot,
   at the same interval that crng reseeds are done, from Dominik.

 - A semantic change in hardware RNG throttling, so that the hwrng
   framework can properly feed random.c with randomness from hardware
   RNGs that aren't specifically marked as creditable.

   A related patch coming to you via Herbert's hwrng tree depends on
   this one, not to compile, but just to function properly, so you may
   want to merge this PULL before that one.

 - A fix to clamp credited bits from the interrupts pool to the size of
   the pool sample. This is mainly just a theoretical fix, as it'd be
   pretty hard to exceed it in practice.

 - Oracle reported that InfiniBand TCP latency regressed by around
   10-15% after a change a few cycles ago made at the request of the RT
   folks, in which we hoisted a somewhat rare operation (1 in 1024
   times) out of the hard IRQ handler and into a workqueue, a pretty
   common and boring pattern.

   It turns out, though, that scheduling a worker from there has
   overhead of its own, whereas scheduling a timer on that same CPU for
   the next jiffy amortizes better and doesn't incur the same overhead.

   I also eliminated a cache miss by moving the work_struct (and
   subsequently, the timer_list) to below a critical cache line, so that
   the more critical members that are accessed on every hard IRQ aren't
   split between two cache lines.

 - The boot-time initialization of the RNG has been split into two
   approximate phases: what we can accomplish before timekeeping is
   possible and what we can accomplish after.

   This winds up being useful so that we can use RDRAND to seed the RNG
   before CONFIG_SLAB_FREELIST_RANDOM=y systems initialize slabs, in
   addition to other early uses of randomness. The effect is that
   systems with RDRAND (or a bootloader seed) will never see any
   warnings at all when setting CONFIG_WARN_ALL_UNSEEDED_RANDOM=y. And
   kfence benefits from getting a better seed of its own.

 - Small systems without much entropy sometimes wind up putting some
   truncated serial number read from flash into hostname, so contribute
   utsname changes to the RNG, without crediting.

 - Add smaller batches to serve requests for smaller integers, and make
   use of them when people ask for random numbers bounded by a given
   compile-time constant. This has positive effects all over the tree,
   most notably in networking and kfence.

 - The original jitter algorithm intended (I believe) to schedule the
   timer for the next jiffy, not the next-next jiffy, yet it used
   mod_timer(jiffies + 1), which will fire on the next-next jiffy,
   instead of what I believe was intended, mod_timer(jiffies), which
   will fire on the next jiffy. So fix that.

 - Fix a comment typo, from William.

* tag 'random-6.1-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random:
  random: clear new batches when bringing new CPUs online
  random: fix typos in get_random_bytes() comment
  random: schedule jitter credit for next jiffy, not in two jiffies
  prandom: make use of smaller types in prandom_u32_max
  random: add 8-bit and 16-bit batches
  utsname: contribute changes to RNG
  random: use init_utsname() instead of utsname()
  kfence: use better stack hash seed
  random: split initialization into early step and later step
  random: use expired timer rather than wq for mixing fast pool
  random: avoid reading two cache lines on irq randomness
  random: clamp credited irq bits to maximum mixed
  random: throttle hwrng writes if no entropy is credited
  random: use hwgenerator randomness more frequently at early boot
  random: restore O_NONBLOCK support
parents 52abb27a a890d1c6
...@@ -712,8 +712,8 @@ static const struct memdev { ...@@ -712,8 +712,8 @@ static const struct memdev {
#endif #endif
[5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT }, [5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT },
[7] = { "full", 0666, &full_fops, 0 }, [7] = { "full", 0666, &full_fops, 0 },
[8] = { "random", 0666, &random_fops, 0 }, [8] = { "random", 0666, &random_fops, FMODE_NOWAIT },
[9] = { "urandom", 0666, &urandom_fops, 0 }, [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT },
#ifdef CONFIG_PRINTK #ifdef CONFIG_PRINTK
[11] = { "kmsg", 0644, &kmsg_fops, 0 }, [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif #endif
......
This diff is collapsed.
...@@ -12,11 +12,13 @@ ...@@ -12,11 +12,13 @@
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/random.h> #include <linux/random.h>
/* Deprecated: use get_random_u32 instead. */
static inline u32 prandom_u32(void) static inline u32 prandom_u32(void)
{ {
return get_random_u32(); return get_random_u32();
} }
/* Deprecated: use get_random_bytes instead. */
static inline void prandom_bytes(void *buf, size_t nbytes) static inline void prandom_bytes(void *buf, size_t nbytes)
{ {
return get_random_bytes(buf, nbytes); return get_random_bytes(buf, nbytes);
...@@ -37,17 +39,20 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); ...@@ -37,17 +39,20 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
* prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
* @ep_ro: right open interval endpoint * @ep_ro: right open interval endpoint
* *
* Returns a pseudo-random number that is in interval [0, ep_ro). Note * Returns a pseudo-random number that is in interval [0, ep_ro). This is
* that the result depends on PRNG being well distributed in [0, ~0U] * useful when requesting a random index of an array containing ep_ro elements,
* u32 space. Here we use maximally equidistributed combined Tausworthe * for example. The result is somewhat biased when ep_ro is not a power of 2,
* generator, that is, prandom_u32(). This is useful when requesting a * so do not use this for cryptographic purposes.
* random index of an array containing ep_ro elements, for example.
* *
* Returns: pseudo-random number in interval [0, ep_ro) * Returns: pseudo-random number in interval [0, ep_ro)
*/ */
static inline u32 prandom_u32_max(u32 ep_ro) static inline u32 prandom_u32_max(u32 ep_ro)
{ {
return (u32)(((u64) prandom_u32() * ep_ro) >> 32); if (__builtin_constant_p(ep_ro <= 1U << 8) && ep_ro <= 1U << 8)
return (get_random_u8() * ep_ro) >> 8;
if (__builtin_constant_p(ep_ro <= 1U << 16) && ep_ro <= 1U << 16)
return (get_random_u16() * ep_ro) >> 16;
return ((u64)get_random_u32() * ep_ro) >> 32;
} }
/* /*
......
...@@ -38,6 +38,8 @@ static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { ...@@ -38,6 +38,8 @@ static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) {
#endif #endif
void get_random_bytes(void *buf, size_t len); void get_random_bytes(void *buf, size_t len);
u8 get_random_u8(void);
u16 get_random_u16(void);
u32 get_random_u32(void); u32 get_random_u32(void);
u64 get_random_u64(void); u64 get_random_u64(void);
static inline unsigned int get_random_int(void) static inline unsigned int get_random_int(void)
...@@ -72,7 +74,8 @@ static inline unsigned long get_random_canary(void) ...@@ -72,7 +74,8 @@ static inline unsigned long get_random_canary(void)
return get_random_long() & CANARY_MASK; return get_random_long() & CANARY_MASK;
} }
int __init random_init(const char *command_line); void __init random_init_early(const char *command_line);
void __init random_init(void);
bool rng_is_initialized(void); bool rng_is_initialized(void);
int wait_for_random_bytes(void); int wait_for_random_bytes(void);
...@@ -93,6 +96,8 @@ static inline int get_random_bytes_wait(void *buf, size_t nbytes) ...@@ -93,6 +96,8 @@ static inline int get_random_bytes_wait(void *buf, size_t nbytes)
*out = get_random_ ## name(); \ *out = get_random_ ## name(); \
return 0; \ return 0; \
} }
declare_get_random_var_wait(u8, u8)
declare_get_random_var_wait(u16, u16)
declare_get_random_var_wait(u32, u32) declare_get_random_var_wait(u32, u32)
declare_get_random_var_wait(u64, u32) declare_get_random_var_wait(u64, u32)
declare_get_random_var_wait(int, unsigned int) declare_get_random_var_wait(int, unsigned int)
......
...@@ -976,6 +976,9 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) ...@@ -976,6 +976,9 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
parse_args("Setting extra init args", extra_init_args, parse_args("Setting extra init args", extra_init_args,
NULL, 0, -1, -1, NULL, set_init_arg); NULL, 0, -1, -1, NULL, set_init_arg);
/* Architectural and non-timekeeping rng init, before allocator init */
random_init_early(command_line);
/* /*
* These use large bootmem allocations and must precede * These use large bootmem allocations and must precede
* kmem_cache_init() * kmem_cache_init()
...@@ -1035,17 +1038,13 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) ...@@ -1035,17 +1038,13 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
hrtimers_init(); hrtimers_init();
softirq_init(); softirq_init();
timekeeping_init(); timekeeping_init();
kfence_init();
time_init(); time_init();
/* /* This must be after timekeeping is initialized */
* For best initial stack canary entropy, prepare it after: random_init();
* - setup_arch() for any UEFI RNG entropy and boot cmdline access
* - timekeeping_init() for ktime entropy used in random_init() /* These make use of the fully initialized rng */
* - time_init() for making random_get_entropy() work on some platforms kfence_init();
 * - random_init() to initialize the RNG from early entropy sources
*/
random_init(command_line);
boot_init_stack_canary(); boot_init_stack_canary();
perf_event_init(); perf_event_init();
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <linux/times.h> #include <linux/times.h>
#include <linux/posix-timers.h> #include <linux/posix-timers.h>
#include <linux/security.h> #include <linux/security.h>
#include <linux/random.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <linux/tty.h> #include <linux/tty.h>
#include <linux/signal.h> #include <linux/signal.h>
...@@ -1366,6 +1367,7 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len) ...@@ -1366,6 +1367,7 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
if (!copy_from_user(tmp, name, len)) { if (!copy_from_user(tmp, name, len)) {
struct new_utsname *u; struct new_utsname *u;
add_device_randomness(tmp, len);
down_write(&uts_sem); down_write(&uts_sem);
u = utsname(); u = utsname();
memcpy(u->nodename, tmp, len); memcpy(u->nodename, tmp, len);
...@@ -1419,6 +1421,7 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len) ...@@ -1419,6 +1421,7 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
if (!copy_from_user(tmp, name, len)) { if (!copy_from_user(tmp, name, len)) {
struct new_utsname *u; struct new_utsname *u;
add_device_randomness(tmp, len);
down_write(&uts_sem); down_write(&uts_sem);
u = utsname(); u = utsname();
memcpy(u->domainname, tmp, len); memcpy(u->domainname, tmp, len);
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/export.h> #include <linux/export.h>
#include <linux/uts.h> #include <linux/uts.h>
#include <linux/utsname.h> #include <linux/utsname.h>
#include <linux/random.h>
#include <linux/sysctl.h> #include <linux/sysctl.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/rwsem.h> #include <linux/rwsem.h>
...@@ -57,6 +58,7 @@ static int proc_do_uts_string(struct ctl_table *table, int write, ...@@ -57,6 +58,7 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
* theoretically be incorrect if there are two parallel writes * theoretically be incorrect if there are two parallel writes
* at non-zero offsets to the same sysctl. * at non-zero offsets to the same sysctl.
*/ */
add_device_randomness(tmp_data, sizeof(tmp_data));
down_write(&uts_sem); down_write(&uts_sem);
memcpy(get_uts(table), tmp_data, sizeof(tmp_data)); memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
up_write(&uts_sem); up_write(&uts_sem);
......
...@@ -864,7 +864,7 @@ static void kfence_init_enable(void) ...@@ -864,7 +864,7 @@ static void kfence_init_enable(void)
void __init kfence_init(void) void __init kfence_init(void)
{ {
stack_hash_seed = (u32)random_get_entropy(); stack_hash_seed = get_random_u32();
/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */ /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
if (!kfence_sample_interval) if (!kfence_sample_interval)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment