Commit 06947aaa authored by Maciej W. Rozycki, committed by Ralf Baechle

MIPS: Implement random_get_entropy with CP0 Random

Update to commit 9c9b415c [MIPS:
Reimplement get_cycles().]

On systems where for whatever reason we can't use the cycle counter, fall
back to the c0_random register as an entropy source.  It has however a
very small range that makes it suitable for random_get_entropy only and
not for get_cycles.

This optimised version compiles to 8 instructions in the fast path even in
the worst case, where all the conditions to check are variable (including
an MFC0 move delay slot that is only required for very old processors):

     828:	8cf90000 	lw	t9,0(a3)
			828: R_MIPS_LO16	jiffies
     82c:	40057800 	mfc0	a1,c0_prid
     830:	3c0200ff 	lui	v0,0xff
     834:	00a21024 	and	v0,a1,v0
     838:	1040007d 	beqz	v0,a30 <add_interrupt_randomness+0x22c>
     83c:	3c030000 	lui	v1,0x0
			83c: R_MIPS_HI16	cpu_data
     840:	40024800 	mfc0	v0,c0_count
     844:	00000000 	nop
     848:	00409021 	move	s2,v0
     84c:	8ce20000 	lw	v0,0(a3)
			84c: R_MIPS_LO16	jiffies

On most targets the sequence will be shorter and on some it will reduce to
a single `MFC0 <reg>,c0_count', as all MIPS architecture (i.e. non-legacy
MIPS) processors require the CP0 Count register to be present.

The only known exception that reports MIPS architecture compliance, but
contrary to that lacks CP0 Count, is the Ingenic JZ4740 thingy.  For
broken platforms like that, this code requires cpu_has_counter to be
hardcoded to 0 (i.e. no variable setting is permitted) so as not to
penalise all the other good platforms out there.

The asm barrier is required so that the compiler does not pull any
potentially costly (cold cache!) `cpu_data' variable access into the fast
path.
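
Concretely, the barrier is the empty asm statement used in
can_use_mips_counter() below.  Naming the object as a "=m" output makes
the compiler assume it is written at that point, so a load of it cannot be
hoisted any earlier:

	/* No machine code is emitted, but GCC must treat
	 * cpu_data[0].options as clobbered here and therefore cannot
	 * schedule the (possibly cache-cold) load of it into the fast
	 * path above this statement.
	 */
	asm volatile("" : "=m" (cpu_data[0].options));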
Signed-off-by: Maciej W. Rozycki <macro@linux-mips.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: John Crispin <blogic@openwrt.org>
Cc: Andrew McGregor <andrewmcgr@gmail.com>
Cc: Dave Taht <dave.taht@bufferbloat.net>
Cc: Felix Fietkau <nbd@nbd.name>
Cc: Simon Kelley <simon@thekelleys.org.uk>
Cc: Jim Gettys <jg@freedesktop.org>
Cc: David Daney <ddaney@caviumnetworks.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/6702/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent fedfcb11
--- a/arch/mips/include/asm/timex.h
+++ b/arch/mips/include/asm/timex.h
@@ -4,12 +4,16 @@
  * for more details.
  *
  * Copyright (C) 1998, 1999, 2003 by Ralf Baechle
+ * Copyright (C) 2014 by Maciej W. Rozycki
  */
 #ifndef _ASM_TIMEX_H
 #define _ASM_TIMEX_H
 
 #ifdef __KERNEL__
 
+#include <linux/compiler.h>
+
+#include <asm/cpu.h>
 #include <asm/cpu-features.h>
 #include <asm/mipsregs.h>
 #include <asm/cpu-type.h>
@@ -45,29 +49,54 @@ typedef unsigned int cycles_t;
  * However for now the implementaton of this function doesn't get these
  * fine details right.
  */
 
+static inline int can_use_mips_counter(unsigned int prid)
+{
+	int comp = (prid & PRID_COMP_MASK) != PRID_COMP_LEGACY;
+
+	if (__builtin_constant_p(cpu_has_counter) && !cpu_has_counter)
+		return 0;
+	else if (__builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r)
+		return 1;
+	else if (likely(!__builtin_constant_p(cpu_has_mips_r) && comp))
+		return 1;
+	/* Make sure we don't peek at cpu_data[0].options in the fast path! */
+	if (!__builtin_constant_p(cpu_has_counter))
+		asm volatile("" : "=m" (cpu_data[0].options));
+	if (likely(cpu_has_counter &&
+		   prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
+		return 1;
+	else
+		return 0;
+}
+
 static inline cycles_t get_cycles(void)
 {
-	switch (boot_cpu_type()) {
-	case CPU_R4400PC:
-	case CPU_R4400SC:
-	case CPU_R4400MC:
-		if ((read_c0_prid() & 0xff) >= 0x0050)
-			return read_c0_count();
-		break;
-
-	case CPU_R4000PC:
-	case CPU_R4000SC:
-	case CPU_R4000MC:
-		break;
-
-	default:
-		if (cpu_has_counter)
-			return read_c0_count();
-		break;
-	}
-
-	return 0;	/* no usable counter */
+	if (can_use_mips_counter(read_c0_prid()))
+		return read_c0_count();
+	else
+		return 0;	/* no usable counter */
 }
+
+/*
+ * Like get_cycles - but where c0_count is not available we desperately
+ * use c0_random in an attempt to get at least a little bit of entropy.
+ *
+ * R6000 and R6000A neither have a count register nor a random register.
+ * That leaves no entropy source in the CPU itself.
+ */
+static inline unsigned long random_get_entropy(void)
+{
+	unsigned int prid = read_c0_prid();
+	unsigned int imp = prid & PRID_IMP_MASK;
+
+	if (can_use_mips_counter(prid))
+		return read_c0_count();
+	else if (likely(imp != PRID_IMP_R6000 && imp != PRID_IMP_R6000A))
+		return read_c0_random();
+	else
+		return 0;	/* no usable register */
+}
+#define random_get_entropy random_get_entropy
 
 #endif /* __KERNEL__ */
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -1026,6 +1026,7 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
 	decode_configs(c);
 	/* JZRISC does not implement the CP0 counter. */
 	c->options &= ~MIPS_CPU_COUNTER;
+	BUG_ON(!__builtin_constant_p(cpu_has_counter) || cpu_has_counter);
 	switch (c->processor_id & PRID_IMP_MASK) {
 	case PRID_IMP_JZRISC:
 		c->cputype = CPU_JZRISC;
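
For context (not part of this change), random_get_entropy() is the macro
hook that drivers/char/random.c uses to timestamp events such as
interrupts.  A minimal hypothetical caller, with an illustrative function
name, might look like:

	#include <linux/timex.h>	/* pulls in <asm/timex.h> */

	/* Hypothetical example; sample_event_entropy() is not a kernel
	 * function.  On MIPS this now yields c0_count where available,
	 * c0_random as a fallback, or 0 on R6000/R6000A.
	 */
	static void sample_event_entropy(void)
	{
		unsigned long cycles = random_get_entropy();

		/* ... mix `cycles' into the entropy pool ... */
	}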