Commit c11878fd authored by Ingo Molnar


Merge branch 'for-mingo-kcsan' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into locking/core

Pull KCSAN updates from Paul E. McKenney:

 "Kernel concurrency sanitizer (KCSAN) updates from Marco Elver."
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 62137364 567a83e6
kernel/kcsan/core.c
@@ -12,7 +12,6 @@
 #include <linux/moduleparam.h>
 #include <linux/percpu.h>
 #include <linux/preempt.h>
-#include <linux/random.h>
 #include <linux/sched.h>
 #include <linux/uaccess.h>
@@ -101,7 +100,7 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
 static DEFINE_PER_CPU(long, kcsan_skip);

 /* For kcsan_prandom_u32_max(). */
-static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
+static DEFINE_PER_CPU(u32, kcsan_rand_state);

 static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
                                                        size_t size,
@@ -275,20 +274,17 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
 }

 /*
- * Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
- * for more details.
- *
- * The open-coded version here is using only safe primitives for all contexts
- * where we can have KCSAN instrumentation. In particular, we cannot use
- * prandom_u32() directly, as its tracepoint could cause recursion.
+ * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
+ * congruential generator, using constants from "Numerical Recipes".
  */
 static u32 kcsan_prandom_u32_max(u32 ep_ro)
 {
-        struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
-        const u32 res = prandom_u32_state(state);
+        u32 state = this_cpu_read(kcsan_rand_state);
+
+        state = 1664525 * state + 1013904223;
+        this_cpu_write(kcsan_rand_state, state);

-        put_cpu_var(kcsan_rand_state);
-        return (u32)(((u64) res * ep_ro) >> 32);
+        return state % ep_ro;
 }

 static inline void reset_kcsan_skip(void)
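For readers skimming the hunk above: the replacement generator is an ordinary linear congruential generator. Below is a minimal userspace sketch of the same logic, purely for illustration; the function and variable names are invented here, and the kernel's per-CPU accessors (this_cpu_read()/this_cpu_write()) are replaced by a plain static variable.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for the per-CPU kcsan_rand_state (illustrative name). */
static uint32_t rand_state;

/*
 * Same construction as the new kcsan_prandom_u32_max(): advance a linear
 * congruential generator using the "Numerical Recipes" constants, then
 * reduce the result into [0, ep_ro) with a plain modulo. Assumes ep_ro > 0.
 */
static uint32_t lcg_u32_max(uint32_t ep_ro)
{
        rand_state = 1664525u * rand_state + 1013904223u;
        return rand_state % ep_ro;
}

int main(void)
{
        /* Crude seeding, analogous in spirit to the get_cycles() seed in the next hunk. */
        rand_state = (uint32_t)time(NULL);

        for (int i = 0; i < 5; i++)
                printf("%u\n", lcg_u32_max(100));  /* values in [0, 100) */
        return 0;
}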
@@ -639,10 +635,14 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
 void __init kcsan_init(void)
 {
+        int cpu;
+
         BUG_ON(!in_task());

         kcsan_debugfs_init();
-        prandom_seed_full_state(&kcsan_rand_state);
+
+        for_each_possible_cpu(cpu)
+                per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();

         /*
          * We are in the init task, and no other tasks should be running;
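A side note on the reduction step visible in the earlier hunk: the old code scaled the raw 32-bit value into [0, ep_ro) with a widening multiply and right shift (the same scheme prandom_u32_max() uses), whereas the new code simply takes the value modulo ep_ro. The snippet below is not part of this commit; it only shows the two mappings side by side for comparison.

#include <stdint.h>
#include <stdio.h>

/* Old-style reduction: scale a 32-bit value into [0, ep_ro) via multiply/shift. */
static uint32_t reduce_mul_shift(uint32_t res, uint32_t ep_ro)
{
        return (uint32_t)(((uint64_t)res * ep_ro) >> 32);
}

/* New-style reduction: plain modulo, as in the updated kcsan_prandom_u32_max(). */
static uint32_t reduce_mod(uint32_t res, uint32_t ep_ro)
{
        return res % ep_ro;
}

int main(void)
{
        const uint32_t samples[] = { 0u, 123456789u, 0xffffffffu };

        for (int i = 0; i < 3; i++)
                printf("%10u -> mul_shift=%2u  mod=%2u\n", samples[i],
                       reduce_mul_shift(samples[i], 100),
                       reduce_mod(samples[i], 100));
        return 0;
}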
lib/Makefile
@@ -27,9 +27,6 @@ KASAN_SANITIZE_string.o := n
 CFLAGS_string.o += -fno-stack-protector
 endif

-# Used by KCSAN while enabled, avoid recursion.
-KCSAN_SANITIZE_random32.o := n
-
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
          rbtree.o radix-tree.o timerqueue.o xarray.o \
          idr.o extable.o sha1.o irq_regs.o argv_split.o \