Commit 9756f64c authored by Marco Elver, committed by Paul E. McKenney

kcsan: Avoid checking scoped accesses from nested contexts

Avoid checking scoped accesses from nested contexts (such as nested
interrupts or in scheduler code) which share the same kcsan_ctx.

This is to avoid detecting false positive races of accesses in the same
thread with currently scoped accesses: consider setting up a watchpoint
for a non-scoped (normal) access that also "conflicts" with a current
scoped access. In a nested interrupt (or in the scheduler), which shares
the same kcsan_ctx, we cannot check scoped accesses set up in the parent
context -- simply ignore them in this case.

With the introduction of kcsan_ctx::disable_scoped, we can also clean up
kcsan_check_scoped_accesses()'s recursion guard, and do not need to
modify the list's prev pointer.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 71f8de70
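
Not part of the commit, but to make the scenario above concrete: below is a minimal userspace C sketch of the guard pattern the message describes. All names (demo_ctx, demo_check_scoped_accesses, demo_setup_watchpoint, and so on) are invented for illustration; it only models a shared context with one scoped access and one armed watchpoint, not the real KCSAN watchpoint machinery.

/* demo_disable_scoped.c -- illustrative sketch only, not kernel code. */
#include <stdio.h>
#include <stddef.h>

/* One context shared by a task and any interrupts nesting on top of it,
 * loosely modelling kcsan_ctx. */
struct demo_ctx {
	int disable_scoped;              /* >0: skip scoped-access checking */
	const volatile void *scoped_ptr; /* one "scoped access" being watched */
};

static struct demo_ctx ctx;
static const volatile void *armed_watchpoint; /* models a set-up watchpoint */

/* Models checking the context's scoped accesses: if a watchpoint on the
 * same address is armed, a (false) same-thread race would be reported. */
static void demo_check_scoped_accesses(void)
{
	if (ctx.disable_scoped) {
		printf("scoped checks skipped in nested context\n");
		return;
	}
	ctx.disable_scoped++;            /* also serves as a recursion guard */
	if (ctx.scoped_ptr && ctx.scoped_ptr == armed_watchpoint)
		printf("false positive: same-thread race reported\n");
	else
		printf("scoped accesses checked, no report\n");
	ctx.disable_scoped--;
}

/* A nested interrupt sharing the same context runs its checks here. */
static void demo_nested_interrupt(void)
{
	demo_check_scoped_accesses();
}

/* Models setting up a watchpoint for a normal access that conflicts with
 * the context's own scoped access: with the guard, scoped checking stays
 * disabled while the watchpoint is armed, so a nested interrupt cannot
 * misreport it. */
static void demo_setup_watchpoint(const volatile void *ptr, int use_guard)
{
	if (use_guard)
		ctx.disable_scoped++;
	armed_watchpoint = ptr;
	demo_nested_interrupt();  /* interrupt arrives while watchpoint is armed */
	armed_watchpoint = NULL;
	if (use_guard)
		ctx.disable_scoped--;
}

int main(void)
{
	static int watched_var;

	ctx.scoped_ptr = &watched_var;          /* current scoped access */
	demo_setup_watchpoint(&watched_var, 0); /* without guard: false positive */
	demo_setup_watchpoint(&watched_var, 1); /* with disable_scoped: skipped */
	return 0;
}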
@@ -21,6 +21,7 @@
  */
 struct kcsan_ctx {
 	int disable_count; /* disable counter */
+	int disable_scoped; /* disable scoped access counter */
 	int atomic_next; /* number of following atomic ops */
 	/*
......
@@ -204,15 +204,17 @@ check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);
 static noinline void kcsan_check_scoped_accesses(void)
 {
 	struct kcsan_ctx *ctx = get_ctx();
-	struct list_head *prev_save = ctx->scoped_accesses.prev;
 	struct kcsan_scoped_access *scoped_access;
 
-	ctx->scoped_accesses.prev = NULL;  /* Avoid recursion. */
+	if (ctx->disable_scoped)
+		return;
+
+	ctx->disable_scoped++;
 	list_for_each_entry(scoped_access, &ctx->scoped_accesses, list) {
 		check_access(scoped_access->ptr, scoped_access->size,
 			     scoped_access->type, scoped_access->ip);
 	}
-	ctx->scoped_accesses.prev = prev_save;
+	ctx->disable_scoped--;
 }
 
 /* Rules for generic atomic accesses. Called from fast-path. */
@@ -465,6 +467,15 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned
 		goto out;
 	}
 
+	/*
+	 * Avoid races of scoped accesses from nested interrupts (or scheduler).
+	 * Assume setting up a watchpoint for a non-scoped (normal) access that
+	 * also conflicts with a current scoped access. In a nested interrupt,
+	 * which shares the context, it would check a conflicting scoped access.
+	 * To avoid, disable scoped access checking.
+	 */
+	ctx->disable_scoped++;
+
 	/*
 	 * Save and restore the IRQ state trace touched by KCSAN, since KCSAN's
 	 * runtime is entered for every memory access, and potentially useful
@@ -578,6 +589,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned
 	if (!kcsan_interrupt_watcher)
 		local_irq_restore(irq_flags);
 	kcsan_restore_irqtrace(current);
+	ctx->disable_scoped--;
 out:
 	user_access_restore(ua_flags);
 }
......
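
A side note, not from the commit text: disable_scoped is a counter rather than a boolean flag, matching disable_count above. A plausible reason is that increments and decrements must balance across arbitrarily deep nesting, so scoped checking is re-enabled only once the outermost level unwinds. A tiny sketch with invented names:

/* counter_vs_flag.c -- illustrative sketch only (invented names). */
#include <stdio.h>
#include <assert.h>

static int disable_scoped; /* depth of contexts that disabled scoped checks */

/* Models one nesting level of watchpoint setup: each level increments on
 * entry and decrements on exit, so checking resumes only after the
 * outermost level returns. */
static void demo_watchpoint_level(int depth)
{
	disable_scoped++;
	printf("depth %d: disable_scoped=%d\n", depth, disable_scoped);
	if (depth < 3)
		demo_watchpoint_level(depth + 1); /* a further nested interrupt */
	disable_scoped--;
}

int main(void)
{
	demo_watchpoint_level(1);
	assert(disable_scoped == 0); /* balanced after all levels unwind */
	printf("scoped checks re-enabled\n");
	return 0;
}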