Commit 17147677 authored by Frederic Weisbecker's avatar Frederic Weisbecker Committed by Paul E. McKenney

context_tracking: Convert state to atomic_t

Context tracking's state and dynticks counter are going to be merged
in a single field so that both updates can happen atomically and at the
same time. Prepare for that by converting the state into an atomic_t.

[ paulmck: Apply kernel test robot feedback. ]
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Nicolas Saenz Julienne <nsaenz@kernel.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Cc: Yu Liao <liaoyu15@huawei.com>
Cc: Phil Auld <pauld@redhat.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Alex Belits <abelits@marvell.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Tested-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
parent c33ef43a
...@@ -56,7 +56,7 @@ static inline enum ctx_state exception_enter(void) ...@@ -56,7 +56,7 @@ static inline enum ctx_state exception_enter(void)
!context_tracking_enabled()) !context_tracking_enabled())
return 0; return 0;
prev_ctx = this_cpu_read(context_tracking.state); prev_ctx = __ct_state();
if (prev_ctx != CONTEXT_KERNEL) if (prev_ctx != CONTEXT_KERNEL)
ct_user_exit(prev_ctx); ct_user_exit(prev_ctx);
...@@ -86,33 +86,21 @@ static __always_inline void context_tracking_guest_exit(void) ...@@ -86,33 +86,21 @@ static __always_inline void context_tracking_guest_exit(void)
__ct_user_exit(CONTEXT_GUEST); __ct_user_exit(CONTEXT_GUEST);
} }
/** #define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
* ct_state() - return the current context tracking state if known
*
* Returns the current cpu's context tracking state if context tracking
* is enabled. If context tracking is disabled, returns
* CONTEXT_DISABLED. This should be used primarily for debugging.
*/
static __always_inline enum ctx_state ct_state(void)
{
return context_tracking_enabled() ?
this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}
#else #else
static inline void user_enter(void) { } static inline void user_enter(void) { }
static inline void user_exit(void) { } static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { } static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { } static inline void user_exit_irqoff(void) { }
static inline enum ctx_state exception_enter(void) { return 0; } static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { } static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; } static inline int ct_state(void) { return -1; }
static __always_inline bool context_tracking_guest_enter(void) { return false; } static __always_inline bool context_tracking_guest_enter(void) { return false; }
static inline void context_tracking_guest_exit(void) { } static inline void context_tracking_guest_exit(void) { }
#define CT_WARN_ON(cond) do { } while (0)
#endif /* !CONFIG_CONTEXT_TRACKING_USER */ #endif /* !CONFIG_CONTEXT_TRACKING_USER */
#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE #ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void); extern void context_tracking_init(void);
#else #else
...@@ -130,16 +118,16 @@ extern void ct_idle_exit(void); ...@@ -130,16 +118,16 @@ extern void ct_idle_exit(void);
*/ */
static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void) static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
{ {
return !(arch_atomic_read(this_cpu_ptr(&context_tracking.dynticks)) & 0x1); return !(arch_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
} }
/* /*
* Increment the current CPU's context_tracking structure's ->dynticks field * Increment the current CPU's context_tracking structure's ->state field
* with ordering. Return the new value. * with ordering. Return the new value.
*/ */
static __always_inline unsigned long rcu_dynticks_inc(int incby) static __always_inline unsigned long ct_state_inc(int incby)
{ {
return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.dynticks)); return arch_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
} }
#else #else
......
...@@ -6,15 +6,23 @@ ...@@ -6,15 +6,23 @@
#include <linux/static_key.h> #include <linux/static_key.h>
#include <linux/context_tracking_irq.h> #include <linux/context_tracking_irq.h>
/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)
enum ctx_state { enum ctx_state {
CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */ CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */
CONTEXT_KERNEL = 0, CONTEXT_KERNEL = 0,
CONTEXT_USER, CONTEXT_IDLE = 1,
CONTEXT_GUEST, CONTEXT_USER = 2,
CONTEXT_GUEST = 3,
CONTEXT_MAX = 4,
}; };
/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */ /* Even value for idle, else odd. */
#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1) #define RCU_DYNTICKS_IDX CONTEXT_MAX
#define CT_STATE_MASK (CONTEXT_MAX - 1)
#define CT_DYNTICKS_MASK (~CT_STATE_MASK)
struct context_tracking { struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING_USER #ifdef CONFIG_CONTEXT_TRACKING_USER
...@@ -26,10 +34,11 @@ struct context_tracking { ...@@ -26,10 +34,11 @@ struct context_tracking {
*/ */
bool active; bool active;
int recursion; int recursion;
enum ctx_state state; #endif
#ifdef CONFIG_CONTEXT_TRACKING
atomic_t state;
#endif #endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE #ifdef CONFIG_CONTEXT_TRACKING_IDLE
atomic_t dynticks; /* Even value for idle, else odd. */
long dynticks_nesting; /* Track process nesting level. */ long dynticks_nesting; /* Track process nesting level. */
long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */ long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */
#endif #endif
...@@ -37,26 +46,31 @@ struct context_tracking { ...@@ -37,26 +46,31 @@ struct context_tracking {
#ifdef CONFIG_CONTEXT_TRACKING #ifdef CONFIG_CONTEXT_TRACKING
DECLARE_PER_CPU(struct context_tracking, context_tracking); DECLARE_PER_CPU(struct context_tracking, context_tracking);
static __always_inline int __ct_state(void)
{
return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_STATE_MASK;
}
#endif #endif
#ifdef CONFIG_CONTEXT_TRACKING_IDLE #ifdef CONFIG_CONTEXT_TRACKING_IDLE
static __always_inline int ct_dynticks(void) static __always_inline int ct_dynticks(void)
{ {
return atomic_read(this_cpu_ptr(&context_tracking.dynticks)); return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK;
} }
static __always_inline int ct_dynticks_cpu(int cpu) static __always_inline int ct_dynticks_cpu(int cpu)
{ {
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
return atomic_read(&ct->dynticks); return atomic_read(&ct->state) & CT_DYNTICKS_MASK;
} }
static __always_inline int ct_dynticks_cpu_acquire(int cpu) static __always_inline int ct_dynticks_cpu_acquire(int cpu)
{ {
struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
return atomic_read_acquire(&ct->dynticks); return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK;
} }
static __always_inline long ct_dynticks_nesting(void) static __always_inline long ct_dynticks_nesting(void)
...@@ -102,6 +116,27 @@ static inline bool context_tracking_enabled_this_cpu(void) ...@@ -102,6 +116,27 @@ static inline bool context_tracking_enabled_this_cpu(void)
return context_tracking_enabled() && __this_cpu_read(context_tracking.active); return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
} }
/**
* ct_state() - return the current context tracking state if known
*
* Returns the current cpu's context tracking state if context tracking
* is enabled. If context tracking is disabled, returns
* CONTEXT_DISABLED. This should be used primarily for debugging.
*/
static __always_inline int ct_state(void)
{
int ret;
if (!context_tracking_enabled())
return CONTEXT_DISABLED;
preempt_disable();
ret = __ct_state();
preempt_enable();
return ret;
}
#else #else
static __always_inline bool context_tracking_enabled(void) { return false; } static __always_inline bool context_tracking_enabled(void) { return false; }
static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; } static __always_inline bool context_tracking_enabled_cpu(int cpu) { return false; }
......
This diff is collapsed.
...@@ -272,9 +272,9 @@ void rcu_softirq_qs(void) ...@@ -272,9 +272,9 @@ void rcu_softirq_qs(void)
*/ */
static void rcu_dynticks_eqs_online(void) static void rcu_dynticks_eqs_online(void)
{ {
if (ct_dynticks() & 0x1) if (ct_dynticks() & RCU_DYNTICKS_IDX)
return; return;
rcu_dynticks_inc(1); ct_state_inc(RCU_DYNTICKS_IDX);
} }
/* /*
...@@ -293,7 +293,7 @@ static int rcu_dynticks_snap(int cpu) ...@@ -293,7 +293,7 @@ static int rcu_dynticks_snap(int cpu)
*/ */
static bool rcu_dynticks_in_eqs(int snap) static bool rcu_dynticks_in_eqs(int snap)
{ {
return !(snap & 0x1); return !(snap & RCU_DYNTICKS_IDX);
} }
/* Return true if the specified CPU is currently idle from an RCU viewpoint. */ /* Return true if the specified CPU is currently idle from an RCU viewpoint. */
...@@ -321,8 +321,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) ...@@ -321,8 +321,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
int snap; int snap;
// If not quiescent, force back to earlier extended quiescent state. // If not quiescent, force back to earlier extended quiescent state.
snap = ct_dynticks_cpu(cpu) & ~0x1; snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
smp_rmb(); // Order ->dynticks and *vp reads. smp_rmb(); // Order ->dynticks and *vp reads.
if (READ_ONCE(*vp)) if (READ_ONCE(*vp))
return false; // Non-zero, so report failure; return false; // Non-zero, so report failure;
...@@ -348,9 +347,9 @@ notrace void rcu_momentary_dyntick_idle(void) ...@@ -348,9 +347,9 @@ notrace void rcu_momentary_dyntick_idle(void)
int seq; int seq;
raw_cpu_write(rcu_data.rcu_need_heavy_qs, false); raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
seq = rcu_dynticks_inc(2); seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
/* It is illegal to call this from idle state. */ /* It is illegal to call this from idle state. */
WARN_ON_ONCE(!(seq & 0x1)); WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
rcu_preempt_deferred_qs(current); rcu_preempt_deferred_qs(current);
} }
EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle); EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
......
...@@ -469,7 +469,7 @@ static void print_cpu_stall_info(int cpu) ...@@ -469,7 +469,7 @@ static void print_cpu_stall_info(int cpu)
rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j); rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
if (rcuc_starved) if (rcuc_starved)
sprintf(buf, " rcuc=%ld jiffies(starved)", j); sprintf(buf, " rcuc=%ld jiffies(starved)", j);
pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n", pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%04x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n",
cpu, cpu,
"O."[!!cpu_online(cpu)], "O."[!!cpu_online(cpu)],
"o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)], "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
...@@ -478,7 +478,7 @@ static void print_cpu_stall_info(int cpu) ...@@ -478,7 +478,7 @@ static void print_cpu_stall_info(int cpu)
rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' : rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
"!."[!delta], "!."[!delta],
ticks_value, ticks_title, ticks_value, ticks_title,
rcu_dynticks_snap(cpu) & 0xfff, rcu_dynticks_snap(cpu) & 0xffff,
ct_dynticks_nesting_cpu(cpu), ct_dynticks_nmi_nesting_cpu(cpu), ct_dynticks_nesting_cpu(cpu), ct_dynticks_nmi_nesting_cpu(cpu),
rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu), rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart, data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment