Commit b3b9c187 authored by Waiman Long's avatar Waiman Long Committed by Ingo Molnar

locking/lockdep: Decrement IRQ context counters when removing lock chain

There are currently three counters to track the IRQ context of a lock
chain - nr_hardirq_chains, nr_softirq_chains and nr_process_chains.
They are incremented when a new lock chain is added, but they are
not decremented when a lock chain is removed. That causes some of the
statistic counts reported by /proc/lockdep_stats to be incorrect.

Fix that by decrementing the right IRQ context counter when a lock chain
is removed.

Since inc_chains() no longer accesses hardirq_context and softirq_context
directly, it is moved out from the CONFIG_TRACE_IRQFLAGS conditional
compilation block.

Fixes: a0b0fd53 ("locking/lockdep: Free lock classes that are no longer in use")
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20200206152408.24165-2-longman@redhat.com
parent 0a679e13
...@@ -2298,18 +2298,6 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, ...@@ -2298,18 +2298,6 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
return 0; return 0;
} }
/*
 * Old version removed by this commit: bump the per-IRQ-context chain
 * counter based on the *current* task's context. Reading current->
 * hardirq_context/softirq_context directly is what forced this function
 * to live inside the CONFIG_TRACE_IRQFLAGS block; the replacement takes
 * the chain's own irq_context value instead.
 */
static void inc_chains(void)
{
/* hardirq wins over softirq; otherwise it is a process-context chain */
if (current->hardirq_context)
nr_hardirq_chains++;
else {
if (current->softirq_context)
nr_softirq_chains++;
else
nr_process_chains++;
}
}
#else #else
static inline int check_irq_usage(struct task_struct *curr, static inline int check_irq_usage(struct task_struct *curr,
...@@ -2317,13 +2305,27 @@ static inline int check_irq_usage(struct task_struct *curr, ...@@ -2317,13 +2305,27 @@ static inline int check_irq_usage(struct task_struct *curr,
{ {
return 1; return 1;
} }
#endif /* CONFIG_TRACE_IRQFLAGS */
static inline void inc_chains(void) static void inc_chains(int irq_context)
{ {
if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
nr_hardirq_chains++;
else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
nr_softirq_chains++;
else
nr_process_chains++; nr_process_chains++;
} }
#endif /* CONFIG_TRACE_IRQFLAGS */ static void dec_chains(int irq_context)
/*
 * New in this commit: exact mirror of inc_chains(). Decrements the
 * counter matching the chain's recorded irq_context bits
 * (LOCK_CHAIN_HARDIRQ_CONTEXT / LOCK_CHAIN_SOFTIRQ_CONTEXT) when a lock
 * chain is removed, keeping /proc/lockdep_stats counts accurate.
 */
{
if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
nr_hardirq_chains--;
else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
nr_softirq_chains--;
else
/* neither bit set: chain was created in plain process context */
nr_process_chains--;
}
static void static void
print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv) print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
...@@ -2843,7 +2845,7 @@ static inline int add_chain_cache(struct task_struct *curr, ...@@ -2843,7 +2845,7 @@ static inline int add_chain_cache(struct task_struct *curr,
hlist_add_head_rcu(&chain->entry, hash_head); hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses); debug_atomic_inc(chain_lookup_misses);
inc_chains(); inc_chains(chain->irq_context);
return 1; return 1;
} }
...@@ -3596,7 +3598,8 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) ...@@ -3596,7 +3598,8 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
static inline unsigned int task_irq_context(struct task_struct *task) static inline unsigned int task_irq_context(struct task_struct *task)
{ {
return 2 * !!task->hardirq_context + !!task->softirq_context; return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context +
LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
} }
static int separate_irq_context(struct task_struct *curr, static int separate_irq_context(struct task_struct *curr,
...@@ -4798,6 +4801,8 @@ static void remove_class_from_lock_chain(struct pending_free *pf, ...@@ -4798,6 +4801,8 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
return; return;
/* Overwrite the chain key for concurrent RCU readers. */ /* Overwrite the chain key for concurrent RCU readers. */
WRITE_ONCE(chain->chain_key, chain_key); WRITE_ONCE(chain->chain_key, chain_key);
dec_chains(chain->irq_context);
/* /*
* Note: calling hlist_del_rcu() from inside a * Note: calling hlist_del_rcu() from inside a
* hlist_for_each_entry_rcu() loop is safe. * hlist_for_each_entry_rcu() loop is safe.
...@@ -4819,6 +4824,7 @@ static void remove_class_from_lock_chain(struct pending_free *pf, ...@@ -4819,6 +4824,7 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
} }
*new_chain = *chain; *new_chain = *chain;
hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key)); hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
inc_chains(new_chain->irq_context);
#endif #endif
} }
......
...@@ -106,6 +106,12 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ = ...@@ -106,6 +106,12 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =
#define STACK_TRACE_HASH_SIZE 16384 #define STACK_TRACE_HASH_SIZE 16384
#endif #endif
/*
* Bit definitions for lock_chain.irq_context
*/
#define LOCK_CHAIN_SOFTIRQ_CONTEXT (1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT (1 << 1)
#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment