Commit fcc784be authored by Steven Rostedt (VMware), committed by Ingo Molnar

locking/lockdep: Do not record IRQ state within lockdep code

While debugging a mismatch between the interrupt enabling/disabling recorded
in the lockdep state and the actual enabling and disabling of interrupts, I
had to silence the IRQ disabling/enabling in debug_check_no_locks_freed(),
because it always showed up as the most recent IRQ-state change: it is called
right before the splat is printed.

Use raw_local_irq_save/restore() not only in debug_check_no_locks_freed() but
in all internal lockdep functions: the tracked local_irq_save/restore() calls
would otherwise be recorded by lockdep itself, hiding the useful information
about where interrupts were last used incorrectly.
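
As an illustration (not part of the patch), this is roughly the pattern the
hunks below converge on. The helper name lockdep_internal_walk() is made up
for the sketch; raw_local_irq_save/restore(), arch_spin_lock/unlock() and
lockdep_lock are the real identifiers the diff touches:

/*
 * Sketch only: inside lockdep we must not use the instrumented
 * local_irq_save()/local_irq_restore(), because those go through the
 * irq-flags tracing hooks and get recorded by lockdep itself as the most
 * recent IRQ-state change, overwriting the information a later splat
 * wants to print.  The raw_*() variants only toggle the hardware flag
 * and leave lockdep's bookkeeping untouched.
 */
static void lockdep_internal_walk(void)		/* hypothetical helper */
{
	unsigned long flags;

	raw_local_irq_save(flags);		/* untracked IRQ disable */
	arch_spin_lock(&lockdep_lock);		/* raw arch lock, also untracked */

	/* ... walk the dependency graph ... */

	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);		/* untracked IRQ restore */
}
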
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: https://lkml.kernel.org/lkml/20180404140630.3f4f4c7a@gandalf.local.home
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 03eeafdd
@@ -1265,11 +1265,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
 	this.parent = NULL;
 	this.class = class;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_forward_deps(&this);
 	arch_spin_unlock(&lockdep_lock);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return ret;
 }
@@ -1292,11 +1292,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 	this.parent = NULL;
 	this.class = class;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_backward_deps(&this);
 	arch_spin_unlock(&lockdep_lock);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return ret;
 }
@@ -4411,7 +4411,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 	if (unlikely(!debug_locks))
 		return;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
@@ -4422,7 +4422,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
 		break;
 	}
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);