Commit 35a9393c authored by Peter Zijlstra, committed by Ingo Molnar

lockdep: Fix the module unload key range freeing logic

Module unload calls lockdep_free_key_range(), which removes entries
from the data structures. Most of the lockdep code, on the other hand,
assumes the data structures are append-only; see in particular the
comments in add_lock_to_list() and look_up_lock_class().

Clearly this has only worked by accident; make it work properly. The
actual scenario that would make it go boom involves the memory freed by
the module unload being re-allocated and re-used for a lock inside an
RCU-sched grace period. This is a very unlikely scenario, but better to
plug the hole.

Use RCU list iteration in all places and amend the comments.

Change lockdep_free_key_range() to issue a sync_sched() between
removal from the lists and returning -- at which point the caller is
free to release the memory. Further ensure the call sites are placed
correctly and comment the requirements.
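
As a rough illustration of the pattern described above, here is a minimal,
self-contained sketch with hypothetical demo_* names -- it is not the lockdep
code itself. Readers walk the list with IRQs disabled, which is an RCU-sched
read-side critical section, while the updater unlinks entries under its lock
and then waits a grace period before the key memory may be freed or re-used.
(On kernels where the RCU flavors have been merged, synchronize_sched() is
spelled synchronize_rcu().)

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for a lock class hashed by key. */
struct demo_class {
	struct list_head	hash_entry;
	void			*key;
};

static LIST_HEAD(demo_hash);			/* hypothetical hash bucket */
static DEFINE_SPINLOCK(demo_lock);		/* stands in for the graph lock */

/* Reader: IRQs disabled acts as the RCU-sched read-side critical section. */
static struct demo_class *demo_look_up(void *key)
{
	struct demo_class *class, *found = NULL;
	unsigned long flags;

	local_irq_save(flags);
	list_for_each_entry_rcu(class, &demo_hash, hash_entry) {
		if (class->key == key) {
			found = class;
			break;
		}
	}
	local_irq_restore(flags);

	return found;
}

/* Updater: unlink every entry whose key lies in [start, start + size). */
static void demo_free_key_range(void *start, unsigned long size)
{
	struct demo_class *class;
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_for_each_entry_rcu(class, &demo_hash, hash_entry) {
		if ((char *)class->key >= (char *)start &&
		    (char *)class->key <  (char *)start + size)
			list_del_rcu(&class->hash_entry);
	}
	spin_unlock_irqrestore(&demo_lock, flags);

	/*
	 * Only after this grace period may the caller free or re-use the
	 * memory the keys live in; all IRQ-disabled readers have finished.
	 */
	synchronize_sched();
}

The real code additionally keeps zapped classes allocated (see the XXX
comment added to lockdep_free_key_range() below), so only the module memory
holding the keys and names is ever returned.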
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Tsyvarev <tsyvarev@ispras.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent bc465aa9
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class)
 	if (!new_class->name)
 		return 0;
 
-	list_for_each_entry(class, &all_lock_classes, lock_entry) {
+	list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
 		if (new_class->key - new_class->subclass == class->key)
 			return class->name_version;
 		if (class->name && !strcmp(class->name, new_class->name))
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	hash_head = classhashentry(key);
 
 	/*
-	 * We can walk the hash lockfree, because the hash only
-	 * grows, and we are careful when adding entries to the end:
+	 * We do an RCU walk of the hash, see lockdep_free_key_range().
 	 */
-	list_for_each_entry(class, hash_head, hash_entry) {
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return NULL;
+
+	list_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key) {
 			/*
 			 * Huh! same key, different name? Did someone trample
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
 	struct lock_class *class;
-	unsigned long flags;
+
+	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 	class = look_up_lock_class(lock, subclass);
 	if (likely(class))
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	key = lock->key->subkeys + subclass;
 	hash_head = classhashentry(key);
 
-	raw_local_irq_save(flags);
 	if (!graph_lock()) {
-		raw_local_irq_restore(flags);
 		return NULL;
 	}
 	/*
 	 * We have to do the hash-walk again, to avoid races
 	 * with another CPU:
 	 */
-	list_for_each_entry(class, hash_head, hash_entry)
+	list_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key)
 			goto out_unlock_set;
+	}
+
 	/*
 	 * Allocate a new key from the static array, and add it to
 	 * the hash:
 	 */
 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
 		if (!debug_locks_off_graph_unlock()) {
-			raw_local_irq_restore(flags);
 			return NULL;
 		}
-		raw_local_irq_restore(flags);
 
 		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
 		dump_stack();
@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
 	if (verbose(class)) {
 		graph_unlock();
-		raw_local_irq_restore(flags);
 
 		printk("\nnew class %p: %s", class->key, class->name);
 		if (class->name_version > 1)
@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 		printk("\n");
 		dump_stack();
 
-		raw_local_irq_save(flags);
 		if (!graph_lock()) {
-			raw_local_irq_restore(flags);
 			return NULL;
 		}
 	}
 out_unlock_set:
 	graph_unlock();
-	raw_local_irq_restore(flags);
 
 out_set_class_cache:
 	if (!subclass || force)
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	entry->distance = distance;
 	entry->trace = *trace;
 	/*
-	 * Since we never remove from the dependency list, the list can
-	 * be walked lockless by other CPUs, it's only allocation
-	 * that must be protected by the spinlock. But this also means
-	 * we must make new entries visible only once writes to the
-	 * entry become visible - hence the RCU op:
+	 * Both allocation and removal are done under the graph lock; but
+	 * iteration is under RCU-sched; see look_up_lock_class() and
+	 * lockdep_free_key_range().
 	 */
 	list_add_tail_rcu(&entry->entry, head);
 
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry,
 		else
 			head = &lock->class->locks_before;
 
-		list_for_each_entry(entry, head, entry) {
+		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+		list_for_each_entry_rcu(entry, head, entry) {
 			if (!lock_accessed(entry)) {
 				unsigned int cq_depth;
 				mark_lock_accessed(entry, lock);
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	 * We can walk it lock-free, because entries only get added
 	 * to the hash:
 	 */
-	list_for_each_entry(chain, hash_head, entry) {
+	list_for_each_entry_rcu(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 cache_hit:
 			debug_atomic_inc(chain_lookup_hits);
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	if (unlikely(!debug_locks))
 		return;
 
-	if (subclass)
+	if (subclass) {
+		unsigned long flags;
+
+		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+			return;
+
+		raw_local_irq_save(flags);
+		current->lockdep_recursion = 1;
 		register_lock_class(lock, subclass, 1);
+		current->lockdep_recursion = 0;
+		raw_local_irq_restore(flags);
+	}
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size)
 	return addr >= start && addr < start + size;
 }
 
+/*
+ * Used in module.c to remove lock classes from memory that is going to be
+ * freed; and possibly re-used by other modules.
+ *
+ * We will have had one sync_sched() before getting here, so we're guaranteed
+ * nobody will look up these exact classes -- they're properly dead but still
+ * allocated.
+ */
 void lockdep_free_key_range(void *start, unsigned long size)
 {
-	struct lock_class *class, *next;
+	struct lock_class *class;
 	struct list_head *head;
 	unsigned long flags;
 	int i;
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 		head = classhash_table + i;
 		if (list_empty(head))
 			continue;
-		list_for_each_entry_safe(class, next, head, hash_entry) {
+		list_for_each_entry_rcu(class, head, hash_entry) {
 			if (within(class->key, start, size))
 				zap_class(class);
 			else if (within(class->name, start, size))
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size)
 	if (locked)
 		graph_unlock();
 	raw_local_irq_restore(flags);
+
+	/*
+	 * Wait for any possible iterators from look_up_lock_class() to pass
+	 * before continuing to free the memory they refer to.
+	 *
+	 * sync_sched() is sufficient because the read-side is IRQ disable.
+	 */
+	synchronize_sched();
+
+	/*
+	 * XXX at this point we could return the resources to the pool;
+	 * instead we leak them. We would need to change to bitmap allocators
+	 * instead of the linear allocators we have now.
+	 */
 }
 
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-	struct lock_class *class, *next;
+	struct lock_class *class;
 	struct list_head *head;
 	unsigned long flags;
 	int i, j;
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		head = classhash_table + i;
 		if (list_empty(head))
 			continue;
-		list_for_each_entry_safe(class, next, head, hash_entry) {
+		list_for_each_entry_rcu(class, head, hash_entry) {
 			int match = 0;
 
 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1865,7 +1865,7 @@ static void free_module(struct module *mod)
 	kfree(mod->args);
 	percpu_modfree(mod);
 
-	/* Free lock-classes: */
+	/* Free lock-classes; relies on the preceding sync_rcu(). */
 	lockdep_free_key_range(mod->module_core, mod->core_size);
 
 	/* Finally, free the core (containing the module structure) */
@@ -3349,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	module_bug_cleanup(mod);
 	mutex_unlock(&module_mutex);
 
-	/* Free lock-classes: */
-	lockdep_free_key_range(mod->module_core, mod->core_size);
-
 	/* we can't deallocate the module until we clear memory protection */
 	unset_module_init_ro_nx(mod);
 	unset_module_core_ro_nx(mod);
@@ -3375,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	synchronize_rcu();
 	mutex_unlock(&module_mutex);
  free_module:
+	/* Free lock-classes; relies on the preceding sync_rcu() */
+	lockdep_free_key_range(mod->module_core, mod->core_size);
+
 	module_deallocate(mod, info);
  free_copy:
 	free_copy(info);
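
Finally, a hedged sketch of the caller-side ordering the kernel/module.c hunks
above establish. The demo_* helpers and struct are hypothetical; only
lockdep_free_key_range() is the real interface. A module's lock classes are
zapped only after an RCU grace period has passed since the module was
unlinked, and only immediately before its core memory is handed back, both in
free_module() and in the load_module() error path.

#include <linux/lockdep.h>
#include <linux/rcupdate.h>

/* Hypothetical stand-in for the parts of struct module used here. */
struct demo_module {
	void		*module_core;
	unsigned long	core_size;
};

static void demo_unlink(struct demo_module *mod)	{ /* drop mod from all lists */ }
static void demo_release_core(struct demo_module *mod)	{ /* hand core memory back */ }

static void demo_free_module(struct demo_module *mod)
{
	demo_unlink(mod);
	synchronize_rcu();	/* running lookups drain; no new ones can start */

	/* Free lock-classes; relies on the preceding grace period. */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	/* Only now may the memory holding the keys be freed and re-used. */
	demo_release_core(mod);
}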