Commit 545c23f2 authored by Byungchul Park, committed by Ingo Molnar

locking/lockdep: Refactor lookup_chain_cache()

Currently, lookup_chain_cache() provides both 'lookup' and 'add'
functionality in a single function, but each part is useful on its
own. This patch makes lookup_chain_cache() do only the 'lookup' part
and introduces add_chain_cache() for the 'add' part, which also makes
the code more readable than before.
Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akpm@linux-foundation.org
Cc: boqun.feng@gmail.com
Cc: kernel-team@lge.com
Cc: kirill@shutemov.name
Cc: npiggin@gmail.com
Cc: walken@google.com
Cc: willy@infradead.org
Link: http://lkml.kernel.org/r/1502089981-21272-2-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ae813308
@@ -2151,20 +2151,26 @@ static int check_no_collision(struct task_struct *curr,
 }
 
 /*
- * Look up a dependency chain. If the key is not present yet then
- * add it and return 1 - in this case the new dependency chain is
- * validated. If the key is already hashed, return 0.
- * (On return with 1 graph_lock is held.)
+ * Adds a dependency chain into chain hashtable. And must be called with
+ * graph_lock held.
+ *
+ * Return 0 if fail, and graph_lock is released.
+ * Return 1 if succeed, with graph_lock held.
  */
-static inline int lookup_chain_cache(struct task_struct *curr,
-				     struct held_lock *hlock,
-				     u64 chain_key)
+static inline int add_chain_cache(struct task_struct *curr,
+				  struct held_lock *hlock,
+				  u64 chain_key)
 {
 	struct lock_class *class = hlock_class(hlock);
 	struct hlist_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 	int i, j;
 
+	/*
+	 * Allocate a new chain entry from the static array, and add
+	 * it to the hash:
+	 */
+
 	/*
 	 * We might need to take the graph lock, ensure we've got IRQs
 	 * disabled to make this an IRQ-safe lock.. for recursion reasons
@@ -2172,43 +2178,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	 */
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return 0;
 
-	/*
-	 * We can walk it lock-free, because entries only get added
-	 * to the hash:
-	 */
-	hlist_for_each_entry_rcu(chain, hash_head, entry) {
-		if (chain->chain_key == chain_key) {
-cache_hit:
-			debug_atomic_inc(chain_lookup_hits);
-			if (!check_no_collision(curr, hlock, chain))
-				return 0;
-
-			if (very_verbose(class))
-				printk("\nhash chain already cached, key: "
-					"%016Lx tail class: [%p] %s\n",
-					(unsigned long long)chain_key,
-					class->key, class->name);
-			return 0;
-		}
-	}
-
-	if (very_verbose(class))
-		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
-			(unsigned long long)chain_key, class->key, class->name);
-
-	/*
-	 * Allocate a new chain entry from the static array, and add
-	 * it to the hash:
-	 */
-	if (!graph_lock())
-		return 0;
-	/*
-	 * We have to walk the chain again locked - to avoid duplicates:
-	 */
-	hlist_for_each_entry(chain, hash_head, entry) {
-		if (chain->chain_key == chain_key) {
-			graph_unlock();
-			goto cache_hit;
-		}
-	}
-
 	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
 		if (!debug_locks_off_graph_unlock())
 			return 0;
@@ -2260,6 +2230,78 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	return 1;
 }
 
+/*
+ * Look up a dependency chain.
+ */
+static inline struct lock_chain *lookup_chain_cache(u64 chain_key)
+{
+	struct hlist_head *hash_head = chainhashentry(chain_key);
+	struct lock_chain *chain;
+
+	/*
+	 * We can walk it lock-free, because entries only get added
+	 * to the hash:
+	 */
+	hlist_for_each_entry_rcu(chain, hash_head, entry) {
+		if (chain->chain_key == chain_key) {
+			debug_atomic_inc(chain_lookup_hits);
+			return chain;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * If the key is not present yet in dependency chain cache then
+ * add it and return 1 - in this case the new dependency chain is
+ * validated. If the key is already hashed, return 0.
+ * (On return with 1 graph_lock is held.)
+ */
+static inline int lookup_chain_cache_add(struct task_struct *curr,
+					 struct held_lock *hlock,
+					 u64 chain_key)
+{
+	struct lock_class *class = hlock_class(hlock);
+	struct lock_chain *chain = lookup_chain_cache(chain_key);
+
+	if (chain) {
+cache_hit:
+		if (!check_no_collision(curr, hlock, chain))
+			return 0;
+
+		if (very_verbose(class)) {
+			printk("\nhash chain already cached, key: "
+				"%016Lx tail class: [%p] %s\n",
+				(unsigned long long)chain_key,
+				class->key, class->name);
+		}
+
+		return 0;
+	}
+
+	if (very_verbose(class)) {
+		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
+			(unsigned long long)chain_key, class->key, class->name);
+	}
+
+	if (!graph_lock())
+		return 0;
+
+	/*
+	 * We have to walk the chain again locked - to avoid duplicates:
+	 */
+	chain = lookup_chain_cache(chain_key);
+	if (chain) {
+		graph_unlock();
+		goto cache_hit;
+	}
+
+	if (!add_chain_cache(curr, hlock, chain_key))
+		return 0;
+
+	return 1;
+}
+
 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 		struct held_lock *hlock, int chain_head, u64 chain_key)
 {
@@ -2270,11 +2312,11 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 	 *
 	 * We look up the chain_key and do the O(N^2) check and update of
 	 * the dependencies only if this is a new dependency chain.
-	 * (If lookup_chain_cache() returns with 1 it acquires
+	 * (If lookup_chain_cache_add() return with 1 it acquires
 	 * graph_lock for us)
 	 */
 	if (!hlock->trylock && hlock->check &&
-	    lookup_chain_cache(curr, hlock, chain_key)) {
+	    lookup_chain_cache_add(curr, hlock, chain_key)) {
 		/*
 		 * Check whether last held lock:
 		 *
@@ -2302,14 +2344,17 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 		 * Add dependency only if this lock is not the head
 		 * of the chain, and if it's not a secondary read-lock:
 		 */
-		if (!chain_head && ret != 2)
+		if (!chain_head && ret != 2) {
 			if (!check_prevs_add(curr, hlock))
 				return 0;
+		}
+
 		graph_unlock();
-	} else
-		/* after lookup_chain_cache(): */
+	} else {
+		/* after lookup_chain_cache_add(): */
 		if (unlikely(!debug_locks))
 			return 0;
+	}
 
 	return 1;
 }
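
For reference, below is a minimal userspace sketch of the lookup/add split this patch establishes. It is not the kernel code: a single linked list stands in for the chain hash table, a pthread mutex stands in for graph_lock, the RCU-protected walk becomes a plain list walk, and the names chain_lookup(), chain_add() and chain_lookup_add() are illustrative only.

	/* Minimal userspace sketch only; see the note above for what is simplified. */
	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct chain {
		uint64_t key;
		struct chain *next;
	};

	static struct chain *chains;		/* stands in for the chain hash table */
	static pthread_mutex_t graph_mutex = PTHREAD_MUTEX_INITIALIZER; /* stands in for graph_lock */

	/* Pure lookup, no side effects (the role of the new lookup_chain_cache()). */
	static struct chain *chain_lookup(uint64_t key)
	{
		struct chain *c;

		for (c = chains; c; c = c->next)
			if (c->key == key)
				return c;
		return NULL;
	}

	/* Pure add, caller must hold graph_mutex (the role of add_chain_cache()). */
	static int chain_add(uint64_t key)
	{
		struct chain *c = malloc(sizeof(*c));

		if (!c)
			return 0;
		c->key = key;
		c->next = chains;
		chains = c;
		return 1;
	}

	/*
	 * Combined helper (the role of lookup_chain_cache_add()): cheap lookup
	 * first, then take the lock and look up again before adding, so a
	 * concurrent adder cannot create a duplicate entry.  Returns 1 with the
	 * lock still held only when a new chain was added; 0 on a hit or failure.
	 */
	static int chain_lookup_add(uint64_t key)
	{
		if (chain_lookup(key))
			return 0;			/* cache hit */

		pthread_mutex_lock(&graph_mutex);
		if (chain_lookup(key)) {		/* re-check under the lock */
			pthread_mutex_unlock(&graph_mutex);
			return 0;
		}
		if (!chain_add(key)) {
			pthread_mutex_unlock(&graph_mutex);
			return 0;
		}
		return 1;				/* new chain, lock still held */
	}

	int main(void)
	{
		if (chain_lookup_add(0x1234)) {
			printf("new chain: validate it, then drop the lock\n");
			pthread_mutex_unlock(&graph_mutex);
		}
		if (!chain_lookup_add(0x1234))
			printf("second call is a cache hit, nothing added\n");
		return 0;
	}

The structure mirrors the patch: the lookup is a side-effect-free query, the add assumes the lock is already held, and the combined helper re-checks under the lock before inserting, returning with the lock held so the caller (validate_chain() in the kernel) can validate the newly added chain before unlocking.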