Commit 23f873d8 authored by Byungchul Park, committed by Ingo Molnar

locking/lockdep: Detect and handle hist_lock ring buffer overwrite

The ring buffer can be overwritten by hardirq/softirq/work contexts.
Those cases must be considered on rollback or commit. For example,

          |<------ hist_lock ring buffer size ----->|
          ppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiiiii
wrapped > iiiiiiiiiiiiiiiiiiiiiii....................

          where 'p' represents an acquisition in process context,
          'i' represents an acquisition in irq context.

On irq exit, crossrelease tries to roll back the index to its original
position, but it must not do so here, because the entry has already been
overwritten by 'i' acquisitions and is no longer valid. Avoid rollback
and commit for overwritten entries.
Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akpm@linux-foundation.org
Cc: boqun.feng@gmail.com
Cc: kernel-team@lge.com
Cc: kirill@shutemov.name
Cc: npiggin@gmail.com
Cc: walken@google.com
Cc: willy@infradead.org
Link: http://lkml.kernel.org/r/1502089981-21272-7-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b09be676
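
To make the detection scheme concrete, here is a minimal user-space C
sketch of the idea (all names such as ring, add_entry and next_stamp are
hypothetical; the kernel's actual hist_id/hist_id_save bookkeeping in the
diff below differs in detail but follows the same principle): every entry
is stamped with a free-running generation counter when written, the stamp
at the rollback point is saved on context entry, and on context exit a
changed stamp means the ring wrapped, so the entry is invalidated instead
of trusted.

	/*
	 * Stand-alone sketch of overwrite detection via generation
	 * stamps; illustration only, not the kernel code.
	 */
	#include <stdio.h>
	#include <stdbool.h>

	#define RING_SIZE 8U

	struct entry {
		unsigned int stamp;	/* generation id taken at insertion */
		bool valid;
	};

	static struct entry ring[RING_SIZE];
	static unsigned int idx;	/* total insertions; slot is idx % RING_SIZE */
	static unsigned int next_stamp;	/* free-running, may wrap */

	static void add_entry(void)
	{
		ring[idx++ % RING_SIZE] =
			(struct entry){ .stamp = next_stamp++, .valid = true };
	}

	int main(void)
	{
		add_entry();			/* 'p': process context */

		/* "irq entry": remember the rollback point and its stamp */
		unsigned int saved_idx = idx;
		unsigned int saved_stamp = ring[(saved_idx - 1) % RING_SIZE].stamp;

		for (unsigned int i = 0; i < RING_SIZE + 2; i++)
			add_entry();		/* 'i': enough to wrap the ring */

		/* "irq exit": roll back, but only trust the slot if it survived */
		idx = saved_idx;
		struct entry *e = &ring[(idx - 1) % RING_SIZE];
		if (e->stamp != saved_stamp) {
			e->valid = false;	/* overwritten: invalidate */
			printf("ring wrapped: entry %u invalidated\n",
			       (idx - 1) % RING_SIZE);
		}
		return 0;
	}

Running this prints the invalidation message, mirroring the "wrapped" row
in the diagram above: the slot saved before the irq burst now carries a
newer stamp, so its old contents are gone.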
@@ -283,6 +283,26 @@ struct held_lock {
  * can be added at commit step.
  */
 struct hist_lock {
+	/*
+	 * Id for each entry in the ring buffer. This is used to
+	 * decide whether the ring buffer was overwritten or not.
+	 *
+	 * For example,
+	 *
+	 *           |<----------- hist_lock ring buffer size ------->|
+	 *           pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
+	 * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
+	 *
+	 *           where 'p' represents an acquisition in process
+	 *           context, 'i' represents an acquisition in irq
+	 *           context.
+	 *
+	 * In this example, the ring buffer was overwritten by
+	 * acquisitions in irq context, which should be detected on
+	 * rollback or commit.
+	 */
+	unsigned int hist_id;
+
 	/*
 	 * Separate stack_trace data. This will be used at commit step.
 	 */
...
@@ -854,6 +854,9 @@ struct task_struct {
 	unsigned int xhlock_idx;
 	/* For restoring at history boundaries */
 	unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
+	unsigned int hist_id;
+	/* For overwrite check at each context exit */
+	unsigned int hist_id_save[XHLOCK_CTX_NR];
 #endif
 #ifdef CONFIG_UBSAN
...
@@ -4680,6 +4680,17 @@ EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
  */
 static atomic_t cross_gen_id; /* Can be wrapped */

+/*
+ * Make an entry of the ring buffer invalid.
+ */
+static inline void invalidate_xhlock(struct hist_lock *xhlock)
+{
+	/*
+	 * Normally, xhlock->hlock.instance must be !NULL.
+	 */
+	xhlock->hlock.instance = NULL;
+}
+
 /*
  * Lock history stacks; we have 3 nested lock history stacks:
  *
@@ -4712,14 +4723,28 @@ static atomic_t cross_gen_id; /* Can be wrapped */
  */
 void crossrelease_hist_start(enum xhlock_context_t c)
 {
-	if (current->xhlocks)
-		current->xhlock_idx_hist[c] = current->xhlock_idx;
+	struct task_struct *cur = current;
+
+	if (cur->xhlocks) {
+		cur->xhlock_idx_hist[c] = cur->xhlock_idx;
+		cur->hist_id_save[c] = cur->hist_id;
+	}
 }

 void crossrelease_hist_end(enum xhlock_context_t c)
 {
-	if (current->xhlocks)
-		current->xhlock_idx = current->xhlock_idx_hist[c];
+	struct task_struct *cur = current;
+
+	if (cur->xhlocks) {
+		unsigned int idx = cur->xhlock_idx_hist[c];
+		struct hist_lock *h = &xhlock(idx);
+
+		cur->xhlock_idx = idx;
+
+		/* Check if the ring was overwritten. */
+		if (h->hist_id != cur->hist_id_save[c])
+			invalidate_xhlock(h);
+	}
 }

 static int cross_lock(struct lockdep_map *lock)
@@ -4765,6 +4790,7 @@ static inline int depend_after(struct held_lock *hlock)
  * Check if the xhlock is valid, which would be false if,
  *
  * 1. Has not been used after initialization yet.
+ * 2. Got invalidated.
  *
  * Remind hist_lock is implemented as a ring buffer.
  */
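
Both invalid cases collapse into a single pointer test: entries start out
zeroed by the kzalloc() in lockdep_init_task() (case 1), and
invalidate_xhlock() NULLs the instance pointer (case 2). A reduced,
compilable sketch of that check, with stand-in types (the real helper
operates on the kernel structures):

	#include <stdbool.h>
	#include <stddef.h>

	/* Reduced stand-ins for the kernel types, illustration only. */
	struct lockdep_map { int dummy; };
	struct held_lock { struct lockdep_map *instance; };
	struct hist_lock { struct held_lock hlock; };

	/*
	 * Cases 1 and 2 both leave hlock.instance NULL, so validity
	 * reduces to one pointer test.
	 */
	static bool xhlock_valid(struct hist_lock *xhlock)
	{
		return xhlock->hlock.instance != NULL;
	}

	int main(void)
	{
		struct hist_lock h = { 0 };	/* as after kzalloc(): invalid */
		return xhlock_valid(&h);	/* 0 until the entry is used */
	}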
@@ -4796,6 +4822,7 @@ static void add_xhlock(struct held_lock *hlock)
 	/* Initialize hist_lock's members */
 	xhlock->hlock = *hlock;
+	xhlock->hist_id = current->hist_id++;
 	xhlock->trace.nr_entries = 0;
 	xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
@@ -4934,6 +4961,7 @@ static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock)
 static void commit_xhlocks(struct cross_lock *xlock)
 {
 	unsigned int cur = current->xhlock_idx;
+	unsigned int prev_hist_id = xhlock(cur).hist_id;
 	unsigned int i;

 	if (!graph_lock())
@@ -4951,6 +4979,17 @@ static void commit_xhlocks(struct cross_lock *xlock)
 			if (!same_context_xhlock(xhlock))
 				break;

+			/*
+			 * Filter out the cases where the ring buffer was
+			 * overwritten and the previous entry has a bigger
+			 * hist_id than the following one, which is impossible
+			 * otherwise.
+			 */
+			if (unlikely(before(xhlock->hist_id, prev_hist_id)))
+				break;
+
+			prev_hist_id = xhlock->hist_id;
+
 			/*
 			 * commit_xhlock() returns 0 with graph_lock already
 			 * released if fail.
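
Note that hist_id is a plain unsigned int that is allowed to wrap, so the
before() comparison above cannot be a naive '<'. The before() helper
itself is not shown in this hunk, so its exact definition here is an
assumption; a wrap-safe ordering test in the style of the kernel's
time_before() idiom would look like this sketch:

	#include <stdio.h>

	/*
	 * Wrap-safe "a comes before b" for a free-running unsigned
	 * counter: the signed difference is negative iff a precedes b,
	 * as long as the two ids are less than 2^31 apart.
	 */
	static inline int before(unsigned int a, unsigned int b)
	{
		return (int)(a - b) < 0;
	}

	int main(void)
	{
		/* Works across the 32-bit wrap point: */
		printf("%d\n", before(0xfffffffeU, 2));	/* 1 */
		printf("%d\n", before(2, 0xfffffffeU));	/* 0 */
		return 0;
	}

The signed-difference trick stays correct here because the ring buffer is
far smaller than the counter's period, so valid neighbouring entries can
never be 2^31 ids apart.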
@@ -5024,9 +5063,12 @@ void lockdep_init_task(struct task_struct *task)
 	int i;

 	task->xhlock_idx = UINT_MAX;
+	task->hist_id = 0;

-	for (i = 0; i < XHLOCK_CTX_NR; i++)
+	for (i = 0; i < XHLOCK_CTX_NR; i++) {
 		task->xhlock_idx_hist[i] = UINT_MAX;
+		task->hist_id_save[i] = 0;
+	}

 	task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
 				GFP_KERNEL);
...