Commit 1ca7d67c authored by John Stultz, committed by Ingo Molnar

seqcount: Add lockdep functionality to seqcount/seqlock structures

Currently seqlocks and seqcounts don't support lockdep.

After running across a seqcount-related deadlock in the timekeeping
code, I used a less-refined and more focused variant of this patch
to narrow down the cause of the issue.

This is a first-pass attempt to properly enable lockdep functionality
on seqlocks and seqcounts.

Since seqcounts are used in the vdso gettimeofday code, I've provided
non-lockdep accessors for those needs.
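
For illustration, a minimal reader loop (hypothetical "foo"/"foo_read"
names, not from this patch): ordinary kernel code keeps using
read_seqcount_begin(), which with this patch also informs lockdep of the
read side, while the vdso, which executes in a userspace mapping and
cannot touch lockdep state, switches to the new
read_seqcount_begin_no_lockdep():

    /* sketch only -- "foo" and "foo_read" are made-up names */
    struct foo {
        seqcount_t seq;
        u64 a, b;
    };

    static u64 foo_read(struct foo *f)
    {
        unsigned start;
        u64 a, b;

        do {
            /* vdso code would use read_seqcount_begin_no_lockdep() here */
            start = read_seqcount_begin(&f->seq);
            a = f->a;
            b = f->b;
        } while (read_seqcount_retry(&f->seq, start));

        return a + b;
    }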

I've also handled one case where there were nested seqlock writers;
there may be more such edge cases.
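
As a sketch of that nested-writer case (mirroring the __d_move() change
below, where both d_seq counters are written with both dentries locked):

    /* sketch only: two write sides of the same lock class held at once */
    write_seqcount_begin(&dentry->d_seq);
    /* the second acquisition must be annotated, or lockdep reports
     * recursive locking of the seqcount class: */
    write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
    /* ... move the dentry ... */
    write_seqcount_end(&target->d_seq);
    write_seqcount_end(&dentry->d_seq);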

Comments and feedback would be appreciated!
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: netdev@vger.kernel.org
Link: http://lkml.kernel.org/r/1381186321-4906-3-git-send-email-john.stultz@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 827da44c
arch/x86/vdso/vclock_gettime.c
@@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts)
 	ts->tv_nsec = 0;
 	do {
-		seq = read_seqcount_begin(&gtod->seq);
+		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->wall_time_sec;
 		ns = gtod->wall_time_snsec;
@@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts)
 	ts->tv_nsec = 0;
 	do {
-		seq = read_seqcount_begin(&gtod->seq);
+		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
 		ts->tv_sec = gtod->monotonic_time_sec;
 		ns = gtod->monotonic_time_snsec;
@@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts)
 {
 	unsigned long seq;
 	do {
-		seq = read_seqcount_begin(&gtod->seq);
+		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
 		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
 		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts)
 {
 	unsigned long seq;
 	do {
-		seq = read_seqcount_begin(&gtod->seq);
+		seq = read_seqcount_begin_no_lockdep(&gtod->seq);
 		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
 		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
fs/dcache.c
@@ -2574,7 +2574,7 @@ static void __d_move(struct dentry * dentry, struct dentry * target)
 	dentry_lock_for_move(dentry, target);
 	write_seqcount_begin(&dentry->d_seq);
-	write_seqcount_begin(&target->d_seq);
+	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
 	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
@@ -2706,7 +2706,7 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
 	dentry_lock_for_move(anon, dentry);
 	write_seqcount_begin(&dentry->d_seq);
-	write_seqcount_begin(&anon->d_seq);
+	write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED);
 	dparent = dentry->d_parent;
fs/fs_struct.c
@@ -161,6 +161,6 @@ EXPORT_SYMBOL(current_umask);
 struct fs_struct init_fs = {
 	.users		= 1,
 	.lock		= __SPIN_LOCK_UNLOCKED(init_fs.lock),
-	.seq		= SEQCNT_ZERO,
+	.seq		= SEQCNT_ZERO(init_fs.seq),
 	.umask		= 0022,
 };
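
(For illustration: SEQCNT_ZERO() now takes the variable being
initialized, so the lockdep class is named after it. A hypothetical
out-of-tree user would convert like this -- "my_seq" and "bar" are
made-up names:)

    static seqcount_t my_seq = SEQCNT_ZERO(my_seq);	/* was: SEQCNT_ZERO */

    static struct bar {
        seqcount_t seq;
    } b = {
        .seq = SEQCNT_ZERO(b.seq),	/* same pattern as init_fs.seq above */
    };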
include/linux/init_task.h
@@ -32,10 +32,10 @@ extern struct fs_struct init_fs;
 #endif
 #ifdef CONFIG_CPUSETS
-#define INIT_CPUSET_SEQ \
-	.mems_allowed_seq = SEQCNT_ZERO,
+#define INIT_CPUSET_SEQ(tsk) \
+	.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
 #else
-#define INIT_CPUSET_SEQ
+#define INIT_CPUSET_SEQ(tsk)
 #endif
 #define INIT_SIGNALS(sig) { \
@@ -220,7 +220,7 @@ extern struct task_group root_task_group;
 	INIT_FTRACE_GRAPH \
 	INIT_TRACE_RECURSION \
 	INIT_TASK_RCU_PREEMPT(tsk) \
-	INIT_CPUSET_SEQ \
+	INIT_CPUSET_SEQ(tsk) \
 	INIT_VTIME(tsk) \
 }
include/linux/lockdep.h
@@ -497,6 +497,10 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #define rwlock_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
 #define rwlock_release(l, n, i)		lock_release(l, n, i)
+#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
+#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define seqcount_release(l, n, i)		lock_release(l, n, i)
+
 #define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
 #define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
 #define mutex_release(l, n, i)			lock_release(l, n, i)
@@ -504,11 +508,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
 #define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
 #define rwsem_acquire_read(l, s, t, i)	lock_acquire_shared(l, s, t, NULL, i)
-# define rwsem_release(l, n, i)		lock_release(l, n, i)
+#define rwsem_release(l, n, i)			lock_release(l, n, i)
 #define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
-# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
+#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
 #ifdef CONFIG_PROVE_LOCKING
 # define might_lock(lock) \
include/linux/seqlock.h
@@ -34,6 +34,7 @@
 #include <linux/spinlock.h>
 #include <linux/preempt.h>
+#include <linux/lockdep.h>
 #include <asm/processor.h>

 /*
@@ -44,10 +45,50 @@
  */
 typedef struct seqcount {
 	unsigned sequence;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } seqcount_t;

-#define SEQCNT_ZERO { 0 }
-#define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
+static inline void __seqcount_init(seqcount_t *s, const char *name,
+				   struct lock_class_key *key)
+{
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	lockdep_init_map(&s->dep_map, name, key, 0);
+	s->sequence = 0;
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SEQCOUNT_DEP_MAP_INIT(lockname) \
+		.dep_map = { .name = #lockname } \
+
+# define seqcount_init(s)				\
+	do {						\
+		static struct lock_class_key __key;	\
+		__seqcount_init((s), #s, &__key);	\
+	} while (0)
+
+static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
+{
+	seqcount_t *l = (seqcount_t *)s;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
+	seqcount_release(&l->dep_map, 1, _RET_IP_);
+	local_irq_restore(flags);
+}
+
+#else
+# define SEQCOUNT_DEP_MAP_INIT(lockname)
+# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
+# define seqcount_lockdep_reader_access(x)
+#endif
+
+#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}

 /**
  * __read_seqcount_begin - begin a seq-read critical section (without barrier)
@@ -75,6 +116,22 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
 	return ret;
 }

+/**
+ * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
+ * @s: pointer to seqcount_t
+ * Returns: count to be passed to read_seqcount_retry
+ *
+ * read_seqcount_begin_no_lockdep opens a read critical section of the given
+ * seqcount, but without any lockdep checking. Validity of the critical
+ * section is tested by checking read_seqcount_retry().
+ */
+static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
+{
+	unsigned ret = __read_seqcount_begin(s);
+	smp_rmb();
+	return ret;
+}
+
 /**
  * read_seqcount_begin - begin a seq-read critical section
  * @s: pointer to seqcount_t
@@ -86,9 +143,8 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
  */
 static inline unsigned read_seqcount_begin(const seqcount_t *s)
 {
-	unsigned ret = __read_seqcount_begin(s);
-	smp_rmb();
-	return ret;
+	seqcount_lockdep_reader_access(s);
+	return read_seqcount_begin_no_lockdep(s);
 }

 /**
@@ -108,6 +164,8 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
 static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
 	unsigned ret = ACCESS_ONCE(s->sequence);
+
+	seqcount_lockdep_reader_access(s);
 	smp_rmb();
 	return ret & ~1;
 }
@@ -152,14 +210,21 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
  * Sequence counter only version assumes that callers are using their
  * own mutexing.
  */
-static inline void write_seqcount_begin(seqcount_t *s)
+static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
 {
 	s->sequence++;
 	smp_wmb();
+	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+}
+
+static inline void write_seqcount_begin(seqcount_t *s)
+{
+	write_seqcount_begin_nested(s, 0);
 }

 static inline void write_seqcount_end(seqcount_t *s)
 {
+	seqcount_release(&s->dep_map, 1, _RET_IP_);
 	smp_wmb();
 	s->sequence++;
 }
@@ -188,7 +253,7 @@ typedef struct {
  */
 #define __SEQLOCK_UNLOCKED(lockname)			\
 	{						\
-		.seqcount = SEQCNT_ZERO,		\
+		.seqcount = SEQCNT_ZERO(lockname),	\
 		.lock =	__SPIN_LOCK_UNLOCKED(lockname)	\
 	}
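
(For illustration: dynamically initialized seqcounts keep using
seqcount_init(), which under CONFIG_DEBUG_LOCK_ALLOC now plants a static
lock_class_key at each init site, so every call site gets its own
lockdep class. "baz" below is a made-up example:)

    /* sketch only -- "baz" is a made-up structure */
    struct baz {
        seqcount_t seq;
        unsigned long data;
    };

    static void baz_setup(struct baz *z)
    {
        seqcount_init(&z->seq);	/* registers z->seq's dep_map with lockdep */
        z->data = 0;
    }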
mm/filemap_xip.c
@@ -26,7 +26,7 @@
  * of ZERO_PAGE(), such as /dev/zero
  */
 static DEFINE_MUTEX(xip_sparse_mutex);
-static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
+static seqcount_t xip_sparse_seq = SEQCNT_ZERO(xip_sparse_seq);
 static struct page *__xip_sparse_page;

 /* called under xip_sparse_mutex */
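
(For illustration: full seqlock_t users get the same coverage, since
__SEQLOCK_UNLOCKED() now expands to SEQCNT_ZERO(lockname). A
hypothetical counter protected by a seqlock:)

    static DEFINE_SEQLOCK(counter_lock);
    static u64 counter;

    static void counter_inc(void)
    {
        write_seqlock(&counter_lock);	/* spin_lock + write_seqcount_begin() */
        counter++;
        write_sequnlock(&counter_lock);
    }

    static u64 counter_get(void)
    {
        unsigned seq;
        u64 val;

        do {
            seq = read_seqbegin(&counter_lock);
            val = counter;
        } while (read_seqretry(&counter_lock, seq));

        return val;
    }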