Commit 419217cb authored by Linus Torvalds


Merge branch 'v2.6.24-lockdep' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep

* 'v2.6.24-lockdep' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep:
  lockdep: annotate dir vs file i_mutex
  lockdep: per filesystem inode lock class
  lockdep: annotate kprobes irq fiddling
  lockdep: annotate rcu_read_{,un}lock{,_bh}
  lockdep: annotate journal_start()
  lockdep: s390: connect the sysexit hook
  lockdep: x86_64: connect the sysexit hook
  lockdep: i386: connect the sysexit hook
  lockdep: syscall exit check
  lockdep: fixup mutex annotations
  lockdep: fix mismatched lockdep_depth/curr_chain_hash
  lockdep: Avoid /proc/lockdep & lock_stat infinite output
  lockdep: maintainers
parents 4937ce87 14358e6d
@@ -2404,6 +2404,15 @@ M: khali@linux-fr.org
 L:	lm-sensors@lm-sensors.org
 S:	Maintained
 
+LOCKDEP AND LOCKSTAT
+P:	Peter Zijlstra
+M:	peterz@infradead.org
+P:	Ingo Molnar
+M:	mingo@redhat.com
+L:	linux-kernel@vger.kernel.org
+T:	git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
+S:	Maintained
+
 LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
 P:	Richard Russon (FlatCap)
 M:	ldm@flatcap.org
......
@@ -68,9 +68,15 @@ STACK_SIZE = 1 << STACK_SHIFT
 	l	%r1,BASED(.Ltrace_irq_off)
 	basr	%r14,%r1
 	.endm
+
+	.macro	LOCKDEP_SYS_EXIT
+	l	%r1,BASED(.Llockdep_sys_exit)
+	basr	%r14,%r1
+	.endm
 #else
 #define TRACE_IRQS_ON
 #define TRACE_IRQS_OFF
+#define LOCKDEP_SYS_EXIT
 #endif
 
 /*
@@ -260,6 +266,7 @@ sysc_return:
 	bno	BASED(sysc_leave)
 	tm	__TI_flags+3(%r9),_TIF_WORK_SVC
 	bnz	BASED(sysc_work)	# there is work to do (signals etc.)
+	LOCKDEP_SYS_EXIT
 sysc_leave:
 	RESTORE_ALL __LC_RETURN_PSW,1
 
@@ -283,6 +290,7 @@ sysc_work:
 	bo	BASED(sysc_restart)
 	tm	__TI_flags+3(%r9),_TIF_SINGLE_STEP
 	bo	BASED(sysc_singlestep)
+	LOCKDEP_SYS_EXIT
 	b	BASED(sysc_leave)
 
 #
@@ -572,6 +580,7 @@ io_return:
 #endif
 	tm	__TI_flags+3(%r9),_TIF_WORK_INT
 	bnz	BASED(io_work)		# there is work to do (signals etc.)
+	LOCKDEP_SYS_EXIT
 io_leave:
 	RESTORE_ALL __LC_RETURN_PSW,0
 io_done:
@@ -618,6 +627,7 @@ io_work_loop:
 	bo	BASED(io_reschedule)
 	tm	__TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
 	bnz	BASED(io_sigpending)
+	LOCKDEP_SYS_EXIT
 	b	BASED(io_leave)
 
 #
@@ -1040,6 +1050,8 @@ cleanup_io_leave_insn:
 .Ltrace_irq_on:	.long	trace_hardirqs_on
 .Ltrace_irq_off:
 		.long	trace_hardirqs_off
+.Llockdep_sys_exit:
+		.long	lockdep_sys_exit
 #endif
 .Lcritical_start:
 		.long	__critical_start + 0x80000000
......
@@ -66,9 +66,14 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
 	.macro	TRACE_IRQS_OFF
 	brasl	%r14,trace_hardirqs_off
 	.endm
+
+	.macro	LOCKDEP_SYS_EXIT
+	brasl	%r14,lockdep_sys_exit
+	.endm
 #else
 #define TRACE_IRQS_ON
 #define TRACE_IRQS_OFF
+#define LOCKDEP_SYS_EXIT
 #endif
 
 	.macro	STORE_TIMER lc_offset
@@ -255,6 +260,7 @@ sysc_return:
 	jno	sysc_leave
 	tm	__TI_flags+7(%r9),_TIF_WORK_SVC
 	jnz	sysc_work	# there is work to do (signals etc.)
+	LOCKDEP_SYS_EXIT
 sysc_leave:
 	RESTORE_ALL __LC_RETURN_PSW,1
 
@@ -278,6 +284,7 @@ sysc_work:
 	jo	sysc_restart
 	tm	__TI_flags+7(%r9),_TIF_SINGLE_STEP
 	jo	sysc_singlestep
+	LOCKDEP_SYS_EXIT
 	j	sysc_leave
 
 #
@@ -558,6 +565,7 @@ io_return:
 #endif
 	tm	__TI_flags+7(%r9),_TIF_WORK_INT
 	jnz	io_work		# there is work to do (signals etc.)
+	LOCKDEP_SYS_EXIT
 io_leave:
 	RESTORE_ALL __LC_RETURN_PSW,0
 io_done:
@@ -605,6 +613,7 @@ io_work_loop:
 	jo	io_reschedule
 	tm	__TI_flags+7(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
 	jnz	io_sigpending
+	LOCKDEP_SYS_EXIT
 	j	io_leave
 
 #
......
@@ -251,6 +251,7 @@ check_userspace:
 	jb resume_kernel		# not returning to v8086 or userspace
 
 ENTRY(resume_userspace)
+	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
@@ -338,6 +339,7 @@ sysenter_past_esp:
 	jae syscall_badsys
 	call *sys_call_table(,%eax,4)
 	movl %eax,PT_EAX(%esp)
+	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
@@ -377,6 +379,7 @@ syscall_call:
 	call *sys_call_table(,%eax,4)
 	movl %eax,PT_EAX(%esp)		# store the return value
 syscall_exit:
+	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
@@ -467,6 +470,7 @@ work_pending:
 	jz work_notifysig
 work_resched:
 	call schedule
+	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
 					# setting need_resched or sigpending
 					# between sampling and the iret
......
@@ -244,6 +244,7 @@ ret_from_sys_call:
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi:	flagmask */
 sysret_check:
+	LOCKDEP_SYS_EXIT
 	GET_THREAD_INFO(%rcx)
 	cli
 	TRACE_IRQS_OFF
@@ -333,6 +334,7 @@ int_ret_from_sys_call:
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi:	mask to check */
 int_with_check:
+	LOCKDEP_SYS_EXIT_IRQ
 	GET_THREAD_INFO(%rcx)
 	movl threadinfo_flags(%rcx),%edx
 	andl %edi,%edx
@@ -544,11 +546,13 @@ exit_intr:
 retint_with_reschedule:
 	movl $_TIF_WORK_MASK,%edi
 retint_check:
+	LOCKDEP_SYS_EXIT_IRQ
 	movl threadinfo_flags(%rcx),%edx
 	andl %edi,%edx
 	CFI_REMEMBER_STATE
 	jnz  retint_careful
-retint_swapgs:
+
+retint_swapgs:		/* return to user-space */
 	/*
 	 * The iretq could re-enable interrupts:
 	 */
@@ -557,7 +561,7 @@ retint_swapgs:
 	swapgs
 	jmp restore_args
 
-retint_restore_args:
+retint_restore_args:	/* return to kernel space */
 	cli
 	/*
 	 * The iretq could re-enable interrupts:
@@ -866,26 +870,21 @@ error_sti:
 	movq ORIG_RAX(%rsp),%rsi	/* get error code */
 	movq $-1,ORIG_RAX(%rsp)
 	call *%rax
 	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
 error_exit:
 	movl %ebx,%eax
 	RESTORE_REST
 	cli
 	TRACE_IRQS_OFF
 	GET_THREAD_INFO(%rcx)
 	testl %eax,%eax
 	jne  retint_kernel
+	LOCKDEP_SYS_EXIT_IRQ
 	movl  threadinfo_flags(%rcx),%edx
 	movl  $_TIF_WORK_MASK,%edi
 	andl  %edi,%edx
 	jnz  retint_careful
-	/*
-	 * The iret might restore flags:
-	 */
-	TRACE_IRQS_IRETQ
-	swapgs
-	RESTORE_ARGS 0,8,0
-	jmp iret_label
+	jmp   retint_swapgs
 	CFI_ENDPROC
 
 error_kernelspace:
......
@@ -557,6 +557,12 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 	resume_execution(cur, regs, kcb);
 	regs->eflags |= kcb->kprobe_saved_eflags;
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+	if (raw_irqs_disabled_flags(regs->eflags))
+		trace_hardirqs_off();
+	else
+		trace_hardirqs_on();
+#endif
 
 	/*Restore back the original saved kprobes variables and continue. */
 	if (kcb->kprobe_status == KPROBE_REENTER) {
@@ -694,6 +700,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
 	       MIN_STACK_SIZE(addr));
 	regs->eflags &= ~IF_MASK;
+	trace_hardirqs_off();
 	regs->eip = (unsigned long)(jp->entry);
 	return 1;
 }
......
@@ -544,6 +544,12 @@ int __kprobes post_kprobe_handler(struct pt_regs *regs)
 	resume_execution(cur, regs, kcb);
 	regs->eflags |= kcb->kprobe_saved_rflags;
+#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+	if (raw_irqs_disabled_flags(regs->eflags))
+		trace_hardirqs_off();
+	else
+		trace_hardirqs_on();
+#endif
 
 	/* Restore the original saved kprobes variables and continue. */
 	if (kcb->kprobe_status == KPROBE_REENTER) {
@@ -684,6 +690,7 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
 	       MIN_STACK_SIZE(addr));
 	regs->eflags &= ~IF_MASK;
+	trace_hardirqs_off();
 	regs->rip = (unsigned long)(jp->entry);
 	return 1;
 }
......
@@ -50,6 +50,10 @@
 	thunk trace_hardirqs_on_thunk,trace_hardirqs_on
 	thunk trace_hardirqs_off_thunk,trace_hardirqs_off
 #endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	thunk lockdep_sys_exit_thunk,lockdep_sys_exit
+#endif
 
 	/* SAVE_ARGS below is used only for the .cfi directives it contains. */
 	CFI_STARTPROC
......
@@ -142,6 +142,15 @@ static struct inode *alloc_inode(struct super_block *sb)
 		return NULL;
 	}
 
+	spin_lock_init(&inode->i_lock);
+	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
+
+	mutex_init(&inode->i_mutex);
+	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);
+
+	init_rwsem(&inode->i_alloc_sem);
+	lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);
+
 	mapping->a_ops = &empty_aops;
 	mapping->host = inode;
 	mapping->flags = 0;
@@ -190,8 +199,6 @@ void inode_init_once(struct inode *inode)
 	INIT_HLIST_NODE(&inode->i_hash);
 	INIT_LIST_HEAD(&inode->i_dentry);
 	INIT_LIST_HEAD(&inode->i_devices);
-	mutex_init(&inode->i_mutex);
-	init_rwsem(&inode->i_alloc_sem);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
 	rwlock_init(&inode->i_data.tree_lock);
 	spin_lock_init(&inode->i_data.i_mmap_lock);
@@ -199,7 +206,6 @@ void inode_init_once(struct inode *inode)
 	spin_lock_init(&inode->i_data.private_lock);
 	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
 	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
-	spin_lock_init(&inode->i_lock);
 	i_size_ordered_init(inode);
 #ifdef CONFIG_INOTIFY
 	INIT_LIST_HEAD(&inode->inotify_watches);
@@ -561,6 +567,18 @@ EXPORT_SYMBOL(new_inode);
 void unlock_new_inode(struct inode *inode)
 {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct file_system_type *type = inode->i_sb->s_type;
+	/*
+	 * ensure nobody is actually holding i_mutex
+	 */
+	mutex_destroy(&inode->i_mutex);
+	mutex_init(&inode->i_mutex);
+	if (inode->i_mode & S_IFDIR)
+		lockdep_set_class(&inode->i_mutex, &type->i_mutex_dir_key);
+	else
+		lockdep_set_class(&inode->i_mutex, &type->i_mutex_key);
+#endif
 	/*
 	 * This is special!  We do not need the spinlock
 	 * when clearing I_LOCK, because we're guaranteed
......
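(Illustration, not from the patch: frob_child() is an invented name. The point of the dir/file split in unlock_new_inode() above is that a directory's i_mutex and a file's i_mutex now belong to different lockdep classes for each filesystem, so the everyday parent-then-child nesting is not misread as a class locked against itself:)

/* Hypothetical caller, for illustration only. */
static void frob_child(struct inode *dir, struct inode *file)
{
	mutex_lock(&dir->i_mutex);	/* class: s_type->i_mutex_dir_key */
	mutex_lock(&file->i_mutex);	/* class: s_type->i_mutex_key */
	/* ... operate on the child under the parent's protection ... */
	mutex_unlock(&file->i_mutex);
	mutex_unlock(&dir->i_mutex);
}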
@@ -233,6 +233,8 @@ static int start_this_handle(journal_t *journal, handle_t *handle)
 	return ret;
 }
 
+static struct lock_class_key jbd_handle_key;
+
 /* Allocate a new handle.  This should probably be in a slab... */
 static handle_t *new_handle(int nblocks)
 {
@@ -243,6 +245,8 @@ static handle_t *new_handle(int nblocks)
 	handle->h_buffer_credits = nblocks;
 	handle->h_ref = 1;
 
+	lockdep_init_map(&handle->h_lockdep_map, "jbd_handle", &jbd_handle_key, 0);
+
 	return handle;
 }
@@ -286,6 +290,9 @@ handle_t *journal_start(journal_t *journal, int nblocks)
 		current->journal_info = NULL;
 		handle = ERR_PTR(err);
 	}
+
+	lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+
 	return handle;
 }
@@ -1411,6 +1418,8 @@ int journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
 
+	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+
 	jbd_free_handle(handle);
 	return err;
 }
......
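(Illustration, not from the patch, assuming a typical caller: between journal_start() and journal_stop(), lockdep now treats the running handle as a held "jbd_handle" pseudo-lock, so every lock taken inside the transaction is ordered against it and nested-start style deadlocks become reportable:)

handle_t *handle = journal_start(journal, 1);	/* lock_acquire(&handle->h_lockdep_map) */
if (IS_ERR(handle))
	return PTR_ERR(handle);
/* ... journaled updates; locks taken here nest inside "jbd_handle" ... */
err = journal_stop(handle);			/* lock_release(&handle->h_lockdep_map) */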
@@ -160,4 +160,17 @@ static inline int raw_irqs_disabled(void)
 # define TRACE_IRQS_OFF
 #endif
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCKDEP_SYS_EXIT			\
+	pushl %eax;				\
+	pushl %ecx;				\
+	pushl %edx;				\
+	call lockdep_sys_exit;			\
+	popl %edx;				\
+	popl %ecx;				\
+	popl %eax;
+#else
+# define LOCKDEP_SYS_EXIT
+#endif
+
 #endif
@@ -137,6 +137,20 @@ static inline void halt(void)
 #  define TRACE_IRQS_ON
 #  define TRACE_IRQS_OFF
 # endif
+# ifdef CONFIG_DEBUG_LOCK_ALLOC
+#  define LOCKDEP_SYS_EXIT	call lockdep_sys_exit_thunk
+#  define LOCKDEP_SYS_EXIT_IRQ	\
+	TRACE_IRQS_ON; \
+	sti; \
+	SAVE_REST; \
+	LOCKDEP_SYS_EXIT; \
+	RESTORE_REST; \
+	cli; \
+	TRACE_IRQS_OFF;
+# else
+#  define LOCKDEP_SYS_EXIT
+#  define LOCKDEP_SYS_EXIT_IRQ
+# endif
 #endif
 #endif
@@ -1302,8 +1302,14 @@ struct file_system_type {
 	struct module *owner;
 	struct file_system_type * next;
 	struct list_head fs_supers;
+
 	struct lock_class_key s_lock_key;
 	struct lock_class_key s_umount_key;
+
+	struct lock_class_key i_lock_key;
+	struct lock_class_key i_mutex_key;
+	struct lock_class_key i_mutex_dir_key;
+	struct lock_class_key i_alloc_sem_key;
 };
 
 extern int get_sb_bdev(struct file_system_type *fs_type,
......
@@ -30,6 +30,7 @@
 #include <linux/bit_spinlock.h>
 #include <linux/mutex.h>
 #include <linux/timer.h>
+#include <linux/lockdep.h>
 
 #include <asm/semaphore.h>
 #endif
@@ -396,6 +397,10 @@ struct handle_s
 	unsigned int	h_sync:		1;	/* sync-on-close */
 	unsigned int	h_jdata:	1;	/* force data journaling */
 	unsigned int	h_aborted:	1;	/* fatal error on handle */
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	h_lockdep_map;
+#endif
 };
......
@@ -238,6 +238,7 @@ extern void lockdep_info(void);
 extern void lockdep_reset(void);
 extern void lockdep_reset_lock(struct lockdep_map *lock);
 extern void lockdep_free_key_range(void *start, unsigned long size);
+extern void lockdep_sys_exit(void);
 
 extern void lockdep_off(void);
 extern void lockdep_on(void);
@@ -251,6 +252,13 @@ extern void lockdep_on(void);
 extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
 			     struct lock_class_key *key, int subclass);
 
+/*
+ * To initialize a lockdep_map statically use this macro.
+ * Note that _name must not be NULL.
+ */
+#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
+	{ .name = (_name), .key = (void *)(_key), }
+
 /*
  * Reinitialize a lock key - for cases where there is special locking or
  * special initialization of locks so that the validator gets the scope
@@ -317,6 +325,7 @@ static inline void lockdep_on(void)
 # define INIT_LOCKDEP
 # define lockdep_reset()		do { debug_locks = 1; } while (0)
 # define lockdep_free_key_range(start, size)	do { } while (0)
+# define lockdep_sys_exit()		do { } while (0)
 /*
  * The class key takes no space if lockdep is disabled:
 */
......
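(Usage note: STATIC_LOCKDEP_MAP_INIT() pairs a statically allocated lockdep_map with its own class key; kernel/rcupdate.c uses it later in this merge exactly like this:)

static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);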
@@ -120,14 +120,17 @@ static inline int fastcall mutex_is_locked(struct mutex *lock)
  * See kernel/mutex.c for detailed documentation of these APIs.
  * Also see Documentation/mutex-design.txt.
  */
-extern void fastcall mutex_lock(struct mutex *lock);
-extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
 extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
+
+#define mutex_lock(lock) mutex_lock_nested(lock, 0)
+#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
 #else
+extern void fastcall mutex_lock(struct mutex *lock);
+extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
+
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 #endif
......
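(A sketch of what the hunk above changes; some_mutex is a placeholder name. Making mutex_lock() a macro under CONFIG_DEBUG_LOCK_ALLOC moves where lockdep samples the acquisition IP:)

/* Written in a driver: */
mutex_lock(&some_mutex);
/* With CONFIG_DEBUG_LOCK_ALLOC=y this preprocesses to
 *	mutex_lock_nested(&some_mutex, 0);
 * so the _RET_IP_ recorded by the reworked kernel/mutex.c slowpath is
 * the real call site, not a shared out-of-line mutex_lock() body. */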
@@ -41,6 +41,7 @@
 #include <linux/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
+#include <linux/lockdep.h>
 
 /**
  * struct rcu_head - callback structure for use with RCU
@@ -133,6 +134,15 @@ static inline void rcu_bh_qsctr_inc(int cpu)
 extern int rcu_pending(int cpu);
 extern int rcu_needs_cpu(int cpu);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire()	lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire()	do { } while (0)
+# define rcu_read_release()	do { } while (0)
+#endif
+
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
@@ -166,6 +176,7 @@ extern int rcu_needs_cpu(int cpu);
 	do { \
 		preempt_disable(); \
 		__acquire(RCU); \
+		rcu_read_acquire(); \
 	} while(0)
 
 /**
@@ -175,6 +186,7 @@ extern int rcu_needs_cpu(int cpu);
  */
 #define rcu_read_unlock() \
 	do { \
+		rcu_read_release(); \
 		__release(RCU); \
 		preempt_enable(); \
 	} while(0)
@@ -204,6 +216,7 @@ extern int rcu_needs_cpu(int cpu);
 	do { \
 		local_bh_disable(); \
 		__acquire(RCU_BH); \
+		rcu_read_acquire(); \
 	} while(0)
 
 /*
@@ -213,6 +226,7 @@ extern int rcu_needs_cpu(int cpu);
  */
 #define rcu_read_unlock_bh() \
 	do { \
+		rcu_read_release(); \
 		__release(RCU_BH); \
 		local_bh_enable(); \
 	} while(0)
......
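(Illustration, not from the patch: gp, do_something() and the error path are invented. With the annotation above, every rcu_read_lock() does a lock_acquire() on rcu_lock_map, so an unbalanced read-side section becomes visible to lockdep instead of silently extending the critical section:)

rcu_read_lock();		/* lock_acquire(&rcu_lock_map, ...) */
p = rcu_dereference(gp);
if (!p) {
	rcu_read_unlock();	/* forgetting this unlock would leave
				 * rcu_lock_map "held"; lockdep_sys_exit()
				 * would flag it on return to user space */
	return -ENOENT;
}
do_something(p);
rcu_read_unlock();		/* lock_release(&rcu_lock_map, ...) */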
@@ -1521,7 +1521,7 @@ static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
 }
 
 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
-		struct held_lock *hlock, int chain_head)
+		struct held_lock *hlock, int chain_head, u64 chain_key)
 {
 	/*
 	 * Trylock needs to maintain the stack of held locks, but it
@@ -1534,7 +1534,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 	 * graph_lock for us)
 	 */
 	if (!hlock->trylock && (hlock->check == 2) &&
-	    lookup_chain_cache(curr->curr_chain_key, hlock->class)) {
+	    lookup_chain_cache(chain_key, hlock->class)) {
 		/*
 		 * Check whether last held lock:
 		 *
@@ -1576,7 +1576,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 #else
 static inline int validate_chain(struct task_struct *curr,
 		struct lockdep_map *lock, struct held_lock *hlock,
-		int chain_head)
+		int chain_head, u64 chain_key)
 {
 	return 1;
 }
@@ -2450,11 +2450,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		chain_head = 1;
 	}
 	chain_key = iterate_chain_key(chain_key, id);
-	curr->curr_chain_key = chain_key;
 
-	if (!validate_chain(curr, lock, hlock, chain_head))
+	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
 		return 0;
 
+	curr->curr_chain_key = chain_key;
 	curr->lockdep_depth++;
 	check_chain_key(curr);
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -3199,3 +3199,19 @@ void debug_show_held_locks(struct task_struct *task)
 }
 
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
+
+void lockdep_sys_exit(void)
+{
+	struct task_struct *curr = current;
+
+	if (unlikely(curr->lockdep_depth)) {
+		if (!debug_locks_off())
+			return;
+		printk("\n================================================\n");
+		printk(  "[ BUG: lock held when returning to user space! ]\n");
+		printk(  "------------------------------------------------\n");
+		printk("%s/%d is leaving the kernel with locks still held!\n",
+				curr->comm, curr->pid);
+		lockdep_print_held_locks(curr);
+	}
+}
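(Illustration, not from the patch: sys_frobnicate, frob_mutex and do_frob are invented names. A syscall that leaks a lock like this is exactly what the new check reports; the entry-code hooks added earlier in this merge call lockdep_sys_exit() on every return to user space:)

static DEFINE_MUTEX(frob_mutex);	/* invented for the example */

asmlinkage long sys_frobnicate(int arg)
{
	mutex_lock(&frob_mutex);
	if (arg < 0)
		return -EINVAL;		/* bug: returns with frob_mutex held;
					 * lockdep_sys_exit() then sees
					 * lockdep_depth != 0 and prints the
					 * "lock held when returning to user
					 * space" banner above */
	do_frob(arg);
	mutex_unlock(&frob_mutex);
	return 0;
}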
@@ -25,28 +25,38 @@
 static void *l_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct lock_class *class = v;
+	struct lock_class *class;
 
 	(*pos)++;
 
-	if (class->lock_entry.next != &all_lock_classes)
-		class = list_entry(class->lock_entry.next, struct lock_class,
-				  lock_entry);
-	else
-		class = NULL;
-	m->private = class;
+	if (v == SEQ_START_TOKEN)
+		class = m->private;
+	else {
+		class = v;
+
+		if (class->lock_entry.next != &all_lock_classes)
+			class = list_entry(class->lock_entry.next,
+					   struct lock_class, lock_entry);
+		else
+			class = NULL;
+	}
 
 	return class;
 }
 
 static void *l_start(struct seq_file *m, loff_t *pos)
 {
-	struct lock_class *class = m->private;
+	struct lock_class *class;
+	loff_t i = 0;
 
-	if (&class->lock_entry == all_lock_classes.next)
-		seq_printf(m, "all lock classes:\n");
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
 
-	return class;
+	list_for_each_entry(class, &all_lock_classes, lock_entry) {
+		if (++i == *pos)
+			return class;
+	}
+	return NULL;
 }
 
 static void l_stop(struct seq_file *m, void *v)
@@ -101,10 +111,15 @@ static void print_name(struct seq_file *m, struct lock_class *class)
 static int l_show(struct seq_file *m, void *v)
 {
 	unsigned long nr_forward_deps, nr_backward_deps;
-	struct lock_class *class = m->private;
+	struct lock_class *class = v;
 	struct lock_list *entry;
 	char c1, c2, c3, c4;
 
+	if (v == SEQ_START_TOKEN) {
+		seq_printf(m, "all lock classes:\n");
+		return 0;
+	}
+
 	seq_printf(m, "%p", class->key);
 #ifdef CONFIG_DEBUG_LOCKDEP
 	seq_printf(m, " OPS:%8ld", class->ops);
@@ -523,10 +538,11 @@ static void *ls_start(struct seq_file *m, loff_t *pos)
 {
 	struct lock_stat_seq *data = m->private;
 
-	if (data->iter == data->stats)
-		seq_header(m);
+	if (*pos == 0)
+		return SEQ_START_TOKEN;
 
-	if (data->iter == data->iter_end)
+	data->iter = data->stats + *pos;
+	if (data->iter >= data->iter_end)
 		data->iter = NULL;
 
 	return data->iter;
@@ -538,8 +554,13 @@ static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
 	(*pos)++;
 
-	data->iter = v;
-	data->iter++;
+	if (v == SEQ_START_TOKEN)
+		data->iter = data->stats;
+	else {
+		data->iter = v;
+		data->iter++;
+	}
+
 	if (data->iter == data->iter_end)
 		data->iter = NULL;
 
@@ -552,9 +573,11 @@ static void ls_stop(struct seq_file *m, void *v)
 
 static int ls_show(struct seq_file *m, void *v)
 {
-	struct lock_stat_seq *data = m->private;
+	if (v == SEQ_START_TOKEN)
+		seq_header(m);
+	else
+		seq_stats(m, v);
 
-	seq_stats(m, data->iter);
 	return 0;
 }
......
@@ -51,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 
 EXPORT_SYMBOL(__mutex_init);
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * We split the mutex lock/unlock logic into separate fastpath and
  * slowpath functions, to reduce the register pressure on the fastpath.
@@ -92,6 +93,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
 }
 
 EXPORT_SYMBOL(mutex_lock);
+#endif
 
 static void fastcall noinline __sched
 __mutex_unlock_slowpath(atomic_t *lock_count);
@@ -122,7 +124,8 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
 */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+		unsigned long ip)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
@@ -132,7 +135,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
@@ -143,7 +146,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 	if (old_val == 1)
 		goto done;
 
-	lock_contended(&lock->dep_map, _RET_IP_);
+	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
 		/*
@@ -166,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 		if (unlikely(state == TASK_INTERRUPTIBLE &&
 						signal_pending(task))) {
 			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-			mutex_release(&lock->dep_map, 1, _RET_IP_);
+			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
@@ -197,20 +200,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 	return 0;
 }
 
-static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count)
-{
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
-
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
-}
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -219,7 +214,7 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -271,6 +266,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
 	__mutex_unlock_common_slowpath(lock_count, 1);
 }
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
@@ -298,13 +294,22 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+}
+
 static int fastcall noinline __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
 }
+#endif
 
 /*
  * Spinlock based trylock, we take the spinlock and check whether we
......
@@ -49,6 +49,14 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
 /* Definition for rcupdate control block. */
 static struct rcu_ctrlblk rcu_ctrlblk = {
 	.cur = -300,
......