Commit 55f327fa authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] lockdep: irqtrace subsystem, i386 support

Add irqflags-tracing support to i386.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 55df314f
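
This patch supplies the architecture-level half of irq-flags tracing: raw primitives and asm-level TRACE_IRQS_* hooks on i386, with the TRACE_IRQFLAGS_SUPPORT Kconfig flag advertising them. The traced local_irq_*() APIs themselves come from the generic include/linux/irqflags.h layer, which is not part of this diff. As a rough sketch of that wrapping (exact generic definitions assumed, not shown in this patch):

	/*
	 * Sketch of the generic wrappers in include/linux/irqflags.h
	 * (assumption: not part of this diff). Every hardirq state
	 * change is reported to lockdep via trace_hardirqs_on()/off(),
	 * ordered so the flag state matches what lockdep believes:
	 */
	#ifdef CONFIG_TRACE_IRQFLAGS
	#define local_irq_enable() \
		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
	#define local_irq_disable() \
		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
	#define local_irq_save(flags) \
		do { raw_local_irq_save(flags); trace_hardirqs_off(); } while (0)
	#define local_irq_restore(flags)			\
		do {						\
			if (raw_irqs_disabled_flags(flags)) {	\
				raw_local_irq_restore(flags);	\
				trace_hardirqs_off();		\
			} else {				\
				trace_hardirqs_on();		\
				raw_local_irq_restore(flags);	\
			}					\
		} while (0)
	#endif
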
menu "Kernel hacking" menu "Kernel hacking"
config TRACE_IRQFLAGS_SUPPORT
bool
default y
source "lib/Kconfig.debug" source "lib/Kconfig.debug"
config EARLY_PRINTK config EARLY_PRINTK
......
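
TRACE_IRQFLAGS_SUPPORT only advertises that this architecture implements the tracing hooks; the tracing option itself is generic and depends on it. A hedged sketch of how such a generic option keys off the flag (wording assumed, not from this patch):

	# Sketch (assumption): generic option in lib/Kconfig.debug is
	# only available on architectures that set the capability flag
	# added above, such as i386 after this patch.
	config TRACE_IRQFLAGS
		bool
		depends on TRACE_IRQFLAGS_SUPPORT
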
@@ -42,6 +42,7 @@
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
+#include <asm/irqflags.h>
 #include <asm/errno.h>
 #include <asm/segment.h>
 #include <asm/smp.h>
@@ -76,12 +77,21 @@ NT_MASK = 0x00004000
 VM_MASK = 0x00020000
 #ifdef CONFIG_PREEMPT
-#define preempt_stop cli
+#define preempt_stop cli; TRACE_IRQS_OFF
 #else
 #define preempt_stop
 #define resume_kernel restore_nocheck
 #endif
+
+.macro TRACE_IRQS_IRET
+#ifdef CONFIG_TRACE_IRQFLAGS
+	testl $IF_MASK,EFLAGS(%esp)	# interrupts off?
+	jz 1f
+	TRACE_IRQS_ON
+1:
+#endif
+.endm
 #ifdef CONFIG_VM86
 #define resume_userspace_sig check_userspace
 #else
@@ -257,6 +267,10 @@ ENTRY(sysenter_entry)
 	CFI_REGISTER esp, ebp
 	movl TSS_sysenter_esp0(%esp),%esp
 sysenter_past_esp:
+	/*
+	 * No need to follow this irqs on/off section: the syscall
+	 * disabled irqs and here we enable it straight after entry:
+	 */
 	sti
 	pushl $(__USER_DS)
 	CFI_ADJUST_CFA_OFFSET 4
@@ -303,6 +317,7 @@ sysenter_past_esp:
 	call *sys_call_table(,%eax,4)
 	movl %eax,EAX(%esp)
 	cli
+	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx
 	jne syscall_exit_work
@@ -310,6 +325,7 @@ sysenter_past_esp:
 	movl EIP(%esp), %edx
 	movl OLDESP(%esp), %ecx
 	xorl %ebp,%ebp
+	TRACE_IRQS_ON
 	sti
 	sysexit
 	CFI_ENDPROC
@@ -339,6 +355,7 @@ syscall_exit:
 	cli			# make sure we don't miss an interrupt
 				# setting need_resched or sigpending
 				# between sampling and the iret
+	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	testw $_TIF_ALLWORK_MASK, %cx	# current->work
 	jne syscall_exit_work
@@ -355,12 +372,15 @@ restore_all:
 	CFI_REMEMBER_STATE
 	je ldt_ss		# returning to user-space with LDT SS
 restore_nocheck:
+	TRACE_IRQS_IRET
+restore_nocheck_notrace:
 	RESTORE_REGS
 	addl $4, %esp
 	CFI_ADJUST_CFA_OFFSET -4
 1:	iret
 .section .fixup,"ax"
 iret_exc:
+	TRACE_IRQS_ON
 	sti
 	pushl $0		# no error code
 	pushl $do_iret_error
@@ -386,11 +406,13 @@ ldt_ss:
 	subl $8, %esp		# reserve space for switch16 pointer
 	CFI_ADJUST_CFA_OFFSET 8
 	cli
+	TRACE_IRQS_OFF
 	movl %esp, %eax
 	/* Set up the 16bit stack frame with switch32 pointer on top,
 	 * and a switch16 pointer on top of the current frame. */
 	call setup_x86_bogus_stack
 	CFI_ADJUST_CFA_OFFSET -8	# frame has moved
+	TRACE_IRQS_IRET
 	RESTORE_REGS
 	lss 20+4(%esp), %esp	# switch to 16bit stack
 1:	iret
@@ -411,6 +433,7 @@ work_resched:
 	cli			# make sure we don't miss an interrupt
 				# setting need_resched or sigpending
 				# between sampling and the iret
+	TRACE_IRQS_OFF
 	movl TI_flags(%ebp), %ecx
 	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
 				# than syscall tracing?
@@ -462,6 +485,7 @@ syscall_trace_entry:
 syscall_exit_work:
 	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
 	jz work_pending
+	TRACE_IRQS_ON
 	sti			# could let do_syscall_trace() call
 				# schedule() instead
 	movl %esp, %eax
@@ -535,9 +559,14 @@ ENTRY(irq_entries_start)
 vector=vector+1
 .endr
+/*
+ * the CPU automatically disables interrupts when executing an IRQ vector,
+ * so IRQ-flags tracing has to follow that:
+ */
 	ALIGN
 common_interrupt:
 	SAVE_ALL
+	TRACE_IRQS_OFF
 	movl %esp,%eax
 	call do_IRQ
 	jmp ret_from_intr
@@ -549,9 +578,10 @@ ENTRY(name) \
 	pushl $~(nr);		\
 	CFI_ADJUST_CFA_OFFSET 4;	\
 	SAVE_ALL;		\
+	TRACE_IRQS_OFF		\
 	movl %esp,%eax;		\
 	call smp_/**/name;	\
 	jmp ret_from_intr;	\
 	CFI_ENDPROC
 /* The include is where all of the SMP etc. interrupts come from */
@@ -726,7 +756,7 @@ nmi_stack_correct:
 	xorl %edx,%edx		# zero error code
 	movl %esp,%eax		# pt_regs pointer
 	call do_nmi
-	jmp restore_all
+	jmp restore_nocheck_notrace
 	CFI_ENDPROC
 nmi_stack_fixup:
......
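
The TRACE_IRQS_IRET macro added above covers the return-with-iret path, where the IF bit of the saved EFLAGS image on the stack, not the current flag state, decides whether interrupts will be on after the return. In C terms it amounts to something like this sketch (pt_regs field name as in this era's i386 headers):

	/*
	 * Sketch of TRACE_IRQS_IRET in C: iret restores the stacked
	 * EFLAGS, so report "irqs on" only if IF (bit 9, IF_MASK) is
	 * set in the flags image about to be restored:
	 */
	if (regs->eflags & 0x200)	/* IF_MASK */
		trace_hardirqs_on();
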
@@ -166,7 +166,7 @@ void irq_ctx_init(int cpu)
 	irqctx->tinfo.task = NULL;
 	irqctx->tinfo.exec_domain = NULL;
 	irqctx->tinfo.cpu = cpu;
-	irqctx->tinfo.preempt_count = SOFTIRQ_OFFSET;
+	irqctx->tinfo.preempt_count = 0;
 	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
 	softirq_ctx[cpu] = irqctx;
@@ -211,6 +211,10 @@ asmlinkage void do_softirq(void)
 		: "0"(isp)
 		: "memory", "cc", "edx", "ecx", "eax"
 		);
+		/*
+		 * Shouldn't happen, we returned above if in_interrupt():
+		 */
+		WARN_ON_ONCE(softirq_count());
 	}
 	local_irq_restore(flags);
......
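
The new WARN_ON_ONCE() is sound because do_softirq() bails out early when entered from interrupt context, as its own comment says; a nonzero softirq count at that point would mean the preempt_count bookkeeping (including the softirq-ctx initialization changed above) is broken. Schematically, as a sketch of the surrounding control flow:

	/* Sketch of the control flow around the added check: */
	asmlinkage void do_softirq(void)
	{
		if (in_interrupt())
			return;		/* already in irq/softirq context */
		/* ... switch to softirq_ctx and run the softirqs ... */
		WARN_ON_ONCE(softirq_count());	/* must be 0 again here */
	}
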
+/*
+ * include/asm-i386/irqflags.h
+ *
+ * IRQ flags handling
+ *
+ * This file gets included from lowlevel asm headers too, to provide
+ * wrapped versions of the local_irq_*() APIs, based on the
+ * raw_local_irq_*() macros from the lowlevel headers.
+ */
+#ifndef _ASM_IRQFLAGS_H
+#define _ASM_IRQFLAGS_H
+
+#define raw_local_save_flags(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
+#define raw_local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
+#define raw_local_irq_disable() __asm__ __volatile__("cli": : :"memory")
+#define raw_local_irq_enable() __asm__ __volatile__("sti": : :"memory")
+
+/* used in the idle loop; sti takes one instruction cycle to complete */
+#define raw_safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shutdown the processor */
+#define halt() __asm__ __volatile__("hlt": : :"memory")
+
+#define raw_irqs_disabled_flags(flags) (!((flags) & (1<<9)))
+
+/* For spinlocks etc */
+#define raw_local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
+
+/*
+ * Do the CPU's IRQ-state tracing from assembly code. We call a
+ * C function, so save all the C-clobbered registers:
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS
+# define TRACE_IRQS_ON			\
+	pushl %eax;			\
+	pushl %ecx;			\
+	pushl %edx;			\
+	call trace_hardirqs_on;		\
+	popl %edx;			\
+	popl %ecx;			\
+	popl %eax;
+# define TRACE_IRQS_OFF			\
+	pushl %eax;			\
+	pushl %ecx;			\
+	pushl %edx;			\
+	call trace_hardirqs_off;	\
+	popl %edx;			\
+	popl %ecx;			\
+	popl %eax;
+#else
+# define TRACE_IRQS_ON
+# define TRACE_IRQS_OFF
+#endif
+
+#endif
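
For reference, typical use of the raw layer looks like the sketch below; ordinary kernel code would use the traced local_irq_*() wrappers instead, so that lockdep sees every state change:

	unsigned long flags;

	raw_local_irq_save(flags);	/* pushfl ; popl %0 ; cli */
	/* ... critical section, hardirqs off ... */
	if (raw_irqs_disabled_flags(flags))
		/* caller already had IF (EFLAGS bit 9) cleared */;
	raw_local_irq_restore(flags);	/* popfl: re-enables iff IF was set */
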
@@ -31,6 +31,11 @@
 	"jmp 1b\n" \
 	"3:\n\t"
+/*
+ * NOTE: there's an irqs-on section here, which normally would have to be
+ * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use
+ * __raw_spin_lock_string_flags().
+ */
 #define __raw_spin_lock_string_flags \
 	"\n1:\t" \
 	"lock ; decb %0\n\t" \
......
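
The NOTE matters because the flags variant briefly re-enables interrupts while spinning when the caller had them enabled, and a TRACE_IRQS_ON/OFF annotation cannot easily be emitted from inside an asm string. In outline (a sketch of the behavior only; try_lock()/lock_is_taken() are hypothetical helpers standing in for the lock ; decb sequence):

	/*
	 * Sketch: what __raw_spin_lock_string_flags does while waiting.
	 * The sti/cli pair inside the spin loop is the untraced
	 * irqs-on section the NOTE refers to.
	 */
	while (!try_lock(lock)) {
		if (flags & 0x200) {		/* caller had IF set */
			local_irq_enable();	/* sti: untraced irqs-on */
			while (lock_is_taken(lock))
				cpu_relax();
			local_irq_disable();	/* cli before retrying */
		}
	}
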
@@ -456,25 +456,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
-/* interrupt control.. */
-#define local_save_flags(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
-#define local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
-#define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
-#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt() __asm__ __volatile__("hlt": : :"memory")
-#define irqs_disabled() \
-({ \
-	unsigned long flags; \
-	local_save_flags(flags); \
-	!(flags & (1<<9)); \
-})
-/* For spinlocks etc */
-#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
+#include <linux/irqflags.h>
 /*
  * disable hlt during certain critical i/o operations
......
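
With the open-coded macros removed from system.h, helpers such as irqs_disabled() are composed from the raw primitives in one place instead of being duplicated per header. A sketch of that composition (built on the raw_irqs_disabled_flags() test defined in the new header above):

	/* Sketch: irqs_disabled() composed from the raw primitives. */
	#define irqs_disabled()			\
	({					\
		unsigned long flags;		\
		raw_local_save_flags(flags);	\
		raw_irqs_disabled_flags(flags);	\
	})
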