Commit eceb1383 authored by Ingo Molnar

Merge branches 'core/signal' and 'x86/spinlocks' into x86/xen

Conflicts:
	include/asm-x86/spinlock.h
@@ -113,11 +113,6 @@ typedef struct siginfo {
#undef NSIGSEGV
#define NSIGSEGV 3
/*
* SIGTRAP si_codes
*/
#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */
#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint or watchpoint */
#undef NSIGTRAP
#define NSIGTRAP 4
......
@@ -15,11 +15,6 @@
#include <asm-generic/siginfo.h>
/*
* SIGTRAP si_codes
*/
#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */
#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint or watchpoint */
#undef NSIGTRAP
#define NSIGTRAP 4
......
@@ -351,31 +351,28 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
savesegment(es, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->es);
err |= __put_user((u32)regs->di, &sc->di);
err |= __put_user((u32)regs->si, &sc->si);
err |= __put_user((u32)regs->bp, &sc->bp);
err |= __put_user((u32)regs->sp, &sc->sp);
err |= __put_user((u32)regs->bx, &sc->bx);
err |= __put_user((u32)regs->dx, &sc->dx);
err |= __put_user((u32)regs->cx, &sc->cx);
err |= __put_user((u32)regs->ax, &sc->ax);
err |= __put_user((u32)regs->cs, &sc->cs);
err |= __put_user((u32)regs->ss, &sc->ss);
err |= __put_user(regs->di, &sc->di);
err |= __put_user(regs->si, &sc->si);
err |= __put_user(regs->bp, &sc->bp);
err |= __put_user(regs->sp, &sc->sp);
err |= __put_user(regs->bx, &sc->bx);
err |= __put_user(regs->dx, &sc->dx);
err |= __put_user(regs->cx, &sc->cx);
err |= __put_user(regs->ax, &sc->ax);
err |= __put_user(regs->cs, &sc->cs);
err |= __put_user(regs->ss, &sc->ss);
err |= __put_user(current->thread.trap_no, &sc->trapno);
err |= __put_user(current->thread.error_code, &sc->err);
err |= __put_user((u32)regs->ip, &sc->ip);
err |= __put_user((u32)regs->flags, &sc->flags);
err |= __put_user((u32)regs->sp, &sc->sp_at_signal);
err |= __put_user(regs->ip, &sc->ip);
err |= __put_user(regs->flags, &sc->flags);
err |= __put_user(regs->sp, &sc->sp_at_signal);
tmp = save_i387_xstate_ia32(fpstate);
if (tmp < 0)
err = -EFAULT;
else {
clear_used_math();
stts();
else
err |= __put_user(ptr_to_compat(tmp ? fpstate : NULL),
&sc->fpstate);
}
/* non-iBCS2 extensions.. */
err |= __put_user(mask, &sc->oldmask);
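Editor's note on the hunk above: the explicit (u32) casts disappear from the register stores in ia32_setup_sigcontext(). Every destination field in struct sigcontext_ia32 is already a u32, and __put_user derives the store width from the pointed-to type, so the upper half of the 64-bit pt_regs value is discarded with or without the cast. A minimal standalone sketch of that behaviour (plain user-space C, with a fake one-field struct standing in for sigcontext_ia32):

#include <stdint.h>
#include <stdio.h>

/* stand-in for one sigcontext_ia32 slot: the field type, not a cast at the
 * call site, decides how many bytes end up being stored */
struct fake_sc {
        uint32_t di;
};

int main(void)
{
        uint64_t reg_di = 0xdeadbeefcafebabeULL;   /* pretend 64-bit pt_regs->di */
        struct fake_sc sc;

        sc.di = reg_di;                            /* implicit truncation to 32 bits */
        printf("stored: %#x\n", sc.di);            /* prints 0xcafebabe */
        return 0;
}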
@@ -444,21 +441,18 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
return -EFAULT;
err |= __put_user(sig, &frame->sig);
if (err)
goto give_sigsegv;
if (__put_user(sig, &frame->sig))
return -EFAULT;
err |= ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]);
if (err)
goto give_sigsegv;
if (ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
return -EFAULT;
if (_COMPAT_NSIG_WORDS > 1) {
err |= __copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask));
if (err)
goto give_sigsegv;
if (__copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask)))
return -EFAULT;
}
if (ka->sa.sa_flags & SA_RESTORER) {
@@ -479,7 +473,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
*/
err |= __copy_to_user(frame->retcode, &code, 8);
if (err)
goto give_sigsegv;
return -EFAULT;
/* Set up registers for signal handler */
regs->sp = (unsigned long) frame;
@@ -502,10 +496,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
#endif
return 0;
give_sigsegv:
force_sigsegv(sig, current);
return -EFAULT;
}
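Editor's note: with the give_sigsegv label gone, ia32_setup_frame() (and ia32_setup_rt_frame() just below) simply return -EFAULT on any failure and leave forcing the fatal SIGSEGV to their caller in the signal-delivery path, which this diff does not show. A sketch of that calling convention using stub functions; the names here are stand-ins, not kernel code:

#include <stdio.h>

#define EFAULT 14

/* stand-ins: the real helpers are ia32_setup_frame()/ia32_setup_rt_frame() */
static int setup_frame_stub(int make_it_fail)
{
        return make_it_fail ? -EFAULT : 0;      /* no force_sigsegv() in here any more */
}

static void force_sigsegv_stub(int sig)
{
        printf("caller forces SIGSEGV after failed frame setup for signal %d\n", sig);
}

static void deliver(int sig, int make_it_fail)
{
        if (setup_frame_stub(make_it_fail) < 0) {
                force_sigsegv_stub(sig);        /* exactly one place handles the error */
                return;
        }
        printf("signal %d frame set up\n", sig);
}

int main(void)
{
        deliver(11, 0);
        deliver(11, 1);
        return 0;
}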
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
@@ -533,14 +523,14 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
return -EFAULT;
err |= __put_user(sig, &frame->sig);
err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
err |= copy_siginfo_to_user32(&frame->info, info);
if (err)
goto give_sigsegv;
return -EFAULT;
/* Create the ucontext. */
if (cpu_has_xsave)
@@ -556,7 +546,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err)
goto give_sigsegv;
return -EFAULT;
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
@@ -571,7 +561,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
*/
err |= __copy_to_user(frame->retcode, &code, 8);
if (err)
goto give_sigsegv;
return -EFAULT;
/* Set up registers for signal handler */
regs->sp = (unsigned long) frame;
@@ -599,8 +589,4 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
#endif
return 0;
give_sigsegv:
force_sigsegv(sig, current);
return -EFAULT;
}
@@ -736,12 +736,12 @@ unsigned long get_wchan(struct task_struct *p)
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack = (unsigned long)task_stack_page(p);
if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
return 0;
fp = *(u64 *)(p->thread.sp);
do {
if (fp < (unsigned long)stack ||
fp > (unsigned long)stack+THREAD_SIZE)
fp >= (unsigned long)stack+THREAD_SIZE)
return 0;
ip = *(u64 *)(fp+8);
if (!in_sched_functions(ip))
......
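Editor's note: the get_wchan hunk tightens both range checks from '>' to '>='. A task's kernel stack covers the half-open interval [stack, stack + THREAD_SIZE), so a saved sp or frame pointer equal to stack + THREAD_SIZE already lies one byte past the allocation and must be rejected as well. A small self-contained model of the corrected check (the THREAD_SIZE value below is only an assumption for illustration):

#include <stdio.h>

#define THREAD_SIZE 8192UL      /* illustrative; the real value depends on arch/config */

/* valid stack addresses form the half-open range [stack, stack + THREAD_SIZE) */
static int on_stack(unsigned long stack, unsigned long sp)
{
        return sp >= stack && sp < stack + THREAD_SIZE;
}

int main(void)
{
        unsigned long stack = 0x10000;

        printf("%d\n", on_stack(stack, stack));                    /* 1: bottom of the stack */
        printf("%d\n", on_stack(stack, stack + THREAD_SIZE - 8));  /* 1: still inside */
        printf("%d\n", on_stack(stack, stack + THREAD_SIZE));      /* 0: one past the end;
                                                                      the old '>' test accepted it */
        return 0;
}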
@@ -1452,7 +1452,8 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
#endif
}
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code, int si_code)
{
struct siginfo info;
@@ -1461,7 +1462,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
memset(&info, 0, sizeof(info));
info.si_signo = SIGTRAP;
info.si_code = TRAP_BRKPT;
info.si_code = si_code;
/* User-mode ip? */
info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
@@ -1548,5 +1549,5 @@ asmregparm void syscall_trace_leave(struct pt_regs *regs)
*/
if (test_thread_flag(TIF_SINGLESTEP) &&
tracehook_consider_fatal_signal(current, SIGTRAP, SIG_DFL))
send_sigtrap(current, regs, 0);
send_sigtrap(current, regs, 0, TRAP_BRKPT);
}
(Two further file diffs in this merge are collapsed and not shown here.)
@@ -891,6 +891,7 @@ void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
struct task_struct *tsk = current;
unsigned int condition;
int si_code;
trace_hardirqs_fixup();
@@ -935,8 +936,9 @@ void __kprobes do_debug(struct pt_regs *regs, long error_code)
goto clear_TF_reenable;
}
si_code = get_si_code((unsigned long)condition);
/* Ok, finally something we can handle */
send_sigtrap(tsk, regs, error_code);
send_sigtrap(tsk, regs, error_code, si_code);
/*
* Disable additional traps. They'll be re-enabled when
......
@@ -940,7 +940,7 @@ asmlinkage void __kprobes do_debug(struct pt_regs *regs,
tsk->thread.error_code = error_code;
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_code = get_si_code(condition);
info.si_addr = user_mode(regs) ? (void __user *)regs->ip : NULL;
force_sig_info(SIGTRAP, &info, tsk);
......
@@ -172,8 +172,8 @@ SECTIONS
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
*(.x86_cpu_dev.init)
}
SECURITY_INIT
__x86_cpu_dev_end = .;
SECURITY_INIT
. = ALIGN(8);
.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
......
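Editor's note: the linker-script hunk only reorders two lines, so that SECURITY_INIT comes after the __x86_cpu_dev_end marker instead of before it. That matters because __x86_cpu_dev_end is meant to bound the .x86_cpu_dev.init pointer table; with SECURITY_INIT emitted in between, the security initcall output would fall inside the window walked by the CPU-vendor probe. A hedged, kernel-context sketch of that kind of consumer (the start symbol, struct name, and loop are assumptions based on the usual x86 cpu_dev table convention, not part of this diff):

/* not standalone: illustrates why foreign output sections must not land
 * between the start and end markers of a linker-built pointer table */
struct cpu_dev;
extern const struct cpu_dev *const __x86_cpu_dev_start[];   /* assumed companion symbol */
extern const struct cpu_dev *const __x86_cpu_dev_end[];

static void probe_cpu_vendors(void)
{
        const struct cpu_dev *const *cdev;

        /* every word between the two symbols is interpreted as a cpu_dev
         * pointer, so SECURITY_INIT data inside the window would be walked
         * as if it were one */
        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++)
                ;       /* ... match the entry against the CPUID vendor string ... */
}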
@@ -614,7 +614,7 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
size = PAGE_ALIGN(last_addr) - phys_addr;
size = PAGE_ALIGN(last_addr + 1) - phys_addr;
/*
* Mappings have to fit in the FIX_BTMAP area.
......
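Editor's note: the early_ioremap hunk fixes an off-by-one in the size computation. last_addr is the inclusive address of the final byte to map (phys_addr + size - 1), so the page-aligned end of the mapping is PAGE_ALIGN(last_addr + 1); aligning last_addr itself comes up one page short whenever the requested region ends exactly on a page boundary. A small standalone check of the two formulas (PAGE_SIZE and PAGE_ALIGN are spelled out with assumed x86 values):

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static void show(unsigned long phys_addr, unsigned long size)
{
        unsigned long last_addr = phys_addr + size - 1;   /* inclusive last byte */
        unsigned long base = phys_addr & PAGE_MASK;

        printf("request %#lx+%#lx: old=%#lx new=%#lx\n", phys_addr, size,
               PAGE_ALIGN(last_addr) - base,        /* pre-fix formula  */
               PAGE_ALIGN(last_addr + 1) - base);   /* post-fix formula */
}

int main(void)
{
        show(0x1000, 0x1000);   /* last byte at 0x1fff: both formulas give 0x1000 */
        show(0x1000, 0x1001);   /* last byte at 0x2000: old gives 0x1000 (one page
                                   short), new gives 0x2000 and covers that byte  */
        return 0;
}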
@@ -199,6 +199,8 @@ typedef struct siginfo {
*/
#define TRAP_BRKPT (__SI_FAULT|1) /* process breakpoint */
#define TRAP_TRACE (__SI_FAULT|2) /* process trace trap */
#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */
#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint/watchpoint */
#define NSIGTRAP 2
/*
......
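Editor's note: with TRAP_BRANCH and TRAP_HWBKPT now in asm-generic/siginfo.h and the x86 debug path filling si_code through get_si_code() (see the traps.h hunk at the end), a SIGTRAP handler can tell single-steps, hardware breakpoints/watchpoints and plain breakpoints apart. A minimal user-space sketch, assuming a libc whose headers expose these si_code constants (printf in a signal handler is not async-signal-safe and is only used here for the demonstration):

#include <signal.h>
#include <stdio.h>
#include <string.h>

/* demultiplex SIGTRAP by si_code */
static void trap_handler(int sig, siginfo_t *info, void *ctx)
{
        (void)sig; (void)ctx;

        switch (info->si_code) {
        case TRAP_TRACE:
                printf("single-step (DR_STEP)\n");
                break;
        case TRAP_HWBKPT:
                printf("hardware breakpoint/watchpoint (DR_TRAP0..3)\n");
                break;
        case TRAP_BRKPT:
                printf("software breakpoint\n");
                break;
        default:
                printf("other si_code %d\n", info->si_code);
                break;
        }
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = trap_handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGTRAP, &sa, NULL);

        raise(SIGTRAP);         /* self-raised, so si_code lands in the default case;
                                   kernel-generated debug traps carry the codes above */
        return 0;
}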
@@ -3,11 +3,6 @@
#include <asm-generic/siginfo.h>
/*
* SIGTRAP si_codes
*/
#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */
#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint or watchpoint */
#undef NSIGTRAP
#define NSIGTRAP 4
......
@@ -177,11 +177,11 @@ convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
#ifdef CONFIG_X86_32
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
int error_code);
#else
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
int error_code, int si_code);
#endif
void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);
......
@@ -21,8 +21,10 @@
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif
#if defined(CONFIG_X86_32) && \
@@ -54,19 +56,7 @@
* much between them in performance though, especially as locks are out of line.
*/
#if (NR_CPUS < 256)
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
return (((tmp >> 8) & 0xff) != (tmp & 0xff));
}
static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
return (((tmp >> 8) - tmp) & 0xff) > 1;
}
#define TICKET_SHIFT 8
static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
@@ -89,19 +79,17 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
int tmp;
short new;
int tmp, new;
asm volatile("movw %2,%w0\n\t"
asm volatile("movzwl %2, %0\n\t"
"cmpb %h0,%b0\n\t"
"leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
"jne 1f\n\t"
"movw %w0,%w1\n\t"
"incb %h1\n\t"
LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
"1:"
"sete %b1\n\t"
"movzbl %b1,%0\n\t"
: "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
: "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
:
: "memory", "cc");
@@ -116,19 +104,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
: "memory", "cc");
}
#else
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
return (((tmp >> 16) & 0xffff) != (tmp & 0xffff));
}
static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
return (((tmp >> 16) - tmp) & 0xffff) > 1;
}
#define TICKET_SHIFT 16
static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
@@ -146,7 +122,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
/* don't need lfence here, because loads are in-order */
"jmp 1b\n"
"2:"
: "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
: "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
:
: "memory", "cc");
}
@@ -160,13 +136,13 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
"movl %0,%1\n\t"
"roll $16, %0\n\t"
"cmpl %0,%1\n\t"
"leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
"jne 1f\n\t"
"addl $0x00010000, %1\n\t"
LOCK_PREFIX "cmpxchgl %1,%2\n\t"
"1:"
"sete %b1\n\t"
"movzbl %b1,%0\n\t"
: "=&a" (tmp), "=r" (new), "+m" (lock->slock)
: "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
:
: "memory", "cc");
@@ -182,6 +158,20 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
}
#endif
static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}
static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
int tmp = ACCESS_ONCE(lock->slock);
return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
#ifdef CONFIG_PARAVIRT
/*
* Define virtualization-friendly old-style lock byte lock, for use in
......
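Editor's note: the spinlock.h hunk collapses the duplicated 8-bit and 16-bit versions of __ticket_spin_is_locked()/__ticket_spin_is_contended() into a single pair driven by TICKET_SHIFT. The low TICKET_SHIFT bits of slock hold the now-serving ticket and the next TICKET_SHIFT bits hold the next ticket to hand out, so the lock is held while the two halves differ and contended when they differ by more than one. A standalone C model of the two predicates (the lock word here is a plain int, not the kernel's raw_spinlock_t):

#include <stdio.h>

#define TICKET_SHIFT 8                          /* 8 when NR_CPUS < 256, else 16 */
#define TICKET_MASK  ((1 << TICKET_SHIFT) - 1)

/* slock layout: [ next ticket | now-serving ticket ] */
static int ticket_is_locked(int slock)
{
        return !!(((slock >> TICKET_SHIFT) ^ slock) & TICKET_MASK);
}

static int ticket_is_contended(int slock)
{
        return (((slock >> TICKET_SHIFT) - slock) & TICKET_MASK) > 1;
}

static int make_slock(int next, int serving)
{
        return (next << TICKET_SHIFT) | serving;
}

int main(void)
{
        /* free: next == serving */
        printf("%d %d\n", ticket_is_locked(make_slock(3, 3)), ticket_is_contended(make_slock(3, 3)));  /* 0 0 */
        /* held, nobody waiting: next == serving + 1 */
        printf("%d %d\n", ticket_is_locked(make_slock(4, 3)), ticket_is_contended(make_slock(4, 3)));  /* 1 0 */
        /* held with waiters queued: next - serving > 1 */
        printf("%d %d\n", ticket_is_locked(make_slock(6, 3)), ticket_is_contended(make_slock(6, 3)));  /* 1 1 */
        return 0;
}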
#ifndef ASM_X86__TRAPS_H
#define ASM_X86__TRAPS_H
#include <asm/debugreg.h>
/* Common in X86_32 and X86_64 */
asmlinkage void divide_error(void);
asmlinkage void debug(void);
@@ -36,6 +38,16 @@ void do_invalid_op(struct pt_regs *, long);
void do_general_protection(struct pt_regs *, long);
void do_nmi(struct pt_regs *, long);
static inline int get_si_code(unsigned long condition)
{
if (condition & DR_STEP)
return TRAP_TRACE;
else if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3))
return TRAP_HWBKPT;
else
return TRAP_BRKPT;
}
extern int panic_on_unrecovered_nmi;
extern int kstack_depth_to_print;
......
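Editor's note: get_si_code() centralises the DR6 decoding that both do_debug() variants previously skipped when they hard-coded TRAP_BRKPT: the single-step bit takes priority, any of the four breakpoint-hit bits means a hardware break/watchpoint, and anything else stays a plain breakpoint. A standalone replica with the usual asm/debugreg.h bit positions and the user-visible TRAP_* values (both taken as assumptions here, since those headers are not part of this diff):

#include <stdio.h>

/* DR6 status bits (asm/debugreg.h values, assumed) */
#define DR_TRAP0  0x0001        /* breakpoint register 0 hit */
#define DR_TRAP1  0x0002
#define DR_TRAP2  0x0004
#define DR_TRAP3  0x0008
#define DR_STEP   0x4000        /* single-step (BS bit) */

/* user-visible si_code values; the kernel versions also carry the __SI_FAULT tag */
#define TRAP_BRKPT  1
#define TRAP_TRACE  2
#define TRAP_HWBKPT 4

static int get_si_code(unsigned long condition)
{
        if (condition & DR_STEP)
                return TRAP_TRACE;
        else if (condition & (DR_TRAP0 | DR_TRAP1 | DR_TRAP2 | DR_TRAP3))
                return TRAP_HWBKPT;
        else
                return TRAP_BRKPT;
}

int main(void)
{
        printf("%d\n", get_si_code(DR_STEP));             /* 2: TRAP_TRACE  */
        printf("%d\n", get_si_code(DR_TRAP1));            /* 4: TRAP_HWBKPT */
        printf("%d\n", get_si_code(DR_STEP | DR_TRAP0));  /* 2: the step bit wins */
        printf("%d\n", get_si_code(0));                   /* 1: TRAP_BRKPT  */
        return 0;
}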