Commit 77ab58e5 authored by David Mosberger

Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5

into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5
parents 4da120fa 30ee59d7
#include <asm/asmmacro.h>
+#include <asm/ia32.h>
#include <asm/offsets.h>
#include <asm/signal.h>
#include <asm/thread_info.h>
@@ -141,27 +142,35 @@ GLOBAL_ENTRY(ia32_trace_syscall)
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp
;;
st8 [r2]=r3 // initialize return code to -ENOSYS
-br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch syscall args
-// Need to reload arguments (they may be changed by the tracing process)
-adds r2=IA64_PT_REGS_R9_OFFSET+16,sp // r2 = &pt_regs.r9
+br.call.sptk.few rp=syscall_trace // give parent a chance to catch syscall args
+.ret2: // Need to reload arguments (they may be changed by the tracing process)
+adds r2=IA64_PT_REGS_R1_OFFSET+16,sp // r2 = &pt_regs.r1
adds r3=IA64_PT_REGS_R13_OFFSET+16,sp // r3 = &pt_regs.r13
+mov r15=IA32_NR_syscalls
+;;
+ld4 r8=[r2],IA64_PT_REGS_R9_OFFSET-IA64_PT_REGS_R1_OFFSET
+movl r16=ia32_syscall_table
;;
ld4 r33=[r2],8 // r9 == ecx
ld4 r37=[r3],16 // r13 == ebp
+cmp.ltu.unc p6,p7=r8,r15
;;
ld4 r34=[r2],8 // r10 == edx
ld4 r36=[r3],8 // r15 == edi
+(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
+;;
+ld8 r16=[r16]
;;
ld4 r32=[r2],8 // r11 == ebx
+mov b6=r16
ld4 r35=[r3],8 // r14 == esi
-;;
-.ret2: br.call.sptk.few rp=b6 // do the syscall
+br.call.sptk.few rp=b6 // do the syscall
.ia32_strace_check_retval:
cmp.lt p6,p0=r8,r0 // syscall failed?
adds r2=IA64_PT_REGS_R8_OFFSET+16,sp // r2 = &pt_regs.r8
;;
st8.spill [r2]=r8 // store return value in slot for r8
-br.call.sptk.few rp=invoke_syscall_trace // give parent a chance to catch return value
+br.call.sptk.few rp=syscall_trace // give parent a chance to catch return value
.ret4: alloc r2=ar.pfs,0,0,0,0 // drop the syscall argument frame
br.cond.sptk.many ia64_leave_kernel
END(ia32_trace_syscall)
@@ -199,7 +208,7 @@ END(sys32_fork)
.align 8
.globl ia32_syscall_table
ia32_syscall_table:
-data8 sys32_ni_syscall /* 0 - old "setup(" system call*/
+data8 sys_ni_syscall /* 0 - old "setup(" system call*/
data8 sys_exit
data8 sys32_fork
data8 sys_read
@@ -216,25 +225,25 @@ ia32_syscall_table:
data8 sys_mknod
data8 sys_chmod /* 15 */
data8 sys_lchown /* 16-bit version */
-data8 sys32_ni_syscall /* old break syscall holder */
-data8 sys32_ni_syscall
+data8 sys_ni_syscall /* old break syscall holder */
+data8 sys_ni_syscall
data8 sys32_lseek
data8 sys_getpid /* 20 */
data8 sys_mount
data8 sys_oldumount
data8 sys_setuid /* 16-bit version */
data8 sys_getuid /* 16-bit version */
-data8 sys32_ni_syscall /* sys_stime is not supported on IA64 */ /* 25 */
+data8 sys_ni_syscall /* sys_stime is not supported on IA64 */ /* 25 */
data8 sys32_ptrace
data8 sys32_alarm
-data8 sys32_ni_syscall
+data8 sys_ni_syscall
data8 sys32_pause
data8 compat_sys_utime /* 30 */
-data8 sys32_ni_syscall /* old stty syscall holder */
-data8 sys32_ni_syscall /* old gtty syscall holder */
+data8 sys_ni_syscall /* old stty syscall holder */
+data8 sys_ni_syscall /* old gtty syscall holder */
data8 sys_access
data8 sys_nice
-data8 sys32_ni_syscall /* 35 */ /* old ftime syscall holder */
+data8 sys_ni_syscall /* 35 */ /* old ftime syscall holder */
data8 sys_sync
data8 sys_kill
data8 sys_rename
@@ -243,7 +252,7 @@ ia32_syscall_table:
data8 sys_dup
data8 sys32_pipe
data8 compat_sys_times
-data8 sys32_ni_syscall /* old prof syscall holder */
+data8 sys_ni_syscall /* old prof syscall holder */
data8 sys32_brk /* 45 */
data8 sys_setgid /* 16-bit version */
data8 sys_getgid /* 16-bit version */
@@ -252,13 +261,13 @@ ia32_syscall_table:
data8 sys_getegid /* 16-bit version */ /* 50 */
data8 sys_acct
data8 sys_umount /* recycled never used phys( */
-data8 sys32_ni_syscall /* old lock syscall holder */
+data8 sys_ni_syscall /* old lock syscall holder */
data8 compat_sys_ioctl
data8 compat_sys_fcntl /* 55 */
-data8 sys32_ni_syscall /* old mpx syscall holder */
+data8 sys_ni_syscall /* old mpx syscall holder */
data8 sys_setpgid
-data8 sys32_ni_syscall /* old ulimit syscall holder */
-data8 sys32_ni_syscall
+data8 sys_ni_syscall /* old ulimit syscall holder */
+data8 sys_ni_syscall
data8 sys_umask /* 60 */
data8 sys_chroot
data8 sys_ustat
@@ -267,8 +276,8 @@ ia32_syscall_table:
data8 sys_getpgrp /* 65 */
data8 sys_setsid
data8 sys32_sigaction
-data8 sys32_ni_syscall
-data8 sys32_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall
data8 sys_setreuid /* 16-bit version */ /* 70 */
data8 sys_setregid /* 16-bit version */
data8 sys32_sigsuspend
@@ -283,7 +292,7 @@ ia32_syscall_table:
data8 sys32_setgroups16
data8 sys32_old_select
data8 sys_symlink
-data8 sys32_ni_syscall
+data8 sys_ni_syscall
data8 sys_readlink /* 85 */
data8 sys_uselib
data8 sys_swapon
@@ -297,7 +306,7 @@ ia32_syscall_table:
data8 sys_fchown /* 16-bit version */ /* 95 */
data8 sys_getpriority
data8 sys_setpriority
-data8 sys32_ni_syscall /* old profil syscall holder */
+data8 sys_ni_syscall /* old profil syscall holder */
data8 compat_sys_statfs
data8 compat_sys_fstatfs /* 100 */
data8 sys32_ioperm
@@ -308,11 +317,11 @@ ia32_syscall_table:
data8 compat_sys_newstat
data8 compat_sys_newlstat
data8 compat_sys_newfstat
-data8 sys32_ni_syscall
+data8 sys_ni_syscall
data8 sys32_iopl /* 110 */
data8 sys_vhangup
-data8 sys32_ni_syscall /* used to be sys_idle */
-data8 sys32_ni_syscall
+data8 sys_ni_syscall /* used to be sys_idle */
+data8 sys_ni_syscall
data8 compat_sys_wait4
data8 sys_swapoff /* 115 */
data8 sys32_sysinfo
@@ -323,20 +332,20 @@ ia32_syscall_table:
data8 sys_setdomainname
data8 sys32_newuname
data8 sys32_modify_ldt
-data8 sys32_ni_syscall /* adjtimex */
+data8 sys_ni_syscall /* adjtimex */
data8 sys32_mprotect /* 125 */
data8 compat_sys_sigprocmask
-data8 sys32_ni_syscall /* create_module */
-data8 sys32_ni_syscall /* init_module */
-data8 sys32_ni_syscall /* delete_module */
-data8 sys32_ni_syscall /* get_kernel_syms */ /* 130 */
+data8 sys_ni_syscall /* create_module */
+data8 sys_ni_syscall /* init_module */
+data8 sys_ni_syscall /* delete_module */
+data8 sys_ni_syscall /* get_kernel_syms */ /* 130 */
data8 sys_quotactl
data8 sys_getpgid
data8 sys_fchdir
-data8 sys32_ni_syscall /* sys_bdflush */
+data8 sys_ni_syscall /* sys_bdflush */
data8 sys_sysfs /* 135 */
data8 sys32_personality
-data8 sys32_ni_syscall /* for afs_syscall */
+data8 sys_ni_syscall /* for afs_syscall */
data8 sys_setfsuid /* 16-bit version */
data8 sys_setfsgid /* 16-bit version */
data8 sys_llseek /* 140 */
@@ -365,10 +374,10 @@ ia32_syscall_table:
data8 sys_mremap
data8 sys_setresuid /* 16-bit version */
data8 sys32_getresuid16 /* 16-bit version */ /* 165 */
-data8 sys32_ni_syscall /* vm86 */
-data8 sys32_ni_syscall /* sys_query_module */
+data8 sys_ni_syscall /* vm86 */
+data8 sys_ni_syscall /* sys_query_module */
data8 sys_poll
-data8 sys32_ni_syscall /* nfsservctl */
+data8 sys_ni_syscall /* nfsservctl */
data8 sys_setresgid /* 170 */
data8 sys32_getresgid16
data8 sys_prctl
@@ -387,8 +396,8 @@ ia32_syscall_table:
data8 sys_capset /* 185 */
data8 sys32_sigaltstack
data8 sys32_sendfile
-data8 sys32_ni_syscall /* streams1 */
-data8 sys32_ni_syscall /* streams2 */
+data8 sys_ni_syscall /* streams1 */
+data8 sys_ni_syscall /* streams2 */
data8 sys32_vfork /* 190 */
data8 compat_sys_getrlimit
data8 sys32_mmap2
@@ -469,10 +478,6 @@ ia32_syscall_table:
data8 sys_ni_syscall
data8 sys_statfs64
data8 sys_fstatfs64
-data8 sys_ni_syscall
-/*
-* CAUTION: If any system calls are added beyond this point
-* then the check in `arch/ia64/kernel/ivt.S' will have
-* to be modified also. You've been warned.
-*/
+// guard against failures to increase IA32_NR_syscalls
+.org ia32_syscall_table + 8*IA32_NR_syscalls
@@ -2164,19 +2164,6 @@ sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
return ret;
}
-asmlinkage long sys_ni_syscall(void);
-asmlinkage long
-sys32_ni_syscall (int dummy0, int dummy1, int dummy2, int dummy3, int dummy4, int dummy5,
-int dummy6, int dummy7, int stack)
-{
-struct pt_regs *regs = (struct pt_regs *)&stack;
-printk(KERN_WARNING "IA32 syscall #%d issued, maybe we should implement it\n",
-(int)regs->r1);
-return(sys_ni_syscall());
-}
/*
* The IA64 maps 4 I/O ports for each 4K page
*/
...
@@ -483,34 +483,11 @@ GLOBAL_ENTRY(clone)
br.ret.sptk.many rp
END(clone)
-/*
-* We invoke syscall_trace through this intermediate function to
-* ensure that the syscall input arguments are not clobbered. We
-* also use it to preserve b6, which contains the syscall entry point.
-*/
-GLOBAL_ENTRY(invoke_syscall_trace)
-.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-alloc loc1=ar.pfs,8,3,0,0
-mov loc0=rp
-.body
-mov loc2=b6
-;;
-br.call.sptk.many rp=syscall_trace
-.ret3: mov rp=loc0
-mov ar.pfs=loc1
-mov b6=loc2
-br.ret.sptk.many rp
-END(invoke_syscall_trace)
/*
* Invoke a system call, but do some tracing before and after the call.
* We MUST preserve the current register frame throughout this routine
* because some system calls (such as ia64_execve) directly
* manipulate ar.pfs.
-*
-* Input:
-* r15 = syscall number
-* b6 = syscall entry point
*/
.global ia64_strace_leave_kernel
@@ -522,21 +499,38 @@ GLOBAL_ENTRY(ia64_trace_syscall)
*/
nop.m 0
nop.i 0
-br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch syscall args
+br.call.sptk.many rp=syscall_trace // give parent a chance to catch syscall args
}
-.ret6: br.call.sptk.many rp=b6 // do the syscall
-strace_check_retval:
+// the syscall number may have changed, so re-load it and re-calculate the
+// syscall entry-point:
+adds r15=PT(R15)+16,sp // r15 = &pt_regs.r15 (syscall #)
+;;
+ld8 r15=[r15]
+mov r3=NR_syscalls - 1
+;;
+adds r15=-1024,r15
+movl r16=sys_call_table
+;;
+shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
+cmp.leu p6,p7=r15,r3
+;;
+(p6) ld8 r20=[r20] // load address of syscall entry point
+(p7) movl r20=sys_ni_syscall
+;;
+mov b6=r20
+br.call.sptk.many rp=b6 // do the syscall
+.strace_check_retval:
cmp.lt p6,p0=r8,r0 // syscall failed?
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
mov r10=0
(p6) br.cond.sptk strace_error // syscall failed ->
;; // avoid RAW on r10
-strace_save_retval:
+.strace_save_retval:
.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
ia64_strace_leave_kernel:
-br.call.sptk.many rp=invoke_syscall_trace // give parent a chance to catch return value
+br.call.sptk.many rp=syscall_trace // give parent a chance to catch return value
.rety: br.cond.sptk ia64_leave_syscall
strace_error:
@@ -548,7 +542,7 @@ strace_error:
;;
(p6) mov r10=-1
(p6) mov r8=r9
-br.cond.sptk strace_save_retval
+br.cond.sptk .strace_save_retval
END(ia64_trace_syscall)
GLOBAL_ENTRY(ia64_ret_from_clone)
@@ -573,7 +567,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
;;
mov r8=0
tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE
-(p6) br.cond.spnt strace_check_retval
+(p6) br.cond.spnt .strace_check_retval
;; // added stop bits to prevent r8 dependency
END(ia64_ret_from_clone)
// fall through
@@ -726,6 +720,7 @@ GLOBAL_ENTRY(ia64_leave_syscall)
mov b6=r22 // restore b6
shr.u r18=r19,16 // get byte size of existing "dirty" partition
(pKStk) br.cond.dpnt.many skip_rbs_switch
+(pNonSys) br.cond.dpnt.many dont_preserve_current_frame
br.cond.sptk.many rbs_switch
END(ia64_leave_syscall)
@@ -1334,9 +1329,9 @@ sys_call_table:
data8 sys_syslog
data8 sys_setitimer
data8 sys_getitimer
-data8 ia64_ni_syscall // 1120 /* was: ia64_oldstat */
-data8 ia64_ni_syscall /* was: ia64_oldlstat */
-data8 ia64_ni_syscall /* was: ia64_oldfstat */
+data8 sys_ni_syscall // 1120 /* was: ia64_oldstat */
+data8 sys_ni_syscall /* was: ia64_oldlstat */
+data8 sys_ni_syscall /* was: ia64_oldfstat */
data8 sys_vhangup
data8 sys_lchown
data8 sys_remap_file_pages // 1125
@@ -1346,16 +1341,16 @@ sys_call_table:
data8 sys_setdomainname
data8 sys_newuname // 1130
data8 sys_adjtimex
-data8 ia64_ni_syscall /* was: ia64_create_module */
+data8 sys_ni_syscall /* was: ia64_create_module */
data8 sys_init_module
data8 sys_delete_module
-data8 ia64_ni_syscall // 1135 /* was: sys_get_kernel_syms */
-data8 ia64_ni_syscall /* was: sys_query_module */
+data8 sys_ni_syscall // 1135 /* was: sys_get_kernel_syms */
+data8 sys_ni_syscall /* was: sys_query_module */
data8 sys_quotactl
data8 sys_bdflush
data8 sys_sysfs
data8 sys_personality // 1140
-data8 ia64_ni_syscall // sys_afs_syscall
+data8 sys_ni_syscall // sys_afs_syscall
data8 sys_setfsuid
data8 sys_setfsgid
data8 sys_getdents
@@ -1473,26 +1468,26 @@ sys_call_table:
data8 sys_clock_nanosleep
data8 sys_fstatfs64
data8 sys_statfs64
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall // 1260
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall // 1265
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall // 1270
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall // 1275
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
-data8 ia64_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall // 1260
+data8 sys_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall // 1265
+data8 sys_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall // 1270
+data8 sys_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall // 1275
+data8 sys_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall
+data8 sys_ni_syscall
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
@@ -4,7 +4,7 @@
* Preserved registers that are shared between code in ivt.S and entry.S. Be
* careful not to step on these!
*/
-#define pLvSys p1 /* set 1 if leave from syscall; otherwise, set 0*/
+#define pLvSys p1 /* set 1 if leave from syscall; otherwise, set 0 */
#define pKStk p2 /* will leave_{kernel,syscall} return to kernel-stacks? */
#define pUStk p3 /* will leave_{kernel,syscall} return to user-stacks? */
#define pSys p4 /* are we processing a (synchronous) system call? */
...
@@ -42,6 +42,7 @@
#include <asm/asmmacro.h>
#include <asm/break.h>
+#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
@@ -705,13 +706,14 @@ ENTRY(break_fault)
movl r2=ia64_ret_from_syscall
;;
shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
-cmp.geu p0,p7=r3,r15 // (syscall > 0 && syscall < 1024 + NR_syscalls) ?
+cmp.leu p6,p7=r15,r3 // (syscall > 0 && syscall < 1024 + NR_syscalls) ?
mov rp=r2 // set the real return addr
;;
-(p7) add r20=(__NR_ni_syscall-1024)*8,r16 // force __NR_ni_syscall
+(p6) ld8 r20=[r20] // load address of syscall entry point
+(p7) movl r20=sys_ni_syscall
add r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
-ld8 r20=[r20] // load address of syscall entry point
ld4 r2=[r2] // r2 = current_thread_info()->flags
;;
tbit.z p8,p0=r2,TIF_SYSCALL_TRACE
@@ -1513,7 +1515,7 @@ ENTRY(dispatch_to_ia32_handler)
alloc r15=ar.pfs,0,0,6,0 // must first in an insn group
;;
ld4 r8=[r14],8 // r8 == eax (syscall number)
-mov r15=270 // number of entries in ia32 system call table
+mov r15=IA32_NR_syscalls
;;
cmp.ltu.unc p6,p7=r8,r15
ld4 out1=[r14],8 // r9 == ecx
...
@@ -114,7 +114,7 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
pfm_default_smpl_hdr_t *hdr;
pfm_default_smpl_entry_t *ent;
void *cur, *last;
-unsigned long *e;
+unsigned long *e, entry_size;
unsigned int npmds, i;
unsigned char ovfl_pmd;
unsigned char ovfl_notify;
@@ -131,8 +131,7 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
ovfl_notify = arg->ovfl_notify;
/*
-* check for space against largest possibly entry.
-* We may waste space at the end of the buffer.
+* precheck for sanity
*/
if ((last - cur) < PFM_DEFAULT_MAX_ENTRY_SIZE) goto full;
@@ -142,6 +141,8 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
prefetch(arg->smpl_pmds_values);
+entry_size = sizeof(*ent) + (npmds << 3);
/* position for first pmd */
e = (unsigned long *)(ent+1);
@@ -191,7 +192,13 @@ default_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg, struct
/*
* update position for next entry
*/
-hdr->hdr_cur_offs += sizeof(*ent) + (npmds << 3);
+hdr->hdr_cur_offs += entry_size;
+cur += entry_size;
+/*
+* post check to avoid losing the last sample
+*/
+if ((last - cur) < PFM_DEFAULT_MAX_ENTRY_SIZE) goto full;
/*
* keep same ovfl_pmds, ovfl_notify
...
@@ -29,6 +29,11 @@
#include <asm/perfmon.h>
#endif
+#include "entry.h"
+#define p4 (1UL << 4) /* for pSys (see entry.h) */
+#define p5 (1UL << 5) /* for pNonSys (see entry.h) */
/*
* Bits in the PSR that we allow ptrace() to change:
* be, up, ac, mfl, mfh (the user mask; five bits total)
@@ -51,6 +56,14 @@
# define dprintk(format...)
#endif
+/* Return TRUE if PT was created due to kernel-entry via a system-call. */
+static inline int
+in_syscall (struct pt_regs *pt)
+{
+return (long) pt->cr_ifs >= 0;
+}
/*
* Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
* bitset where bit i is set iff the NaT bit of register i is set.
@@ -272,7 +285,7 @@ put_rnat (struct task_struct *task, struct switch_stack *sw,
ubspstore = (unsigned long *) pt->ar_bspstore;
urbs_kargs = urbs_end;
-if ((long)pt->cr_ifs >= 0) {
+if (in_syscall(pt)) {
/*
* If entered via syscall, don't allow user to set rnat bits
* for syscall args.
@@ -331,6 +344,13 @@ put_rnat (struct task_struct *task, struct switch_stack *sw,
*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}
+static inline int
+on_kernel_rbs (unsigned long addr, unsigned long bspstore, unsigned long urbs_end)
+{
+return (addr >= bspstore
+&& addr <= (unsigned long) ia64_rse_rnat_addr((unsigned long *) urbs_end));
+}
/*
* Read a word from the user-level backing store of task CHILD. ADDR is the user-level
* address to read the word from, VAL a pointer to the return value, and USER_BSP gives
@@ -355,7 +375,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack, unsigned
child_regs = ia64_task_regs(child);
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
-if (laddr >= bspstore && laddr <= ia64_rse_rnat_addr(urbs_end)) {
+if (on_kernel_rbs(addr, (unsigned long) bspstore, (unsigned long) urbs_end)) {
/*
* Attempt to read the RBS in an area that's actually on the kernel RBS =>
* read the corresponding bits in the kernel RBS.
@@ -406,7 +426,7 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack, unsigned
child_regs = ia64_task_regs(child);
bspstore = (unsigned long *) child_regs->ar_bspstore;
krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
-if (laddr >= bspstore && laddr <= ia64_rse_rnat_addr(urbs_end)) {
+if (on_kernel_rbs(addr, (unsigned long) bspstore, (unsigned long) urbs_end)) {
/*
* Attempt to write the RBS in an area that's actually on the kernel RBS
* => write the corresponding bits in the kernel RBS.
@@ -443,7 +463,7 @@ ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt, unsigned l
ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
cfm = pt->cr_ifs & ~(1UL << 63);
-if ((long) pt->cr_ifs >= 0) {
+if (in_syscall(pt)) {
/*
* If bit 63 of cr.ifs is cleared, the kernel was entered via a system
* call and we need to recover the CFM that existed on entry to the
@@ -483,134 +503,80 @@ ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
return 0;
}
-/*
-* Simulate user-level "flushrs". Note: we can't just add pt->loadrs>>16 to
-* pt->ar_bspstore because the kernel backing store and the user-level backing store may
-* have different alignments (and therefore a different number of intervening rnat slots).
-*/
-static void
-user_flushrs (struct task_struct *task, struct pt_regs *pt)
-{
-unsigned long *krbs;
-long ndirty;
-krbs = (unsigned long *) task + IA64_RBS_OFFSET/8;
-ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
-pt->ar_bspstore = (unsigned long) ia64_rse_skip_regs((unsigned long *) pt->ar_bspstore,
-ndirty);
-pt->loadrs = 0;
-}
-static inline void
-sync_user_rbs_one_thread (struct task_struct *p, int make_writable)
-{
-struct switch_stack *sw;
-unsigned long urbs_end;
-struct pt_regs *pt;
-sw = (struct switch_stack *) (p->thread.ksp + 16);
-pt = ia64_task_regs(p);
-urbs_end = ia64_get_user_rbs_end(p, pt, NULL);
-ia64_sync_user_rbs(p, sw, pt->ar_bspstore, urbs_end);
-if (make_writable)
-user_flushrs(p, pt);
-}
-struct task_list {
-struct task_list *next;
-struct task_struct *task;
-};
-#ifdef CONFIG_SMP
-static inline void
-collect_task (struct task_list **listp, struct task_struct *p, int make_writable)
-{
-struct task_list *e;
-e = kmalloc(sizeof(*e), GFP_KERNEL);
-if (!e)
-/* oops, can't collect more: finish at least what we collected so far... */
-return;
-get_task_struct(p);
-e->task = p;
-e->next = *listp;
-*listp = e;
-}
-static inline struct task_list *
-finish_task (struct task_list *list, int make_writable)
-{
-struct task_list *next = list->next;
-sync_user_rbs_one_thread(list->task, make_writable);
-put_task_struct(list->task);
-kfree(list);
-return next;
-}
-#else
-# define collect_task(list, p, make_writable) sync_user_rbs_one_thread(p, make_writable)
-# define finish_task(list, make_writable) (NULL)
-#endif
-/*
-* Synchronize the RSE backing store of CHILD and all tasks that share the address space
-* with it. CHILD_URBS_END is the address of the end of the register backing store of
-* CHILD. If MAKE_WRITABLE is set, a user-level "flushrs" is simulated such that the VM
-* can be written via ptrace() and the tasks will pick up the newly written values. It
-* would be OK to unconditionally simulate a "flushrs", but this would be more intrusive
-* than strictly necessary (e.g., it would make it impossible to obtain the original value
-* of ar.bspstore).
-*/
-static void
-threads_sync_user_rbs (struct task_struct *child, unsigned long child_urbs_end, int make_writable)
-{
-struct switch_stack *sw;
-struct task_struct *g, *p;
-struct mm_struct *mm;
-struct pt_regs *pt;
-long multi_threaded;
-task_lock(child);
-{
-mm = child->mm;
-multi_threaded = mm && (atomic_read(&mm->mm_users) > 1);
-}
-task_unlock(child);
-if (!multi_threaded) {
-sw = (struct switch_stack *) (child->thread.ksp + 16);
-pt = ia64_task_regs(child);
-ia64_sync_user_rbs(child, sw, pt->ar_bspstore, child_urbs_end);
-if (make_writable)
-user_flushrs(child, pt);
-} else {
-/*
-* Note: we can't call ia64_sync_user_rbs() while holding the
-* tasklist_lock because that may cause a dead-lock: ia64_sync_user_rbs()
-* may indirectly call tlb_flush_all(), which triggers an IPI.
-* Furthermore, tasklist_lock is acquired by fork() with interrupts
-* disabled, so with the right timing, the IPI never completes, hence
-* tasklist_lock never gets released, hence fork() never completes...
-*/
-struct task_list *list = NULL;
-read_lock(&tasklist_lock);
-{
-do_each_thread(g, p) {
-if (p->mm == mm && p->state != TASK_RUNNING)
-collect_task(&list, p, make_writable);
-} while_each_thread(g, p);
-}
-read_unlock(&tasklist_lock);
-while (list)
-list = finish_task(list, make_writable);
-}
-child->thread.flags |= IA64_THREAD_KRBS_SYNCED; /* set the flag in the child thread only */
-}
+static inline int
+thread_matches (struct task_struct *thread, unsigned long addr)
+{
+unsigned long thread_rbs_end;
+struct pt_regs *thread_regs;
+if (ptrace_check_attach(thread, 0) < 0)
+/*
+* If the thread is not in an attachable state, we'll ignore it.
+* The net effect is that if ADDR happens to overlap with the
+* portion of the thread's register backing store that is
+* currently residing on the thread's kernel stack, then ptrace()
+* may end up accessing a stale value. But if the thread isn't
+* stopped, that's a problem anyhow, so we're doing as well as we
+* can...
+*/
+return 0;
+thread_regs = ia64_task_regs(thread);
+thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
+if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
+return 0;
+return 1; /* looks like we've got a winner */
+}
+/*
+* GDB apparently wants to be able to read the register-backing store of any thread when
+* attached to a given process. If we are peeking or poking an address that happens to
+* reside in the kernel-backing store of another thread, we need to attach to that thread,
+* because otherwise we end up accessing stale data.
+*
+* task_list_lock must be read-locked before calling this routine!
+*/
+static struct task_struct *
+find_thread_for_addr (struct task_struct *child, unsigned long addr)
+{
+struct task_struct *g, *p;
+struct mm_struct *mm;
+int mm_users;
+if (!(mm = get_task_mm(child)))
+return child;
+mm_users = atomic_read(&mm->mm_users) - 1; /* -1 because of our get_task_mm()... */
+if (mm_users <= 1)
+goto out; /* not multi-threaded */
+/*
+* First, traverse the child's thread-list. Good for scalability with
+* NPTL-threads.
+*/
+p = child;
+do {
+if (thread_matches(p, addr)) {
+child = p;
+goto out;
+}
+if (mm_users-- <= 1)
+goto out;
+} while ((p = next_thread(p)) != child);
+do_each_thread(g, p) {
+if (child->mm != mm)
+continue;
+if (thread_matches(p, addr)) {
+child = p;
+goto out;
+}
+} while_each_thread(g, p);
+out:
+mmput(mm);
+return child;
+}
/*
@@ -668,12 +634,40 @@ access_fr (struct unw_frame_info *info, int regnum, int hi, unsigned long *data,
return ret;
}
+/*
+* Change the machine-state of CHILD such that it will return via the normal
+* kernel exit-path, rather than the syscall-exit path.
+*/
+static void
+convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt, unsigned long cfm)
+{
+struct unw_frame_info info, prev_info;
+unsigned long ip, pr;
+unw_init_from_blocked_task(&info, child);
+while (1) {
+prev_info = info;
+if (unw_unwind(&info) < 0)
+return;
+if (unw_get_rp(&info, &ip) < 0)
+return;
+if (ip < FIXADDR_USER_END)
+break;
+}
+unw_get_pr(&prev_info, &pr);
+pr &= ~pSys;
+pr |= pNonSys;
+unw_set_pr(&prev_info, pr);
+pt->cr_ifs = (1UL << 63) | cfm;
+}
static int
access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access)
{
-unsigned long *ptr, regnum, urbs_end, rnat_addr;
+unsigned long *ptr, regnum, urbs_end, rnat_addr, cfm;
struct switch_stack *sw;
-struct unw_frame_info info;
struct pt_regs *pt;
pt = ia64_task_regs(child);
@@ -778,13 +772,30 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
* By convention, we use PT_AR_BSP to refer to the end of the user-level
* backing store. Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) to get
* the real value of ar.bsp at the time the kernel was entered.
+*
+* Furthermore, when changing the contents of PT_AR_BSP (or
+* PT_CFM) we MUST copy any users-level stacked registers that are
+* stored on the kernel stack back to user-space because
+* otherwise, we might end up clobbering kernel stacked registers.
+* Also, if this happens while the task is blocked in a system
+* call, which convert the state such that the non-system-call
+* exit path is used. This ensures that the proper state will be
+* picked up when resuming execution. However, it *also* means
+* that once we write PT_AR_BSP/PT_CFM, it won't be possible to
+* modify the syscall arguments of the pending system call any
+* longer. This shouldn't be an issue because modifying
+* PT_AR_BSP/PT_CFM generally implies that we're either abandoning
+* the pending system call or that we defer it's re-execution
+* (e.g., due to GDB doing an inferior function call).
*/
-urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
+urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
if (write_access) {
if (*data != urbs_end) {
if (ia64_sync_user_rbs(child, sw,
pt->ar_bspstore, urbs_end) < 0)
return -1;
+if (in_syscall(pt))
+convert_to_non_syscall(child, pt, cfm);
/* simulate user-level write of ar.bsp: */
pt->loadrs = 0;
pt->ar_bspstore = *data;
@@ -794,27 +805,19 @@ access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data
return 0;
case PT_CFM:
-if ((long) pt->cr_ifs < 0) {
-if (write_access)
+urbs_end = ia64_get_user_rbs_end(child, pt, &cfm);
+if (write_access) {
+if (((cfm ^ *data) & 0x3fffffffffU) != 0) {
+if (ia64_sync_user_rbs(child, sw,
+pt->ar_bspstore, urbs_end) < 0)
+return -1;
+if (in_syscall(pt))
+convert_to_non_syscall(child, pt, cfm);
pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL)
| (*data & 0x3fffffffffUL));
-else
-*data = pt->cr_ifs & 0x3fffffffffUL;
-} else {
-/* kernel was entered through a system call */
-unsigned long cfm;
-unw_init_from_blocked_task(&info, child);
-if (unw_unwind_to_user(&info) < 0)
-return -1;
-unw_get_cfm(&info, &cfm);
-if (write_access)
-unw_set_cfm(&info, ((cfm & ~0x3fffffffffU)
-| (*data & 0x3fffffffffUL)));
-else
-*data = cfm;
-}
+}
+} else
+*data = cfm;
return 0;
case PT_CR_IPSR:
@@ -1240,9 +1243,6 @@ ptrace_disable (struct task_struct *child)
/* make sure the single step/take-branch tra bits are not set: */
child_psr->ss = 0;
child_psr->tb = 0;
-/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
-child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
}
asmlinkage long
@@ -1250,7 +1250,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
long arg4, long arg5, long arg6, long arg7, long stack)
{
struct pt_regs *pt, *regs = (struct pt_regs *) &stack;
-unsigned long urbs_end;
+unsigned long urbs_end, peek_or_poke;
struct task_struct *child;
struct switch_stack *sw;
long ret;
@@ -1269,13 +1269,18 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
goto out;
}
+peek_or_poke = (request == PTRACE_PEEKTEXT || request == PTRACE_PEEKDATA
+|| request == PTRACE_POKETEXT || request == PTRACE_POKEDATA);
ret = -ESRCH;
read_lock(&tasklist_lock);
{
child = find_task_by_pid(pid);
-if (child)
+if (child) {
+if (peek_or_poke)
+child = find_thread_for_addr(child, addr);
get_task_struct(child);
+}
}
read_unlock(&tasklist_lock);
if (!child)
goto out;
@@ -1299,10 +1304,6 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
case PTRACE_PEEKTEXT:
case PTRACE_PEEKDATA: /* read word at location addr */
urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
-if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED))
-threads_sync_user_rbs(child, urbs_end, 0);
ret = ia64_peek(child, sw, urbs_end, addr, &data);
if (ret == 0) {
ret = data;
@@ -1313,9 +1314,6 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
case PTRACE_POKETEXT:
case PTRACE_POKEDATA: /* write the word at location addr */
urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
-if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED))
-threads_sync_user_rbs(child, urbs_end, 1);
ret = ia64_poke(child, sw, urbs_end, addr, data);
goto out_tsk;
@@ -1359,9 +1357,6 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
ia64_psr(pt)->ss = 0;
ia64_psr(pt)->tb = 0;
-/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
-child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
wake_up_process(child);
ret = 0;
goto out_tsk;
@@ -1380,9 +1375,6 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
ia64_psr(pt)->ss = 0;
ia64_psr(pt)->tb = 0;
-/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
-child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
wake_up_process(child);
ret = 0;
goto out_tsk;
@@ -1401,9 +1393,6 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
}
child->exit_code = data;
-/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
-child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
/* give it a chance to run. */
wake_up_process(child);
ret = 0;
@@ -1432,7 +1421,9 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
return ret;
}
-void
+/* "asmlinkage" so the input arguments are preserved... */
+asmlinkage void
syscall_trace (void)
{
if (!test_thread_flag(TIF_SYSCALL_TRACE))
...
@@ -538,6 +538,19 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
if (!oldset)
oldset = &current->blocked;
+/*
+* This only loops in the rare cases of handle_signal() failing, in which case we
+* need to push through a forced SIGSEGV.
+*/
+while (1) {
+int signr = get_signal_to_deliver(&info, &scr->pt, NULL);
+/*
+* get_signal_to_deliver() may have run a debugger (via notify_parent())
+* and the debugger may have modified the state (e.g., to arrange for an
+* inferior call), thus it's important to check for restarting _after_
+* get_signal_to_deliver().
+*/
if (IS_IA32_PROCESS(&scr->pt)) {
if (in_syscall) {
if (errno >= 0)
@@ -554,15 +567,12 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
*/
restart = 0;
-while (1) {
-int signr = get_signal_to_deliver(&info, &scr->pt, NULL);
if (signr <= 0)
break;
ka = &current->sighand->action[signr - 1];
-if (restart) {
+if (unlikely(restart)) {
switch (errno) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
@@ -582,6 +592,7 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
scr->pt.cr_iip -= 2;
} else
ia64_decrement_ip(&scr->pt);
+restart = 0; /* don't restart twice if handle_signal() fails... */
}
}
...
@@ -215,21 +215,6 @@ ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
force_sig_info(sig, &siginfo, current);
}
-/*
-* Unimplemented system calls. This is called only for stuff that
-* we're supposed to implement but haven't done so yet. Everything
-* else goes to sys_ni_syscall.
-*
-* XXX Remove this for v2.6.1.
-*/
-asmlinkage long
-ia64_ni_syscall (unsigned long arg0, unsigned long arg1, unsigned long arg2, unsigned long arg3,
-unsigned long arg4, unsigned long arg5, unsigned long arg6, unsigned long arg7,
-unsigned long stack)
-{
-return -ENOSYS;
-}
/*
* disabled_fph_fault() is called when a user-level process attempts to access f32..f127
* and it doesn't own the fp-high register partition. When this happens, we save the
...
@@ -6,7 +6,11 @@
#include <asm/ptrace.h>
#include <asm/signal.h>
-#ifdef CONFIG_IA32_SUPPORT
+#define IA32_NR_syscalls 270 /* length of syscall table */
+#ifndef __ASSEMBLY__
+# ifdef CONFIG_IA32_SUPPORT
extern void ia32_cpu_init (void);
extern void ia32_boot_gdt_init (void);
@@ -15,10 +19,12 @@ extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs);
-#endif /* !CONFIG_IA32_SUPPORT */
+# endif /* !CONFIG_IA32_SUPPORT */
/* Declare this unconditionally, so we don't get warnings for unreachable code. */
extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs);
+#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_IA32_H */
@@ -64,7 +64,7 @@
#define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3) /* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4) /* generate SIGBUS on unaligned acc. */
-#define IA64_THREAD_KRBS_SYNCED (__IA64_UL(1) << 5) /* krbs synced with process vm? */
+/* bit 5 is currently unused */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */
#define IA64_THREAD_XSTACK (__IA64_UL(1) << 8) /* stack executable by default? */
...