Commit 32974ad4 authored by Tony Luck

[IA64] Remove COMPAT_IA32 support

This has been broken since May 2008 when Al Viro killed altroot support.
Since nobody has complained, it would appear that there are no users of
this code (a plausible theory since the main OSVs that support ia64 prefer
to use the IA-32 EL software emulation).
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 6339204e
@@ -499,23 +499,6 @@ config ARCH_PROC_KCORE_TEXT
def_bool y
depends on PROC_KCORE
config IA32_SUPPORT
bool "Support for Linux/x86 binaries"
help
IA-64 processors can execute IA-32 (X86) instructions. By
saying Y here, the kernel will include IA-32 system call
emulation support which makes it possible to transparently
run IA-32 Linux binaries on an IA-64 Linux system.
If in doubt, say Y.
config COMPAT
bool
depends on IA32_SUPPORT
default y
config COMPAT_FOR_U64_ALIGNMENT
def_bool COMPAT
config IA64_MCA_RECOVERY
tristate "MCA recovery from errors other than TLB."
...
@@ -46,7 +46,6 @@ head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
libs-y += arch/ia64/lib/
core-y += arch/ia64/kernel/ arch/ia64/mm/
core-$(CONFIG_IA32_SUPPORT) += arch/ia64/ia32/
core-$(CONFIG_IA64_DIG) += arch/ia64/dig/
core-$(CONFIG_IA64_DIG_VTD) += arch/ia64/dig/
core-$(CONFIG_IA64_GENERIC) += arch/ia64/dig/
...
@@ -131,8 +131,6 @@ CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
# CONFIG_VIRTUAL_MEM_MAP is not set
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
# CONFIG_IA64_MCA_RECOVERY is not set
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
...
@@ -205,8 +205,6 @@ CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=y
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_PERFMON=y
...
@@ -139,8 +139,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_NUMA=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
...
@@ -130,8 +130,6 @@ CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
# CONFIG_VIRTUAL_MEM_MAP is not set
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
# CONFIG_IA64_MCA_RECOVERY is not set
# CONFIG_PERFMON is not set
CONFIG_IA64_PALINFO=m
...
@@ -154,7 +154,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
# CONFIG_IA32_SUPPORT is not set
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
...
@@ -200,8 +200,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
# CONFIG_IA32_SUPPORT is not set
# CONFIG_COMPAT_FOR_U64_ALIGNMENT is not set
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
...
@@ -150,8 +150,6 @@ CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
CONFIG_IA64_MCA_RECOVERY=y
CONFIG_PERFMON=y
CONFIG_IA64_PALINFO=y
...
#
# Makefile for the ia32 kernel emulation subsystem.
#
obj-y := ia32_entry.o sys_ia32.o ia32_signal.o \
ia32_support.o ia32_traps.o binfmt_elf32.o ia32_ldt.o
obj-$(CONFIG_AUDIT) += audit.o
# Don't let GCC use f16-f31 so that save_ia32_fpstate_live() and
# restore_ia32_fpstate_live() can be sure the live registers contain user-level state.
CFLAGS_ia32_signal.o += -mfixed-range=f16-f31
#include "../../x86/include/asm/unistd_32.h"
unsigned ia32_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
unsigned ia32_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
unsigned ia32_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
unsigned ia32_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
unsigned ia32_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int ia32_classify_syscall(unsigned syscall)
{
switch(syscall) {
case __NR_open:
return 2;
case __NR_openat:
return 3;
case __NR_socketcall:
return 4;
case __NR_execve:
return 5;
default:
return 1;
}
}
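For context, the class codes returned above are the audit subsystem's syscall classes (1 = none, 2 = open-like, 3 = openat-like, 4 = socketcall, 5 = execve); they pair with the AUDIT_CLASS_*_32 registrations that this commit also removes from arch/ia64/kernel/audit.c. A minimal user-space sketch of the same classification, using the well-known i386 syscall numbers (illustrative only, not part of this commit):

#include <stdio.h>

/* i386 syscall numbers (stable ABI values) */
#define NR_OPEN       5
#define NR_EXECVE     11
#define NR_SOCKETCALL 102
#define NR_OPENAT     295

static int classify(unsigned syscall)
{
	switch (syscall) {
	case NR_OPEN:       return 2; /* open-like */
	case NR_OPENAT:     return 3; /* openat-like */
	case NR_SOCKETCALL: return 4; /* socket multiplexer */
	case NR_EXECVE:     return 5; /* exec */
	default:            return 1; /* no special class */
	}
}

int main(void)
{
	printf("open=%d openat=%d read=%d\n",
	       classify(NR_OPEN), classify(NR_OPENAT), classify(3 /* read */));
	return 0;
}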
/*
* IA-32 ELF support.
*
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 2001 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 06/16/00 A. Mallick initialize csd/ssd/tssd/cflg for ia32_load_state
* 04/13/01 D. Mosberger dropped saving tssd in ar.k1---it's not needed
* 09/14/01 D. Mosberger fixed memory management for gdt/tss page
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/security.h>
#include <asm/param.h>
#include <asm/signal.h>
#include "ia32priv.h"
#include "elfcore32.h"
/* Override some function names */
#undef start_thread
#define start_thread ia32_start_thread
#define elf_format elf32_format
#define init_elf_binfmt init_elf32_binfmt
#define exit_elf_binfmt exit_elf32_binfmt
#undef CLOCKS_PER_SEC
#define CLOCKS_PER_SEC IA32_CLOCKS_PER_SEC
extern void ia64_elf32_init (struct pt_regs *regs);
static void elf32_set_personality (void);
static unsigned long __attribute ((unused))
randomize_stack_top(unsigned long stack_top);
#define setup_arg_pages(bprm,tos,exec) ia32_setup_arg_pages(bprm,exec)
#define elf_map elf32_map
#undef SET_PERSONALITY
#define SET_PERSONALITY(ex) elf32_set_personality()
#define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
/* Ugly but avoids duplication */
#include "../../../fs/binfmt_elf.c"
extern struct page *ia32_shared_page[];
extern unsigned long *ia32_gdt;
extern struct page *ia32_gate_page;
int
ia32_install_shared_page (struct vm_area_struct *vma, struct vm_fault *vmf)
{
vmf->page = ia32_shared_page[smp_processor_id()];
get_page(vmf->page);
return 0;
}
int
ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf)
{
vmf->page = ia32_gate_page;
get_page(vmf->page);
return 0;
}
static const struct vm_operations_struct ia32_shared_page_vm_ops = {
.fault = ia32_install_shared_page
};
static const struct vm_operations_struct ia32_gate_page_vm_ops = {
.fault = ia32_install_gate_page
};
void
ia64_elf32_init (struct pt_regs *regs)
{
struct vm_area_struct *vma;
/*
* Map GDT below 4GB, where the processor can find it. We need to map
* it with privilege level 3 because the IVE uses non-privileged accesses to these
* tables. IA-32 segmentation is used to protect against IA-32 accesses to them.
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
vma->vm_mm = current->mm;
vma->vm_start = IA32_GDT_OFFSET;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = PAGE_SHARED;
vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED;
vma->vm_ops = &ia32_shared_page_vm_ops;
down_write(&current->mm->mmap_sem);
{
if (insert_vm_struct(current->mm, vma)) {
kmem_cache_free(vm_area_cachep, vma);
up_write(&current->mm->mmap_sem);
BUG();
}
}
up_write(&current->mm->mmap_sem);
}
/*
* When the user stack is not executable, pushing the sigreturn code onto
* the stack would raise a segmentation fault on the return to the kernel.
* The sigreturn code therefore lives in a dedicated gate page, which is
* what pretcode points at when setup_frame_ia32() builds the signal frame.
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
vma->vm_mm = current->mm;
vma->vm_start = IA32_GATE_OFFSET;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = PAGE_COPY_EXEC;
vma->vm_flags = VM_READ | VM_MAYREAD | VM_EXEC
| VM_MAYEXEC | VM_RESERVED;
vma->vm_ops = &ia32_gate_page_vm_ops;
down_write(&current->mm->mmap_sem);
{
if (insert_vm_struct(current->mm, vma)) {
kmem_cache_free(vm_area_cachep, vma);
up_write(&current->mm->mmap_sem);
BUG();
}
}
up_write(&current->mm->mmap_sem);
}
/*
* Install LDT as anonymous memory. This gives us all-zero segment descriptors
* until a task modifies them via modify_ldt().
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
vma->vm_mm = current->mm;
vma->vm_start = IA32_LDT_OFFSET;
vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
vma->vm_page_prot = PAGE_SHARED;
vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
down_write(&current->mm->mmap_sem);
{
if (insert_vm_struct(current->mm, vma)) {
kmem_cache_free(vm_area_cachep, vma);
up_write(&current->mm->mmap_sem);
BUG();
}
}
up_write(&current->mm->mmap_sem);
}
ia64_psr(regs)->ac = 0; /* turn off alignment checking */
regs->loadrs = 0;
/*
* According to the ABI, %edx points to an `atexit' handler. Since we don't
* have one, we set it to 0 and initialize all the other registers just to
* make things more deterministic, a la the i386 implementation.
*/
regs->r8 = 0; /* %eax */
regs->r11 = 0; /* %ebx */
regs->r9 = 0; /* %ecx */
regs->r10 = 0; /* %edx */
regs->r13 = 0; /* %ebp */
regs->r14 = 0; /* %esi */
regs->r15 = 0; /* %edi */
current->thread.eflag = IA32_EFLAG;
current->thread.fsr = IA32_FSR_DEFAULT;
current->thread.fcr = IA32_FCR_DEFAULT;
current->thread.fir = 0;
current->thread.fdr = 0;
/*
* Setup GDTD. Note: GDTD is the descrambled version of the pseudo-descriptor
* format defined by Figure 3-11 "Pseudo-Descriptor Format" in the IA-32
* architecture manual. Also note that the only fields that are not ignored are
* `base', `limit', 'G', `P' (must be 1) and `S' (must be 0).
*/
regs->r31 = IA32_SEG_UNSCRAMBLE(IA32_SEG_DESCRIPTOR(IA32_GDT_OFFSET, IA32_PAGE_SIZE - 1,
0, 0, 0, 1, 0, 0, 0));
/* Setup the segment selectors */
regs->r16 = (__USER_DS << 16) | __USER_DS; /* ES == DS, GS, FS are zero */
regs->r17 = (__USER_DS << 16) | __USER_CS; /* SS, CS; ia32_load_state() sets TSS and LDT */
ia32_load_segment_descriptors(current);
ia32_load_state(current);
}
/*
* Undo the override of setup_arg_pages(); without this,
* ia32_setup_arg_pages() would suffer infinite self-recursion.
*/
#undef setup_arg_pages
int
ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
{
int ret;
ret = setup_arg_pages(bprm, IA32_STACK_TOP, executable_stack);
if (!ret) {
/*
* Can't do it in ia64_elf32_init(). Needs to be done before
* calls to elf32_map()
*/
current->thread.ppl = ia32_init_pp_list();
}
return ret;
}
static void
elf32_set_personality (void)
{
set_personality(PER_LINUX32);
current->thread.map_base = IA32_PAGE_OFFSET/3;
}
static unsigned long
elf32_map(struct file *filep, unsigned long addr, struct elf_phdr *eppnt,
int prot, int type, unsigned long unused)
{
unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
return ia32_do_mmap(filep, (addr & IA32_PAGE_MASK), eppnt->p_filesz + pgoff, prot, type,
eppnt->p_offset - pgoff);
}
#define cpu_uses_ia32el() (local_cpu_data->family > 0x1f)
static int __init check_elf32_binfmt(void)
{
if (cpu_uses_ia32el()) {
printk("Please use IA-32 EL for executing IA-32 binaries\n");
unregister_binfmt(&elf_format);
}
return 0;
}
module_init(check_elf32_binfmt)
/*
* IA-32 ELF core dump support.
*
* Copyright (C) 2003 Arun Sharma <arun.sharma@intel.com>
*
* Derived from the x86_64 version
*/
#ifndef _ELFCORE32_H_
#define _ELFCORE32_H_
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
/* Override elfcore.h */
#define _LINUX_ELFCORE_H 1
typedef unsigned int elf_greg_t;
#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct ia32_user_i387_struct elf_fpregset_t;
typedef struct ia32_user_fxsr_struct elf_fpxregset_t;
struct elf_siginfo
{
int si_signo; /* signal number */
int si_code; /* extra code */
int si_errno; /* errno */
};
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
* Hacks are needed here because compat_timeval (a pair of s32) and the
* ia64-native timeval (a pair of s64) are not compatible; without them, the
* file arch/ia64/ia32/../../../fs/binfmt_elf.c gets compiler warnings on its
* use of cputime_to_timeval(), which is usually an alias of jiffies_to_timeval().
*/
#define cputime_to_timeval(a,b) \
do { (b)->tv_usec = 0; (b)->tv_sec = (a)/NSEC_PER_SEC; } while(0)
#else
#define jiffies_to_timeval(a,b) \
do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; } while(0)
#endif
struct elf_prstatus
{
struct elf_siginfo pr_info; /* Info associated with signal */
short pr_cursig; /* Current signal */
unsigned int pr_sigpend; /* Set of pending signals */
unsigned int pr_sighold; /* Set of held signals */
pid_t pr_pid;
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
struct compat_timeval pr_utime; /* User time */
struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime; /* Cumulative user time */
struct compat_timeval pr_cstime; /* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
#define ELF_PRARGSZ (80) /* Number of chars for args */
struct elf_prpsinfo
{
char pr_state; /* numeric process state */
char pr_sname; /* char for pr_state */
char pr_zomb; /* zombie */
char pr_nice; /* nice val */
unsigned int pr_flag; /* flags */
__u16 pr_uid;
__u16 pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
pr_reg[0] = regs->r11; \
pr_reg[1] = regs->r9; \
pr_reg[2] = regs->r10; \
pr_reg[3] = regs->r14; \
pr_reg[4] = regs->r15; \
pr_reg[5] = regs->r13; \
pr_reg[6] = regs->r8; \
pr_reg[7] = regs->r16 & 0xffff; \
pr_reg[8] = (regs->r16 >> 16) & 0xffff; \
pr_reg[9] = (regs->r16 >> 32) & 0xffff; \
pr_reg[10] = (regs->r16 >> 48) & 0xffff; \
pr_reg[11] = regs->r1; \
pr_reg[12] = regs->cr_iip; \
pr_reg[13] = regs->r17 & 0xffff; \
pr_reg[14] = ia64_getreg(_IA64_REG_AR_EFLAG); \
pr_reg[15] = regs->r12; \
pr_reg[16] = (regs->r17 >> 16) & 0xffff;
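The pr_reg index order above follows the i386 struct user_regs_struct layout; each slot is filled from the IA-64 register that the ia32 entry path uses to carry the corresponding x86 register. As an annotated sketch (mapping inferred from the assignments above and the entry-path comments):

/* Index order of pr_reg, per the i386 user_regs_struct layout: */
static const char *const ia32_core_reg_names[] = {
	"ebx",      /* [0]  regs->r11          */
	"ecx",      /* [1]  regs->r9           */
	"edx",      /* [2]  regs->r10          */
	"esi",      /* [3]  regs->r14          */
	"edi",      /* [4]  regs->r15          */
	"ebp",      /* [5]  regs->r13          */
	"eax",      /* [6]  regs->r8           */
	"ds", "es", "fs", "gs",   /* [7]-[10] packed in regs->r16 */
	"orig_eax", /* [11] regs->r1           */
	"eip",      /* [12] regs->cr_iip       */
	"cs",       /* [13] regs->r17 & 0xffff */
	"eflags",   /* [14] ar.eflag           */
	"esp",      /* [15] regs->r12          */
	"ss",       /* [16] (regs->r17 >> 16)  */
};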
static inline void elf_core_copy_regs(elf_gregset_t *elfregs,
struct pt_regs *regs)
{
ELF_CORE_COPY_REGS((*elfregs), regs)
}
static inline int elf_core_copy_task_regs(struct task_struct *t,
elf_gregset_t* elfregs)
{
ELF_CORE_COPY_REGS((*elfregs), task_pt_regs(t));
return 1;
}
static inline int
elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu)
{
struct ia32_user_i387_struct *fpstate = (void*)fpu;
mm_segment_t old_fs;
if (!tsk_used_math(tsk))
return 0;
old_fs = get_fs();
set_fs(KERNEL_DS);
save_ia32_fpstate(tsk, (struct ia32_user_i387_struct __user *) fpstate);
set_fs(old_fs);
return 1;
}
#define ELF_CORE_COPY_XFPREGS 1
#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
static inline int
elf_core_copy_task_xfpregs(struct task_struct *tsk, elf_fpxregset_t *xfpu)
{
struct ia32_user_fxsr_struct *fpxstate = (void*) xfpu;
mm_segment_t old_fs;
if (!tsk_used_math(tsk))
return 0;
old_fs = get_fs();
set_fs(KERNEL_DS);
save_ia32_fpxstate(tsk, (struct ia32_user_fxsr_struct __user *) fpxstate);
set_fs(old_fs);
return 1;
}
#endif /* _ELFCORE32_H_ */
/*
* Copyright (C) 2001, 2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* Adapted from arch/i386/kernel/ldt.c
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include "ia32priv.h"
/*
* read_ldt() is not really atomic - this is not a problem since synchronization of reads
* and writes done to the LDT has to be assured by user-space anyway. Writes are atomic,
* to protect the security checks done on new descriptors.
*/
static int
read_ldt (void __user *ptr, unsigned long bytecount)
{
unsigned long bytes_left, n;
char __user *src, *dst;
char buf[256]; /* temporary buffer (don't overflow kernel stack!) */
if (bytecount > IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE)
bytecount = IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE;
bytes_left = bytecount;
src = (void __user *) IA32_LDT_OFFSET;
dst = ptr;
while (bytes_left) {
n = sizeof(buf);
if (n > bytes_left)
n = bytes_left;
/*
* We know we're reading valid memory, but we still must guard against
* running out of memory.
*/
if (__copy_from_user(buf, src, n))
return -EFAULT;
if (copy_to_user(dst, buf, n))
return -EFAULT;
src += n;
dst += n;
bytes_left -= n;
}
return bytecount;
}
static int
read_default_ldt (void __user * ptr, unsigned long bytecount)
{
unsigned long size;
int err;
/* XXX fix me: should return equivalent of default_ldt[0] */
err = 0;
size = 8;
if (size > bytecount)
size = bytecount;
err = size;
if (clear_user(ptr, size))
err = -EFAULT;
return err;
}
static int
write_ldt (void __user * ptr, unsigned long bytecount, int oldmode)
{
struct ia32_user_desc ldt_info;
__u64 entry;
int ret;
if (bytecount != sizeof(ldt_info))
return -EINVAL;
if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
return -EFAULT;
if (ldt_info.entry_number >= IA32_LDT_ENTRIES)
return -EINVAL;
if (ldt_info.contents == 3) {
if (oldmode)
return -EINVAL;
if (ldt_info.seg_not_present == 0)
return -EINVAL;
}
if (ldt_info.base_addr == 0 && ldt_info.limit == 0
&& (oldmode || (ldt_info.contents == 0 && ldt_info.read_exec_only == 1
&& ldt_info.seg_32bit == 0 && ldt_info.limit_in_pages == 0
&& ldt_info.seg_not_present == 1 && ldt_info.useable == 0)))
/* allow LDTs to be cleared by the user */
entry = 0;
else
/* we must set the "Accessed" bit as IVE doesn't emulate it */
entry = IA32_SEG_DESCRIPTOR(ldt_info.base_addr, ldt_info.limit,
(((ldt_info.read_exec_only ^ 1) << 1)
| (ldt_info.contents << 2)) | 1,
1, 3, ldt_info.seg_not_present ^ 1,
(oldmode ? 0 : ldt_info.useable),
ldt_info.seg_32bit,
ldt_info.limit_in_pages);
/*
* Install the new entry. We know we're accessing valid (mapped) user-level
* memory, but we still need to guard against out-of-memory, hence we must use
* put_user().
*/
ret = __put_user(entry, (__u64 __user *) IA32_LDT_OFFSET + ldt_info.entry_number);
ia32_load_segment_descriptors(current);
return ret;
}
asmlinkage int
sys32_modify_ldt (int func, unsigned int ptr, unsigned int bytecount)
{
int ret = -ENOSYS;
switch (func) {
case 0:
ret = read_ldt(compat_ptr(ptr), bytecount);
break;
case 1:
ret = write_ldt(compat_ptr(ptr), bytecount, 1);
break;
case 2:
ret = read_default_ldt(compat_ptr(ptr), bytecount);
break;
case 0x11:
ret = write_ldt(compat_ptr(ptr), bytecount, 0);
break;
}
return ret;
}
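For reference, the func codes dispatched above mirror the native i386 modify_ldt interface (0 = read, 1 = legacy write, 2 = read default, 0x11 = write). A hedged user-space sketch of exercising it from the IA-32 side; modify_ldt has no libc wrapper, so it goes through syscall(2):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>            /* struct user_desc */

int main(void)
{
	struct user_desc d;
	memset(&d, 0, sizeof(d));
	d.entry_number = 0;
	d.read_exec_only = 1;       /* together with the zeros above, this is */
	d.seg_not_present = 1;      /* the "clear this LDT slot" encoding     */

	long w = syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));  /* write_ldt, new mode */
	char buf[16];
	long r = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));  /* read_ldt */
	printf("write=%ld read=%ld bytes\n", w, r);
	return 0;
}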
/*
* IA32 helper functions
*
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
* Copyright (C) 2001-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 06/16/00 A. Mallick added csd/ssd/tssd for ia32 thread context
* 02/19/01 D. Mosberger dropped tssd; it's not needed
* 09/14/01 D. Mosberger fixed memory management for gdt/tss page
* 09/29/01 D. Mosberger added ia32_load_segment_descriptors()
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include "ia32priv.h"
extern int die_if_kernel (char *str, struct pt_regs *regs, long err);
struct page *ia32_shared_page[NR_CPUS];
unsigned long *ia32_boot_gdt;
unsigned long *cpu_gdt_table[NR_CPUS];
struct page *ia32_gate_page;
static unsigned long
load_desc (u16 selector)
{
unsigned long *table, limit, index;
if (!selector)
return 0;
if (selector & IA32_SEGSEL_TI) {
table = (unsigned long *) IA32_LDT_OFFSET;
limit = IA32_LDT_ENTRIES;
} else {
table = cpu_gdt_table[smp_processor_id()];
limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]);
}
index = selector >> IA32_SEGSEL_INDEX_SHIFT;
if (index >= limit)
return 0;
return IA32_SEG_UNSCRAMBLE(table[index]);
}
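load_desc() performs the standard x86 selector decode: the low two bits are the requested privilege level, bit 2 (the TI bit) selects the LDT over the GDT, and the remaining bits index the chosen table. A worked sketch with the conventional encoding (the IA32_SEGSEL_* constants are assumed to match it):

#include <stdio.h>

#define SEGSEL_RPL_MASK    0x3  /* requested privilege level */
#define SEGSEL_TI          0x4  /* 1 = LDT, 0 = GDT */
#define SEGSEL_INDEX_SHIFT 3

int main(void)
{
	unsigned short sel = 0x2b;  /* e.g. the usual i386 __USER_DS: (5 << 3) | 3 */
	printf("index=%u table=%s rpl=%u\n",
	       sel >> SEGSEL_INDEX_SHIFT,
	       (sel & SEGSEL_TI) ? "LDT" : "GDT",
	       sel & SEGSEL_RPL_MASK);
	return 0;
}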
void
ia32_load_segment_descriptors (struct task_struct *task)
{
struct pt_regs *regs = task_pt_regs(task);
/* Setup the segment descriptors */
regs->r24 = load_desc(regs->r16 >> 16); /* ESD */
regs->r27 = load_desc(regs->r16 >> 0); /* DSD */
regs->r28 = load_desc(regs->r16 >> 32); /* FSD */
regs->r29 = load_desc(regs->r16 >> 48); /* GSD */
regs->ar_csd = load_desc(regs->r17 >> 0); /* CSD */
regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */
}
int
ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
{
struct desc_struct *desc;
struct ia32_user_desc info;
int idx;
if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info)))
return -EFAULT;
if (LDT_empty(&info))
return -EINVAL;
idx = info.entry_number;
if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
return -EINVAL;
desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
desc->a = LDT_entry_a(&info);
desc->b = LDT_entry_b(&info);
/* XXX: can this be done in a cleaner way ? */
load_TLS(&child->thread, smp_processor_id());
ia32_load_segment_descriptors(child);
load_TLS(&current->thread, smp_processor_id());
return 0;
}
void
ia32_save_state (struct task_struct *t)
{
t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR);
t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR);
t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR);
t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR);
ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
}
void
ia32_load_state (struct task_struct *t)
{
unsigned long eflag, fsr, fcr, fir, fdr, tssd;
struct pt_regs *regs = task_pt_regs(t);
eflag = t->thread.eflag;
fsr = t->thread.fsr;
fcr = t->thread.fcr;
fir = t->thread.fir;
fdr = t->thread.fdr;
tssd = load_desc(_TSS); /* TSSD */
ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
ia64_setreg(_IA64_REG_AR_FSR, fsr);
ia64_setreg(_IA64_REG_AR_FCR, fcr);
ia64_setreg(_IA64_REG_AR_FIR, fir);
ia64_setreg(_IA64_REG_AR_FDR, fdr);
current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
ia64_set_kr(IA64_KR_TSSD, tssd);
regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17;
regs->r30 = load_desc(_LDT); /* LDTD */
load_TLS(&t->thread, smp_processor_id());
}
/*
* Setup IA32 GDT and TSS
*/
void
ia32_gdt_init (void)
{
int cpu = smp_processor_id();
ia32_shared_page[cpu] = alloc_page(GFP_KERNEL);
if (!ia32_shared_page[cpu])
panic("failed to allocate ia32_shared_page[%d]\n", cpu);
cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]);
/* Copy from the boot cpu's GDT */
memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE);
}
/*
* Setup IA32 GDT and TSS
*/
static void
ia32_boot_gdt_init (void)
{
unsigned long ldt_size;
ia32_shared_page[0] = alloc_page(GFP_KERNEL);
if (!ia32_shared_page[0])
panic("failed to allocate ia32_shared_page[0]\n");
ia32_boot_gdt = page_address(ia32_shared_page[0]);
cpu_gdt_table[0] = ia32_boot_gdt;
/* CS descriptor in IA-32 (scrambled) format */
ia32_boot_gdt[__USER_CS >> 3]
= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
0xb, 1, 3, 1, 1, 1, 1);
/* DS descriptor in IA-32 (scrambled) format */
ia32_boot_gdt[__USER_DS >> 3]
= IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
0x3, 1, 3, 1, 1, 1, 1);
ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
0xb, 0, 3, 1, 1, 1, 0);
ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
0x2, 0, 3, 1, 1, 1, 0);
}
static void
ia32_gate_page_init(void)
{
unsigned long *sr;
ia32_gate_page = alloc_page(GFP_KERNEL);
sr = page_address(ia32_gate_page);
/* This is popl %eax ; movl $,%eax ; int $0x80 */
*sr++ = 0xb858 | (__IA32_NR_sigreturn << 16) | (0x80cdUL << 48);
/* This is movl $,%eax ; int $0x80 */
*sr = 0xb8 | (__IA32_NR_rt_sigreturn << 8) | (0x80cdUL << 40);
}
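The two 64-bit stores above are little-endian x86 machine code packed into words. Decoding the first word byte by byte (a sketch; 119 is the i386 __NR_sigreturn value, assumed here for __IA32_NR_sigreturn):

#include <stdio.h>

int main(void)
{
	unsigned long nr_sigreturn = 119;   /* i386 __NR_sigreturn */
	unsigned long word = 0xb858 | (nr_sigreturn << 16) | (0x80cdUL << 48);
	const unsigned char *b = (const unsigned char *)&word;

	/* On a little-endian host the bytes come out as:
	 *   58             popl %eax
	 *   b8 77 00 00 00 movl $119, %eax
	 *   cd 80          int  $0x80
	 */
	for (int i = 0; i < 8; i++)
		printf("%02x ", b[i]);
	printf("\n");
	return 0;
}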
void
ia32_mem_init(void)
{
ia32_boot_gdt_init();
ia32_gate_page_init();
}
/*
* Handle bad IA32 interrupt via syscall
*/
void
ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
{
siginfo_t siginfo;
if (die_if_kernel("Bad IA-32 interrupt", regs, int_num))
return;
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = int_num; /* XXX is it OK to abuse si_errno like this? */
siginfo.si_flags = 0;
siginfo.si_isr = 0;
siginfo.si_addr = NULL;
siginfo.si_imm = 0;
siginfo.si_code = TRAP_BRKPT;
force_sig_info(SIGTRAP, &siginfo, current);
}
void
ia32_cpu_init (void)
{
/* initialize global ia32 state - CR0 and CR4 */
ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
}
static int __init
ia32_init (void)
{
#if PAGE_SHIFT > IA32_PAGE_SHIFT
{
extern struct kmem_cache *ia64_partial_page_cachep;
ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
sizeof(struct ia64_partial_page),
0, SLAB_PANIC, NULL);
}
#endif
return 0;
}
__initcall(ia32_init);
/*
* IA-32 exception handlers
*
* Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
* Copyright (C) 2001-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 06/16/00 A. Mallick added siginfo for most cases (close to IA32)
* 09/29/00 D. Mosberger added ia32_intercept()
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include "ia32priv.h"
#include <asm/intrinsics.h>
#include <asm/ptrace.h>
int
ia32_intercept (struct pt_regs *regs, unsigned long isr)
{
switch ((isr >> 16) & 0xff) {
case 0: /* Instruction intercept fault */
case 4: /* Locked Data reference fault */
case 1: /* Gate intercept trap */
return -1;
case 2: /* System flag trap */
if (((isr >> 14) & 0x3) >= 2) {
/* MOV SS, POP SS instructions */
ia64_psr(regs)->id = 1;
return 0;
} else
return -1;
}
return -1;
}
int
ia32_exception (struct pt_regs *regs, unsigned long isr)
{
struct siginfo siginfo;
/* initialize these fields to avoid leaking kernel bits to user space: */
siginfo.si_errno = 0;
siginfo.si_flags = 0;
siginfo.si_isr = 0;
siginfo.si_imm = 0;
switch ((isr >> 16) & 0xff) {
case 1:
case 2:
siginfo.si_signo = SIGTRAP;
if (isr == 0)
siginfo.si_code = TRAP_TRACE;
else if (isr & 0x4)
siginfo.si_code = TRAP_BRANCH;
else
siginfo.si_code = TRAP_BRKPT;
break;
case 3:
siginfo.si_signo = SIGTRAP;
siginfo.si_code = TRAP_BRKPT;
break;
case 0: /* Divide fault */
siginfo.si_signo = SIGFPE;
siginfo.si_code = FPE_INTDIV;
break;
case 4: /* Overflow */
case 5: /* Bounds fault */
siginfo.si_signo = SIGFPE;
siginfo.si_code = 0;
break;
case 6: /* Invalid Op-code */
siginfo.si_signo = SIGILL;
siginfo.si_code = ILL_ILLOPN;
break;
case 7: /* FP DNA */
case 8: /* Double Fault */
case 9: /* Invalid TSS */
case 11: /* Segment not present */
case 12: /* Stack fault */
case 13: /* General Protection Fault */
siginfo.si_signo = SIGSEGV;
siginfo.si_code = 0;
break;
case 16: /* Pending FP error */
{
unsigned long fsr, fcr;
fsr = ia64_getreg(_IA64_REG_AR_FSR);
fcr = ia64_getreg(_IA64_REG_AR_FCR);
siginfo.si_signo = SIGFPE;
/*
* (~fcr & fsr) will mask out exceptions that are not set to unmasked
* status. 0x3f is the exception bits in these regs, 0x200 is the
* C1 bit you need in case of a stack fault, and 0x040 is the stack
* fault bit. We should only be taking one exception at a time,
* so if this combination doesn't produce any single exception,
* then we have a bad program that isn't synchronizing its FPU usage
* and it will suffer the consequences, since we won't be able to
* fully reproduce the context of the exception.
*/
siginfo.si_isr = isr;
siginfo.si_flags = __ISR_VALID;
switch(((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) {
case 0x000:
default:
siginfo.si_code = 0;
break;
case 0x001: /* Invalid Op */
case 0x040: /* Stack Fault */
case 0x240: /* Stack Fault | Direction */
siginfo.si_code = FPE_FLTINV;
break;
case 0x002: /* Denormalize */
case 0x010: /* Underflow */
siginfo.si_code = FPE_FLTUND;
break;
case 0x004: /* Zero Divide */
siginfo.si_code = FPE_FLTDIV;
break;
case 0x008: /* Overflow */
siginfo.si_code = FPE_FLTOVF;
break;
case 0x020: /* Precision */
siginfo.si_code = FPE_FLTRES;
break;
}
break;
}
case 17: /* Alignment check */
siginfo.si_signo = SIGSEGV;
siginfo.si_code = BUS_ADRALN;
break;
case 19: /* SSE Numeric error */
siginfo.si_signo = SIGFPE;
siginfo.si_code = 0;
break;
default:
return -1;
}
force_sig_info(siginfo.si_signo, &siginfo, current);
return 0;
}
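The case 16 decode above ands the status word (fsr) with the inverted mask bits from the control word (fcr), so only unmasked pending exceptions survive, then folds in the stack-fault (0x040) and C1 direction (0x200) bits. A standalone sketch of the same computation (plain ints stand in for the FPE_* codes):

#include <stdio.h>

static int fp_si_code(unsigned long fcr, unsigned long fsr)
{
	switch (((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) {
	case 0x001: case 0x040: case 0x240: return 1; /* invalid op / stack fault */
	case 0x002: case 0x010:             return 2; /* denormal / underflow */
	case 0x004:                         return 3; /* zero divide */
	case 0x008:                         return 4; /* overflow */
	case 0x020:                         return 5; /* precision */
	default:                            return 0;
	}
}

int main(void)
{
	/* zero-divide pending (fsr bit 2) and unmasked (fcr bit 2 clear): decoded */
	printf("%d\n", fp_si_code(0x00, 0x04));  /* -> 3 */
	/* same status, but masked in fcr: nothing decoded */
	printf("%d\n", fp_si_code(0x04, 0x04));  /* -> 0 */
	return 0;
}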
#ifndef _ASM_IA64_IA32_H
#define _ASM_IA64_IA32_H
#include <asm/ptrace.h>
#include <asm/signal.h>
#define IA32_NR_syscalls 285 /* length of syscall table */
#define IA32_PAGE_SHIFT 12 /* 4KB pages */
#ifndef __ASSEMBLY__
# ifdef CONFIG_IA32_SUPPORT
#define IA32_PAGE_OFFSET 0xc0000000
extern void ia32_cpu_init (void);
extern void ia32_mem_init (void);
extern void ia32_gdt_init (void);
extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs);
# endif /* !CONFIG_IA32_SUPPORT */
/* Declare this unconditionally, so we don't get warnings for unreachable code. */
extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs);
#if PAGE_SHIFT > IA32_PAGE_SHIFT
extern int ia32_copy_ia64_partial_page_list(struct task_struct *,
unsigned long);
extern void ia32_drop_ia64_partial_page_list(struct task_struct *);
#else
# define ia32_copy_ia64_partial_page_list(a1, a2) 0
# define ia32_drop_ia64_partial_page_list(a1) do { ; } while (0)
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_IA32_H */
@@ -270,23 +270,6 @@ typedef struct {
(int __user *) (addr)); \
})
#ifdef CONFIG_IA32_SUPPORT
struct desc_struct {
unsigned int a, b;
};
#define desc_empty(desc) (!((desc)->a | (desc)->b))
#define desc_equal(desc1, desc2) (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
#define GDT_ENTRY_TLS_ENTRIES 3
#define GDT_ENTRY_TLS_MIN 6
#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
struct ia64_partial_page_list;
#endif
struct thread_struct {
__u32 flags; /* various thread flags (see IA64_THREAD_*) */
/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
@@ -298,29 +281,6 @@ struct thread_struct {
__u64 rbs_bot; /* the base address for the RBS */
int last_fph_cpu; /* CPU that may hold the contents of f32-f127 */
#ifdef CONFIG_IA32_SUPPORT
__u64 eflag; /* IA32 EFLAGS reg */
__u64 fsr; /* IA32 floating pt status reg */
__u64 fcr; /* IA32 floating pt control reg */
__u64 fir; /* IA32 fp except. instr. reg */
__u64 fdr; /* IA32 fp except. data reg */
__u64 old_k1; /* old value of ar.k1 */
__u64 old_iob; /* old IOBase value */
struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */
/* cached TLS descriptors. */
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
# define INIT_THREAD_IA32 .eflag = 0, \
.fsr = 0, \
.fcr = 0x17800000037fULL, \
.fir = 0, \
.fdr = 0, \
.old_k1 = 0, \
.old_iob = 0, \
.ppl = NULL,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
void *pfm_context; /* pointer to detailed PMU context */
unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */
@@ -342,7 +302,6 @@ struct thread_struct {
.rbs_bot = STACK_TOP - DEFAULT_USER_STACK_SIZE, \
.task_size = DEFAULT_TASK_SIZE, \
.last_fph_cpu = -1, \
INIT_THREAD_IA32 \
INIT_THREAD_PM \
.dbr = {0, }, \
.ibr = {0, }, \
@@ -485,11 +444,6 @@ extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);
#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif
#define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
...
@@ -22,33 +22,18 @@ static inline long syscall_get_nr(struct task_struct *task,
if ((long)regs->cr_ifs < 0) /* Not a syscall */
return -1;
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(regs))
return regs->r1;
#endif
return regs->r15;
}
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(regs))
regs->r8 = regs->r1;
#endif
/* do nothing */
}
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(regs))
return regs->r8;
#endif
return regs->r10 == -1 ? regs->r8:0;
}
@@ -62,13 +47,6 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(regs)) {
regs->r8 = (long) error ? error : val;
return;
}
#endif
if (error) {
/* error < 0, but ia64 uses > 0 return value */
regs->r8 = -error;
@@ -89,37 +67,6 @@ static inline void syscall_get_arguments(struct task_struct *task,
{
BUG_ON(i + n > 6);
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(regs)) {
switch (i + n) {
case 6:
if (!n--) break;
*args++ = regs->r13;
case 5:
if (!n--) break;
*args++ = regs->r15;
case 4:
if (!n--) break;
*args++ = regs->r14;
case 3:
if (!n--) break;
*args++ = regs->r10;
case 2:
if (!n--) break;
*args++ = regs->r9;
case 1:
if (!n--) break;
*args++ = regs->r11;
case 0:
if (!n--) break;
default:
BUG();
break;
}
return;
}
#endif
ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
}
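The #ifdef block just removed used a deliberate fall-through switch: execution enters at case i + n and copies arguments highest-index first, decrementing n at each step until it hits zero. A standalone sketch of the idiom (illustrative, not kernel code):

/* Copy n arguments starting at index i out of a 6-slot register file,
 * entering the switch high and falling through downward. */
static void get_args(const unsigned long reg[6], unsigned i, unsigned n,
		     unsigned long *args)
{
	switch (i + n) {
	case 6: if (!n--) break; *args++ = reg[5]; /* fall through */
	case 5: if (!n--) break; *args++ = reg[4]; /* fall through */
	case 4: if (!n--) break; *args++ = reg[3]; /* fall through */
	case 3: if (!n--) break; *args++ = reg[2]; /* fall through */
	case 2: if (!n--) break; *args++ = reg[1]; /* fall through */
	case 1: if (!n--) break; *args++ = reg[0]; /* fall through */
	case 0: break;
	}
}

Note that, like the removed original, this fills args[] from the highest-numbered argument down.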
@@ -130,34 +77,6 @@ static inline void syscall_set_arguments(struct task_struct *task,
{
BUG_ON(i + n > 6);
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(regs)) {
switch (i + n) {
case 6:
if (!n--) break;
regs->r13 = *args++;
case 5:
if (!n--) break;
regs->r15 = *args++;
case 4:
if (!n--) break;
regs->r14 = *args++;
case 3:
if (!n--) break;
regs->r10 = *args++;
case 2:
if (!n--) break;
regs->r9 = *args++;
case 1:
if (!n--) break;
regs->r11 = *args++;
case 0:
if (!n--) break;
}
return;
}
#endif
ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
}
#endif /* _ASM_SYSCALL_H */
@@ -191,15 +191,6 @@ do { \
#ifdef __KERNEL__
#ifdef CONFIG_IA32_SUPPORT
# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
#else
# define IS_IA32_PROCESS(regs) 0
struct task_struct;
static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){}
static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){}
#endif
/*
* Context switch from one thread to another. If the two threads have
* different address spaces, schedule() has already taken care of
@@ -233,7 +224,7 @@ extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct
#define IA64_HAS_EXTRA_STATE(t) \
((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
- || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
+ || PERFMON_IS_SYSWIDE())
#define __switch_to(prev,next,last) do { \
IA64_ACCOUNT_ON_SWITCH(prev, next); \
...
@@ -335,20 +335,6 @@
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#ifdef CONFIG_IA32_SUPPORT
# define __ARCH_WANT_SYS_FADVISE64
# define __ARCH_WANT_SYS_GETPGRP
# define __ARCH_WANT_SYS_LLSEEK
# define __ARCH_WANT_SYS_NICE
# define __ARCH_WANT_SYS_OLD_GETRLIMIT
# define __ARCH_WANT_SYS_OLDUMOUNT
# define __ARCH_WANT_SYS_PAUSE
# define __ARCH_WANT_SYS_SIGPENDING
# define __ARCH_WANT_SYS_SIGPROCMASK
# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
# define __ARCH_WANT_COMPAT_SYS_TIME
#endif
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
#include <linux/types.h>
...
@@ -30,20 +30,11 @@ static unsigned signal_class[] = {
int audit_classify_arch(int arch)
{
#ifdef CONFIG_IA32_SUPPORT
if (arch == AUDIT_ARCH_I386)
return 1;
#endif
return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
#ifdef CONFIG_IA32_SUPPORT
extern int ia32_classify_syscall(unsigned);
if (abi == AUDIT_ARCH_I386)
return ia32_classify_syscall(syscall);
#endif
switch(syscall) {
case __NR_open:
return 2;
@@ -58,18 +49,6 @@ int audit_classify_syscall(int abi, unsigned syscall)
static int __init audit_classes_init(void)
{
#ifdef CONFIG_IA32_SUPPORT
extern __u32 ia32_dir_class[];
extern __u32 ia32_write_class[];
extern __u32 ia32_read_class[];
extern __u32 ia32_chattr_class[];
extern __u32 ia32_signal_class[];
audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class);
audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class);
audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL_32, ia32_signal_class);
#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
...
@@ -71,15 +71,6 @@ ENTRY(ia64_execve)
add out3=16,sp // regs
br.call.sptk.many rp=sys_execve
.ret0:
#ifdef CONFIG_IA32_SUPPORT
/*
* Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
* from pt_regs.
*/
adds r16=PT(CR_IPSR)+16,sp
;;
ld8 r16=[r16]
#endif
cmp4.ge p6,p7=r8,r0
mov ar.pfs=loc1 // restore ar.pfs
sxt4 r8=r8 // return 64-bit result
@@ -108,12 +99,6 @@ ENTRY(ia64_execve)
ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
#ifdef CONFIG_IA32_SUPPORT
tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
movl loc0=ia64_ret_from_ia32_execve
;;
(p6) mov rp=loc0
#endif
br.ret.sptk.many rp
END(ia64_execve)
@@ -848,30 +833,6 @@ __paravirt_work_processed_syscall:
br.cond.sptk.many rbs_switch // B
END(__paravirt_leave_syscall)
#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
#ifdef CONFIG_IA32_SUPPORT
GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
PT_REGS_UNWIND_INFO(0)
adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
;;
.mem.offset 0,0
st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
.mem.offset 8,0
st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
#ifdef CONFIG_PARAVIRT
;;
// don't fall through, ia64_leave_kernel may be #define'd
br.cond.sptk.few ia64_leave_kernel
;;
#endif /* CONFIG_PARAVIRT */
END(ia64_ret_from_ia32_execve)
#ifndef CONFIG_PARAVIRT
// fall through
#endif
#endif /* CONFIG_IA32_SUPPORT */
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
GLOBAL_ENTRY(__paravirt_leave_kernel)
PT_REGS_UNWIND_INFO(0)
/*
...
@@ -49,7 +49,6 @@
#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/ia32.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
@@ -1386,28 +1385,6 @@ END(ia32_exception)
// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
ENTRY(ia32_intercept)
DBG_FAULT(46)
#ifdef CONFIG_IA32_SUPPORT
mov r31=pr
MOV_FROM_ISR(r16)
;;
extr.u r17=r16,16,8 // get ISR.code
mov r18=ar.eflag
MOV_FROM_IIM(r19) // old eflag value
;;
cmp.ne p6,p0=2,r17
(p6) br.cond.spnt 1f // not a system flag fault
xor r16=r18,r19
;;
extr.u r17=r16,18,1 // get the eflags.ac bit
;;
cmp.eq p6,p0=0,r17
(p6) br.cond.spnt 1f // eflags.ac bit didn't change
;;
mov pr=r31,-1 // restore predicate registers
RFI
1:
#endif // CONFIG_IA32_SUPPORT
FAULT(46)
END(ia32_intercept)
@@ -1416,12 +1393,7 @@ END(ia32_intercept)
// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
ENTRY(ia32_interrupt)
DBG_FAULT(47)
#ifdef CONFIG_IA32_SUPPORT
mov r31=pr
br.sptk.many dispatch_to_ia32_handler
#else
FAULT(47)
#endif
END(ia32_interrupt)
.org ia64_ivt+0x6c00
@@ -1715,89 +1687,3 @@ ENTRY(dispatch_illegal_op_fault)
(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
br.sptk.many ia64_leave_kernel
END(dispatch_illegal_op_fault)
#ifdef CONFIG_IA32_SUPPORT
/*
* There is no particular reason for this code to be here, other than that
* there happens to be space here that would go unused otherwise. If this
* fault ever gets "unreserved", simply move the following code to a more
* suitable spot...
*/
// IA32 interrupt entry point
ENTRY(dispatch_to_ia32_handler)
SAVE_MIN
;;
MOV_FROM_ISR(r14)
SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
// guarantee that interruption collection is on
;;
SSM_PSR_I(p15, p15, r3)
adds r3=8,r2 // Base pointer for SAVE_REST
;;
SAVE_REST
;;
mov r15=0x80
shr r14=r14,16 // Get interrupt number
;;
cmp.ne p6,p0=r14,r15
(p6) br.call.dpnt.many b6=non_ia32_syscall
adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
;;
cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
ld8 r8=[r14] // get r8
;;
st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
;;
alloc r15=ar.pfs,0,0,6,0 // must first in an insn group
;;
ld4 r8=[r14],8 // r8 == eax (syscall number)
mov r15=IA32_NR_syscalls
;;
cmp.ltu.unc p6,p7=r8,r15
ld4 out1=[r14],8 // r9 == ecx
;;
ld4 out2=[r14],8 // r10 == edx
;;
ld4 out0=[r14] // r11 == ebx
adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
;;
ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp
;;
ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld4 out4=[r14] // r15 == edi
movl r16=ia32_syscall_table
;;
(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
ld4 r2=[r2] // r2 = current_thread_info()->flags
;;
ld8 r16=[r16]
and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
;;
mov b6=r16
movl r15=ia32_ret_from_syscall
cmp.eq p8,p0=r2,r0
;;
mov rp=r15
(p8) br.call.sptk.many b6=b6
br.cond.sptk ia32_trace_syscall
non_ia32_syscall:
alloc r15=ar.pfs,0,0,2,0
mov out0=r14 // interrupt #
add out1=16,sp // pointer to pt_regs
;; // avoid WAW on CFM
br.call.sptk.many rp=ia32_bad_interrupt
.ret1: movl r15=ia64_leave_kernel
;;
mov rp=r15
br.ret.sptk.many rp
END(dispatch_to_ia32_handler)
#endif /* CONFIG_IA32_SUPPORT */
@@ -33,7 +33,6 @@
#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/elf.h>
#include <asm/ia32.h>
#include <asm/irq.h>
#include <asm/kexec.h>
#include <asm/pgalloc.h>
@@ -358,11 +357,6 @@ ia64_save_extra (struct task_struct *task)
if (info & PFM_CPUINFO_SYST_WIDE)
pfm_syst_wide_update_task(task, info, 0);
#endif
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(task_pt_regs(task)))
ia32_save_state(task);
#endif
}
void
@@ -383,11 +377,6 @@ ia64_load_extra (struct task_struct *task)
if (info & PFM_CPUINFO_SYST_WIDE)
pfm_syst_wide_update_task(task, info, 1);
#endif
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(task_pt_regs(task)))
ia32_load_state(task);
#endif
}
/*
@@ -426,7 +415,7 @@ copy_thread(unsigned long clone_flags,
unsigned long user_stack_base, unsigned long user_stack_size,
struct task_struct *p, struct pt_regs *regs)
{
- extern char ia64_ret_from_clone, ia32_ret_from_clone;
+ extern char ia64_ret_from_clone;
struct switch_stack *child_stack, *stack;
unsigned long rbs, child_rbs, rbs_size;
struct pt_regs *child_ptregs;
@@ -457,7 +446,7 @@ copy_thread(unsigned long clone_flags,
memcpy((void *) child_rbs, (void *) rbs, rbs_size);
if (likely(user_mode(child_ptregs))) {
- if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs))
+ if (clone_flags & CLONE_SETTLS)
child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */
if (user_stack_base) {
child_ptregs->r12 = user_stack_base + user_stack_size - 16;
@@ -477,10 +466,7 @@ copy_thread(unsigned long clone_flags,
child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */
}
child_stack->ar_bspstore = child_rbs + rbs_size;
- if (IS_IA32_PROCESS(regs))
- child_stack->b0 = (unsigned long) &ia32_ret_from_clone;
- else
- child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
+ child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
/* copy parts of thread_struct: */
p->thread.ksp = (unsigned long) child_stack - 16;
@@ -515,22 +501,6 @@ copy_thread(unsigned long clone_flags,
p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
| THREAD_FLAGS_TO_SET);
ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
#ifdef CONFIG_IA32_SUPPORT
/*
* If we're cloning an IA32 task then save the IA32 extra
* state from the current task to the new task
*/
if (IS_IA32_PROCESS(task_pt_regs(current))) {
ia32_save_state(p);
if (clone_flags & CLONE_SETTLS)
retval = ia32_clone_tls(p, child_ptregs);
/* Copy partially mapped page list */
if (!retval)
retval = ia32_copy_ia64_partial_page_list(p,
clone_flags);
}
#endif
#ifdef CONFIG_PERFMON
if (current->thread.pfm_context)
@@ -704,15 +674,6 @@ EXPORT_SYMBOL(kernel_thread);
int
kernel_thread_helper (int (*fn)(void *), void *arg)
{
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(task_pt_regs(current))) {
/* A kernel thread is always a 64-bit process. */
current->thread.map_base = DEFAULT_MAP_BASE;
current->thread.task_size = DEFAULT_TASK_SIZE;
ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
}
#endif
return (*fn)(arg);
}
@@ -725,14 +686,6 @@ flush_thread (void)
/* drop floating-point and debug-register state if it exists: */
current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
ia64_drop_fpu(current);
#ifdef CONFIG_IA32_SUPPORT
if (IS_IA32_PROCESS(task_pt_regs(current))) {
ia32_drop_ia64_partial_page_list(current);
current->thread.task_size = IA32_PAGE_OFFSET;
set_fs(USER_DS);
memset(current->thread.tls_array, 0, sizeof(current->thread.tls_array));
}
#endif
}
/*
@@ -753,8 +706,6 @@ exit_thread (void)
if (current->thread.flags & IA64_THREAD_DBG_VALID)
pfm_release_debug_registers(current);
#endif
if (IS_IA32_PROCESS(task_pt_regs(current)))
ia32_drop_ia64_partial_page_list(current);
}
unsigned long
...
@@ -1250,13 +1250,8 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
long syscall;
int arch;
- if (IS_IA32_PROCESS(&regs)) {
- syscall = regs.r1;
- arch = AUDIT_ARCH_I386;
- } else {
- syscall = regs.r15;
- arch = AUDIT_ARCH_IA64;
- }
+ syscall = regs.r15;
+ arch = AUDIT_ARCH_IA64;
audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
}
@@ -2172,11 +2167,6 @@ static const struct user_regset_view user_ia64_view = {
const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
#ifdef CONFIG_IA32_SUPPORT
extern const struct user_regset_view user_ia32_view;
if (IS_IA32_PROCESS(task_pt_regs(tsk)))
return &user_ia32_view;
#endif
return &user_ia64_view;
}
...
@@ -46,7 +46,6 @@
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
@@ -1016,10 +1015,6 @@ cpu_init (void)
ia64_mmu_init(ia64_imva(cpu_data));
ia64_mca_cpu_init(ia64_imva(cpu_data));
#ifdef CONFIG_IA32_SUPPORT
ia32_cpu_init();
#endif
/* Clear ITC to eliminate sched_clock() overflows in human time. */
ia64_set_itc(0);
...
@@ -21,7 +21,6 @@
#include <linux/unistd.h>
#include <linux/wait.h>
#include <asm/ia32.h>
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
#include <asm/rse.h>
@@ -425,14 +424,8 @@ static long
handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset,
struct sigscratch *scr)
{
- if (IS_IA32_PROCESS(&scr->pt)) {
- /* send signal to IA-32 process */
- if (!ia32_setup_frame1(sig, ka, info, oldset, &scr->pt))
- return 0;
- } else
- /* send signal to IA-64 process */
- if (!setup_frame(sig, ka, info, oldset, scr))
- return 0;
+ if (!setup_frame(sig, ka, info, oldset, scr))
+ return 0;
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
@@ -462,7 +455,6 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
siginfo_t info;
long restart = in_syscall;
long errno = scr->pt.r8;
# define ERR_CODE(c) (IS_IA32_PROCESS(&scr->pt) ? -(c) : (c))
/* /*
* In the ia64_leave_kernel code path, we want the common case to go fast, which * In the ia64_leave_kernel code path, we want the common case to go fast, which
...@@ -490,14 +482,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) ...@@ -490,14 +482,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
* inferior call), thus it's important to check for restarting _after_ * inferior call), thus it's important to check for restarting _after_
* get_signal_to_deliver(). * get_signal_to_deliver().
*/ */
if (IS_IA32_PROCESS(&scr->pt)) { if ((long) scr->pt.r10 != -1)
if (in_syscall) {
if (errno >= 0)
restart = 0;
else
errno = -errno;
}
} else if ((long) scr->pt.r10 != -1)
/* /*
* A system call has to be restarted only if one of the error codes * A system call has to be restarted only if one of the error codes
* ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10
...@@ -513,22 +498,18 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) ...@@ -513,22 +498,18 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
switch (errno) { switch (errno) {
case ERESTART_RESTARTBLOCK: case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND: case ERESTARTNOHAND:
scr->pt.r8 = ERR_CODE(EINTR); scr->pt.r8 = EINTR;
/* note: scr->pt.r10 is already -1 */ /* note: scr->pt.r10 is already -1 */
break; break;
case ERESTARTSYS: case ERESTARTSYS:
if ((ka.sa.sa_flags & SA_RESTART) == 0) { if ((ka.sa.sa_flags & SA_RESTART) == 0) {
scr->pt.r8 = ERR_CODE(EINTR); scr->pt.r8 = EINTR;
/* note: scr->pt.r10 is already -1 */ /* note: scr->pt.r10 is already -1 */
break; break;
} }
case ERESTARTNOINTR: case ERESTARTNOINTR:
if (IS_IA32_PROCESS(&scr->pt)) { ia64_decrement_ip(&scr->pt);
scr->pt.r8 = scr->pt.r1;
scr->pt.cr_iip -= 2;
} else
ia64_decrement_ip(&scr->pt);
restart = 0; /* don't restart twice if handle_signal() fails... */ restart = 0; /* don't restart twice if handle_signal() fails... */
} }
} }
...@@ -555,21 +536,14 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall) ...@@ -555,21 +536,14 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR
|| errno == ERESTART_RESTARTBLOCK) || errno == ERESTART_RESTARTBLOCK)
{ {
if (IS_IA32_PROCESS(&scr->pt)) { /*
scr->pt.r8 = scr->pt.r1; * Note: the syscall number is in r15 which is saved in
scr->pt.cr_iip -= 2; * pt_regs so all we need to do here is adjust ip so that
if (errno == ERESTART_RESTARTBLOCK) * the "break" instruction gets re-executed.
scr->pt.r8 = 0; /* x86 version of __NR_restart_syscall */ */
} else { ia64_decrement_ip(&scr->pt);
/* if (errno == ERESTART_RESTARTBLOCK)
* Note: the syscall number is in r15 which is saved in scr->pt.r15 = __NR_restart_syscall;
* pt_regs so all we need to do here is adjust ip so that
* the "break" instruction gets re-executed.
*/
ia64_decrement_ip(&scr->pt);
if (errno == ERESTART_RESTARTBLOCK)
scr->pt.r15 = __NR_restart_syscall;
}
} }
} }
......
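Every signal.c hunk above deletes the IA-32 leg of a two-way branch. The surviving restart logic, condensed from the hunks (a sketch, not the full ia64_do_signal()), relies on native ia64 keeping the syscall number in r15, which is saved in pt_regs, so restarting only requires stepping the instruction pointer back so the "break" instruction re-executes:

        ia64_decrement_ip(&scr->pt);
        if (errno == ERESTART_RESTARTBLOCK)
                scr->pt.r15 = __NR_restart_syscall;

The removed ERR_CODE() macro existed only because IA-32 frames return negated error codes in r8, whereas native ia64 returns the positive code with r10 set to -1; with one convention left, EINTR is stored directly.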
...@@ -44,7 +44,6 @@ ...@@ -44,7 +44,6 @@
#include <asm/cache.h> #include <asm/cache.h>
#include <asm/current.h> #include <asm/current.h>
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/ia32.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/machvec.h> #include <asm/machvec.h>
...@@ -443,10 +442,6 @@ smp_callin (void) ...@@ -443,10 +442,6 @@ smp_callin (void)
calibrate_delay(); calibrate_delay();
local_cpu_data->loops_per_jiffy = loops_per_jiffy; local_cpu_data->loops_per_jiffy = loops_per_jiffy;
#ifdef CONFIG_IA32_SUPPORT
ia32_gdt_init();
#endif
/* /*
* Allow the master to continue. * Allow the master to continue.
*/ */
......
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include <linux/kdebug.h> #include <linux/kdebug.h>
#include <asm/fpswa.h> #include <asm/fpswa.h>
#include <asm/ia32.h>
#include <asm/intrinsics.h> #include <asm/intrinsics.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -626,10 +625,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, ...@@ -626,10 +625,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
break; break;
case 45: case 45:
#ifdef CONFIG_IA32_SUPPORT
if (ia32_exception(&regs, isr) == 0)
return;
#endif
printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n"); printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
iip, ifa, isr); iip, ifa, isr);
...@@ -637,10 +632,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa, ...@@ -637,10 +632,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
break; break;
case 46: case 46:
#ifdef CONFIG_IA32_SUPPORT
if (ia32_intercept(&regs, isr) == 0)
return;
#endif
printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n"); printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n", printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
iip, ifa, isr, iim); iip, ifa, isr, iim);
......
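In traps.c, vectors 45 and 46 used to give the emulation layer first refusal via ia32_exception() and ia32_intercept(); with those gone, any IA-32 exception or intercept trap is by definition unexpected and is simply logged. A sketch of the surviving case 45 arm, restating the hunk above (lines past the printk calls are elided in the diff and left out here too):

        case 45:
                printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
                printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
                       iip, ifa, isr);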
...@@ -22,7 +22,6 @@ ...@@ -22,7 +22,6 @@
#include <linux/kexec.h> #include <linux/kexec.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/machvec.h> #include <asm/machvec.h>
#include <asm/numa.h> #include <asm/numa.h>
...@@ -668,10 +667,6 @@ mem_init (void) ...@@ -668,10 +667,6 @@ mem_init (void)
fsyscall_table[i] = sys_call_table[i] | 1; fsyscall_table[i] = sys_call_table[i] | 1;
} }
setup_gate(); setup_gate();
#ifdef CONFIG_IA32_SUPPORT
ia32_mem_init();
#endif
} }
#ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_MEMORY_HOTPLUG
......
...@@ -58,11 +58,6 @@ __HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA) ...@@ -58,11 +58,6 @@ __HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA)
__HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR) __HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
__HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR) __HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
#ifdef CONFIG_IA32_SUPPORT
__HCALL0(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
__HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG) // refer SDM vol1 3.1.8
#endif /* CONFIG_IA32_SUPPORT */
GLOBAL_ENTRY(xen_set_rr0_to_rr4) GLOBAL_ENTRY(xen_set_rr0_to_rr4)
mov r8=r32 mov r8=r32
mov r9=r33 mov r9=r33
......
...@@ -301,11 +301,6 @@ static void xen_setreg(int regnum, unsigned long val) ...@@ -301,11 +301,6 @@ static void xen_setreg(int regnum, unsigned long val)
case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7: case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
xen_set_kr(regnum - _IA64_REG_AR_KR0, val); xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
break; break;
#ifdef CONFIG_IA32_SUPPORT
case _IA64_REG_AR_EFLAG:
xen_set_eflag(val);
break;
#endif
case _IA64_REG_AR_ITC: case _IA64_REG_AR_ITC:
xen_set_itc(val); xen_set_itc(val);
break; break;
...@@ -332,11 +327,6 @@ static unsigned long xen_getreg(int regnum) ...@@ -332,11 +327,6 @@ static unsigned long xen_getreg(int regnum)
case _IA64_REG_PSR: case _IA64_REG_PSR:
res = xen_get_psr(); res = xen_get_psr();
break; break;
#ifdef CONFIG_IA32_SUPPORT
case _IA64_REG_AR_EFLAG:
res = xen_get_eflag();
break;
#endif
case _IA64_REG_AR_ITC: case _IA64_REG_AR_ITC:
res = xen_get_itc(); res = xen_get_itc();
break; break;
...@@ -710,9 +700,6 @@ extern unsigned long xen_getreg(int regnum); ...@@ -710,9 +700,6 @@ extern unsigned long xen_getreg(int regnum);
__DEFINE_FUNC(getreg, __DEFINE_FUNC(getreg,
__DEFINE_GET_REG(PSR, PSR) __DEFINE_GET_REG(PSR, PSR)
#ifdef CONFIG_IA32_SUPPORT
__DEFINE_GET_REG(AR_EFLAG, EFLAG)
#endif
/* get_itc */ /* get_itc */
"mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n" "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
...@@ -789,9 +776,6 @@ __DEFINE_FUNC(setreg, ...@@ -789,9 +776,6 @@ __DEFINE_FUNC(setreg,
";;\n" ";;\n"
"(p6) br.cond.spnt xen_set_itc\n" "(p6) br.cond.spnt xen_set_itc\n"
#ifdef CONFIG_IA32_SUPPORT
__DEFINE_SET_REG(AR_EFLAG, SET_EFLAG)
#endif
__DEFINE_SET_REG(CR_TPR, SET_TPR) __DEFINE_SET_REG(CR_TPR, SET_TPR)
__DEFINE_SET_REG(CR_EOI, EOI) __DEFINE_SET_REG(CR_EOI, EOI)
......
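The Xen paravirtualization hunks drop the _IA64_REG_AR_EFLAG plumbing end to end: the xen_get_eflag/xen_set_eflag hypercall stubs, the switch cases in xen_setreg()/xen_getreg(), and the matching __DEFINE_GET_REG/__DEFINE_SET_REG assembly entries. AR.EFLAG holds the IA-32 EFLAGS register (SDM vol 1, 3.1.8) and was only reachable from IA-32 code. A condensed sketch of the trimmed C accessor, per the hunks above:

        static void xen_setreg(int regnum, unsigned long val)
        {
                switch (regnum) {
                case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
                        xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
                        break;
                /* _IA64_REG_AR_EFLAG case removed with IA-32 support */
                case _IA64_REG_AR_ITC:
                        xen_set_itc(val);
                        break;
                /* remaining cases unchanged, elided in the diff */
                }
        }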
...@@ -21,8 +21,6 @@ ...@@ -21,8 +21,6 @@
you why the ifdefs are needed? Think about it again. -AK */ you why the ifdefs are needed? Think about it again. -AK */
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
# define INPUT_COMPAT_TEST is_compat_task() # define INPUT_COMPAT_TEST is_compat_task()
#elif defined(CONFIG_IA64)
# define INPUT_COMPAT_TEST IS_IA32_PROCESS(task_pt_regs(current))
#elif defined(CONFIG_S390) #elif defined(CONFIG_S390)
# define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT) # define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT)
#elif defined(CONFIG_MIPS) #elif defined(CONFIG_MIPS)
......
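Finally, the input layer's compat test loses its IA64 branch: IS_IA32_PROCESS() no longer exists, so ia64 now takes whatever default the #elif ladder falls through to, like any other architecture without a compat ABI. The surviving head of the ladder, per the diff (later branches elided):

        #ifdef CONFIG_X86_64
        # define INPUT_COMPAT_TEST is_compat_task()
        #elif defined(CONFIG_S390)
        # define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT)
        #elif defined(CONFIG_MIPS)
        /* remaining branches elided in the diff */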