Commit 7a280cf5 authored by Linus Torvalds

Merge tag 'disintegrate-x86-20121214' of git://git.infradead.org/users/dhowells/linux-headers

Pull x86 UAPI disintegration from David Howells.

This is the scripted disintegration of the uapi headers for x86, now
that most of the x86 updates for 3.8 are hopefully merged.

* tag 'disintegrate-x86-20121214' of git://git.infradead.org/users/dhowells/linux-headers:
  UAPI: (Scripted) Disintegrate arch/x86/include/asm
parents 4939e27d af170c50
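The pattern the script applies is the same for every header in this diff: the userspace-visible definitions move to arch/x86/include/uapi/asm/, and the kernel-side header keeps only its kernel-only parts plus an include of the new UAPI header. A minimal sketch of the outcome, using boot.h from the diff below as the example:

/* arch/x86/include/asm/boot.h after disintegration (sketch only) */
#ifndef _ASM_X86_BOOT_H
#define _ASM_X86_BOOT_H

#include <asm/pgtable_types.h>
#include <uapi/asm/boot.h>	/* NORMAL_VGA, EXTENDED_VGA, ASK_VGA moved here */

/* kernel-only constants such as LOAD_PHYSICAL_ADDR and BOOT_STACK_SIZE stay behind */

#endif /* _ASM_X86_BOOT_H */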
include include/asm-generic/Kbuild.asm
header-y += boot.h
header-y += bootparam.h
header-y += debugreg.h
header-y += e820.h
header-y += hw_breakpoint.h
header-y += hyperv.h
header-y += ist.h
header-y += ldt.h
header-y += mce.h
header-y += msr-index.h
header-y += msr.h
header-y += mtrr.h
header-y += perf_regs.h
header-y += posix_types_32.h
header-y += posix_types_64.h
header-y += posix_types_x32.h
header-y += prctl.h
header-y += processor-flags.h
header-y += ptrace-abi.h
header-y += sigcontext32.h
header-y += svm.h
header-y += ucontext.h
header-y += vm86.h
header-y += vmx.h
header-y += vsyscall.h
genhdr-y += unistd_32.h
genhdr-y += unistd_64.h
#ifndef _ASM_X86_BOOT_H
#define _ASM_X86_BOOT_H
/* Internal svga startup constants */
#define NORMAL_VGA 0xffff /* 80x25 mode */
#define EXTENDED_VGA 0xfffe /* 80x50 mode */
#define ASK_VGA 0xfffd /* ask for it at bootup */
#ifdef __KERNEL__
#include <asm/pgtable_types.h>
#include <uapi/asm/boot.h>
/* Physical address where kernel should be loaded. */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
@@ -42,6 +37,4 @@
#define BOOT_STACK_SIZE 0x1000
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_X86_BOOT_H */
@@ -2,83 +2,8 @@
#define _ASM_X86_DEBUGREG_H
/* Indicate the register numbers for a number of the specific
debug registers. Registers 0-3 contain the addresses we wish to trap on */
#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
/* Define a few things for the status register. We can use this to determine
which debugging register was responsible for the trap. The other bits
are either reserved or not of interest to us. */
/* Define reserved bits in DR6 which are always set to 1 */
#define DR6_RESERVED (0xFFFF0FF0)
#define DR_TRAP0 (0x1) /* db0 */
#define DR_TRAP1 (0x2) /* db1 */
#define DR_TRAP2 (0x4) /* db2 */
#define DR_TRAP3 (0x8) /* db3 */
#define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)
#define DR_STEP (0x4000) /* single-step */
#define DR_SWITCH (0x8000) /* task switch */
/* Now define a bunch of things for manipulating the control register.
The top two bytes of the control register consist of 4 fields of 4
bits - each field corresponds to one of the four debug registers,
and indicates what types of access we trap on, and how large the data
field is that we are looking at */
#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
#define DR_RW_WRITE (0x1)
#define DR_RW_READ (0x3)
#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
#define DR_LEN_2 (0x4)
#define DR_LEN_4 (0xC)
#define DR_LEN_8 (0x8)
/* The low byte to the control register determine which registers are
enabled. There are 4 fields of two bits. One bit is "local", meaning
that the processor will reset the bit after a task switch and the other
is global meaning that we have to explicitly reset the bit. With linux,
you can use either one, since we explicitly zero the register when we enter
kernel mode. */
#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
#define DR_LOCAL_ENABLE (0x1) /* Local enable for reg 0 */
#define DR_GLOBAL_ENABLE (0x2) /* Global enable for reg 0 */
#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
/* The second byte to the control register has a few special things.
We can slow the instruction pipeline for instructions coming via the
gdt or the ldt if we want to. I am not sure why this is an advantage */
#ifdef __i386__
#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
#else
#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
#endif
#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
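As a hedged illustration (not part of this commit), the DR7 layout described above would be composed like this to arm debug register 0 as a 4-byte write watchpoint:

	unsigned long dr7 = 0;

	/* access type and length for slot 0 live in the top half of DR7 */
	dr7 |= (unsigned long)(DR_RW_WRITE | DR_LEN_4) <<
	       (DR_CONTROL_SHIFT + 0 * DR_CONTROL_SIZE);
	/* the enable pairs occupy the low byte; enable slot 0 locally */
	dr7 |= DR_LOCAL_ENABLE << (0 * DR_ENABLE_SIZE);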
/*
* HW breakpoint additions
*/
#ifdef __KERNEL__
#include <linux/bug.h>
#include <uapi/asm/debugreg.h>
DECLARE_PER_CPU(unsigned long, cpu_dr7);
@@ -190,6 +115,4 @@ static inline void debug_stack_usage_dec(void) { }
#endif /* X86_64 */
#endif /* __KERNEL__ */
#endif /* _ASM_X86_DEBUGREG_H */
#ifndef _ASM_X86_E820_H
#define _ASM_X86_E820_H
#define E820MAP 0x2d0 /* our map */
#define E820MAX 128 /* number of entries in E820MAP */
/*
* Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
* constrained space in the zeropage. If we have more nodes than
* that, and if we've booted off EFI firmware, then the EFI tables
* passed us from the EFI firmware can list more nodes. Size our
* internal memory map tables to have room for these additional
* nodes, based on up to three entries per node for which the
* kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
* plus E820MAX, allowing space for the possible duplicate E820
* entries that might need room in the same arrays, prior to the
* call to sanitize_e820_map() to remove duplicates. The allowance
* of three memory map entries per node is "enough" entries for
* the initial hardware platform motivating this mechanism to make
* use of additional EFI map entries. Future platforms may want
* to allow more than three entries per node or otherwise refine
* this size.
*/
/*
* Odd: 'make headers_check' complains about numa.h if I try
* to collapse the next two #ifdef lines to a single line:
* #if defined(__KERNEL__) && defined(CONFIG_EFI)
*/
#ifdef __KERNEL__
#ifdef CONFIG_EFI
#include <linux/numa.h>
#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
#else /* ! CONFIG_EFI */
#define E820_X_MAX E820MAX
#endif
#else /* ! __KERNEL__ */
#include <uapi/asm/e820.h>
#define E820_X_MAX E820MAX
#endif
#define E820NR 0x1e8 /* # entries in E820MAP */
#define E820_RAM 1
#define E820_RESERVED 2
#define E820_ACPI 3
#define E820_NVS 4
#define E820_UNUSABLE 5
/*
* reserved RAM used by kernel itself
* if CONFIG_INTEL_TXT is enabled, memory of this type will be
* included in the S3 integrity calculation and so should not include
* any memory that BIOS might alter over the S3 transition
*/
#define E820_RESERVED_KERN 128
#ifndef __ASSEMBLY__
#include <linux/types.h>
struct e820entry {
__u64 addr; /* start of memory segment */
__u64 size; /* size of memory segment */
__u32 type; /* type of memory segment */
} __attribute__((packed));
struct e820map {
__u32 nr_map;
struct e820entry map[E820_X_MAX];
};
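A small sketch (not from this commit, assuming the definitions above are in scope) of how an e820 map is typically walked — here, summing the usable RAM entries:

static inline __u64 e820_ram_bytes(const struct e820map *map)
{
	__u64 total = 0;
	__u32 i;

	for (i = 0; i < map->nr_map; i++)
		if (map->map[i].type == E820_RAM)
			total += map->map[i].size;
	return total;
}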
#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000
#define BIOS_BEGIN 0x000a0000
#define BIOS_END 0x00100000
#define BIOS_ROM_BASE 0xffe00000
#define BIOS_ROM_END 0xffffffff
#ifdef __KERNEL__
/* see comment in arch/x86/kernel/e820.c */
extern struct e820map e820;
extern struct e820map e820_saved;
@@ -137,13 +70,8 @@ static inline bool is_ISA_range(u64 s, u64 e)
return s >= ISA_START_ADDRESS && e <= ISA_END_ADDRESS;
}
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#ifdef __KERNEL__
#include <linux/ioport.h>
#define HIGH_MEMORY (1024*1024)
#endif /* __KERNEL__ */
#endif /* _ASM_X86_E820_H */
#ifndef _I386_HW_BREAKPOINT_H
#define _I386_HW_BREAKPOINT_H
#ifdef __KERNEL__
#include <uapi/asm/hw_breakpoint.h>
#define __ARCH_HW_BREAKPOINT_H
/*
@@ -71,6 +72,4 @@ extern int arch_bp_generic_fields(int x86_len, int x86_type,
extern struct pmu perf_ops_bp;
#endif /* __KERNEL__ */
#endif /* _I386_HW_BREAKPOINT_H */
#ifndef _ASM_X86_IST_H
#define _ASM_X86_IST_H
/*
* Include file for the interface to IST BIOS
* Copyright 2002 Andy Grover <andrew.grover@intel.com>
@@ -15,20 +12,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef _ASM_X86_IST_H
#define _ASM_X86_IST_H
#include <uapi/asm/ist.h>
#include <linux/types.h>
struct ist_info {
__u32 signature;
__u32 command;
__u32 event;
__u32 perf_level;
};
#ifdef __KERNEL__
extern struct ist_info ist_info;
#endif /* __KERNEL__ */
#endif /* _ASM_X86_IST_H */
#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H
#include <linux/types.h>
#include <asm/hyperv.h>
/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
* should be used to determine that a VM is running under KVM.
*/
#define KVM_CPUID_SIGNATURE 0x40000000
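A hedged userspace sketch (not part of this commit) of the detection described above, reading the 'KVMKVMKVM' signature that this leaf returns in ebx, ecx and edx:

#include <cpuid.h>		/* GCC's __cpuid() helper */
#include <string.h>
#include <asm/kvm_para.h>	/* the installed copy of this header */

static int running_on_kvm(void)
{
	unsigned int eax, ebx, ecx, edx;
	char sig[12];

	__cpuid(KVM_CPUID_SIGNATURE, eax, ebx, ecx, edx);
	memcpy(sig + 0, &ebx, 4);
	memcpy(sig + 4, &ecx, 4);
	memcpy(sig + 8, &edx, 4);
	return memcmp(sig, "KVMKVMKVM\0\0\0", 12) == 0;
}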
/* This CPUID returns a feature bitmap in eax. Before enabling a particular
* paravirtualization, the appropriate feature bit should be checked.
*/
#define KVM_CPUID_FEATURES 0x40000001
#define KVM_FEATURE_CLOCKSOURCE 0
#define KVM_FEATURE_NOP_IO_DELAY 1
#define KVM_FEATURE_MMU_OP 2
/* This indicates that the new set of kvmclock msrs
* are available. The use of 0x11 and 0x12 is deprecated
*/
#define KVM_FEATURE_CLOCKSOURCE2 3
#define KVM_FEATURE_ASYNC_PF 4
#define KVM_FEATURE_STEAL_TIME 5
#define KVM_FEATURE_PV_EOI 6
/* The last 8 bits are used to indicate how to interpret the flags field
* in pvclock structure. If no bits are set, all flags are ignored.
*/
#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12
#define KVM_MSR_ENABLED 1
/* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
#define MSR_KVM_STEAL_TIME 0x4b564d03
#define MSR_KVM_PV_EOI_EN 0x4b564d04
struct kvm_steal_time {
__u64 steal;
__u32 version;
__u32 flags;
__u32 pad[12];
};
#define KVM_STEAL_ALIGNMENT_BITS 5
#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)
#define KVM_MAX_MMU_OP_BATCH 32
#define KVM_ASYNC_PF_ENABLED (1 << 0)
#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
#define KVM_MMU_OP_FLUSH_TLB 2
#define KVM_MMU_OP_RELEASE_PT 3
/* Payload for KVM_HC_MMU_OP */
struct kvm_mmu_op_header {
__u32 op;
__u32 pad;
};
struct kvm_mmu_op_write_pte {
struct kvm_mmu_op_header header;
__u64 pte_phys;
__u64 pte_val;
};
struct kvm_mmu_op_flush_tlb {
struct kvm_mmu_op_header header;
};
struct kvm_mmu_op_release_pt {
struct kvm_mmu_op_header header;
__u64 pt_phys;
};
#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
#define KVM_PV_REASON_PAGE_READY 2
struct kvm_vcpu_pv_apf_data {
__u32 reason;
__u8 pad[60];
__u32 enabled;
};
#define KVM_PV_EOI_BIT 0
#define KVM_PV_EOI_MASK (0x1 << KVM_PV_EOI_BIT)
#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
#define KVM_PV_EOI_DISABLED 0x0
#ifdef __KERNEL__
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>
extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);
@@ -228,6 +133,4 @@ static inline void kvm_disable_steal_time(void)
}
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_X86_KVM_PARA_H */
#ifndef _ASM_X86_MCE_H
#define _ASM_X86_MCE_H
#include <uapi/asm/mce.h>
#include <linux/types.h>
#include <asm/ioctls.h>
/*
* Machine Check support for x86
*/
/* MCG_CAP register defines */
#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT 16
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
/* MCG_STATUS register defines */
#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
/* MCi_STATUS register defines */
#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
#define MCACOD 0xffff /* MCA Error Code */
/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
#define MCACOD_SCRUBMSK 0xfff0
#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
#define MCACOD_DATA 0x0134 /* Data Load */
#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
#define MCI_MISC_ADDR_PHYS 2 /* physical address */
#define MCI_MISC_ADDR_MEM 3 /* memory address */
#define MCI_MISC_ADDR_GENERIC 7 /* generic */
/* CTL2 register defines */
#define MCI_CTL2_CMCI_EN (1ULL << 30)
#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
#define MCJ_CTX_MASK 3
#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
#define MCJ_CTX_RANDOM 0 /* inject context: random */
#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
#define MCJ_EXCEPTION 0x8 /* raise as exception */
#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
/* Fields are zero when not available */
struct mce {
__u64 status;
__u64 misc;
__u64 addr;
__u64 mcgstatus;
__u64 ip;
__u64 tsc; /* cpu time stamp counter */
__u64 time; /* wall time_t when error was detected */
__u8 cpuvendor; /* cpu vendor as encoded in system.h */
__u8 inject_flags; /* software inject flags */
__u16 pad;
__u32 cpuid; /* CPUID 1 EAX */
__u8 cs; /* code segment */
__u8 bank; /* machine check bank */
__u8 cpu; /* cpu number; obsolete; use extcpu now */
__u8 finished; /* entry is valid */
__u32 extcpu; /* linux cpu number that detected the error */
__u32 socketid; /* CPU socket ID */
__u32 apicid; /* CPU initial apic ID */
__u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
};
/*
* This structure contains all data related to the MCE log. Also
* carries a signature to make it easier to find from external
* debugging tools. Each entry is only valid when its finished flag
* is set.
*/
#define MCE_LOG_LEN 32
struct mce_log {
char signature[12]; /* "MACHINECHECK" */
unsigned len; /* = MCE_LOG_LEN */
unsigned next;
unsigned flags;
unsigned recordlen; /* length of struct mce */
struct mce entry[MCE_LOG_LEN];
};
#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
#define MCE_LOG_SIGNATURE "MACHINECHECK"
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
#define MCE_GET_LOG_LEN _IOR('M', 2, int)
#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
/* Software defined banks */
#define MCE_EXTENDED_BANK 128
#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
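A hedged userspace sketch (not from this commit) of how the ioctls above are used against /dev/mcelog, the character device the mcelog(8) tool reads:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/mce.h>		/* the installed copy of this header */

int main(void)
{
	int fd = open("/dev/mcelog", O_RDONLY);
	int record_len = 0, log_len = 0;

	if (fd < 0)
		return 1;
	ioctl(fd, MCE_GET_RECORD_LEN, &record_len);	/* == sizeof(struct mce) */
	ioctl(fd, MCE_GET_LOG_LEN, &log_len);		/* == MCE_LOG_LEN */
	printf("record %d bytes, %d log slots\n", record_len, log_len);
	close(fd);
	return 0;
}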
#ifdef __KERNEL__
struct mca_config {
bool dont_log_ce;
@@ -260,5 +144,4 @@ struct cper_sec_mem_err;
extern void apei_mce_report_mem_error(int corrected,
struct cper_sec_mem_err *mem_err);
#endif /* __KERNEL__ */
#endif /* _ASM_X86_MCE_H */
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H
#include <asm/msr-index.h>
#include <uapi/asm/msr.h>
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/ioctl.h>
#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8])
#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])
#ifdef __KERNEL__
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
@@ -271,6 +263,5 @@ static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
return wrmsr_safe_regs(regs);
}
#endif /* CONFIG_SMP */
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */
@@ -23,97 +23,8 @@
#ifndef _ASM_X86_MTRR_H
#define _ASM_X86_MTRR_H
#include <uapi/asm/mtrr.h>
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/errno.h>
#define MTRR_IOCTL_BASE 'M'
/* Warning: this structure has a different order from i386
on x86-64. The 32bit emulation code takes care of that.
But you need to use this for 64bit, otherwise your X server
will break. */
#ifdef __i386__
struct mtrr_sentry {
unsigned long base; /* Base address */
unsigned int size; /* Size of region */
unsigned int type; /* Type of region */
};
struct mtrr_gentry {
unsigned int regnum; /* Register number */
unsigned long base; /* Base address */
unsigned int size; /* Size of region */
unsigned int type; /* Type of region */
};
#else /* __i386__ */
struct mtrr_sentry {
__u64 base; /* Base address */
__u32 size; /* Size of region */
__u32 type; /* Type of region */
};
struct mtrr_gentry {
__u64 base; /* Base address */
__u32 size; /* Size of region */
__u32 regnum; /* Register number */
__u32 type; /* Type of region */
__u32 _pad; /* Unused */
};
#endif /* !__i386__ */
struct mtrr_var_range {
__u32 base_lo;
__u32 base_hi;
__u32 mask_lo;
__u32 mask_hi;
};
/* In the Intel processor's MTRR interface, the MTRR type is always held in
an 8 bit field: */
typedef __u8 mtrr_type;
#define MTRR_NUM_FIXED_RANGES 88
#define MTRR_MAX_VAR_RANGES 256
struct mtrr_state_type {
struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
unsigned char enabled;
unsigned char have_fixed;
mtrr_type def_type;
};
#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
/* These are the various ioctls */
#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
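A hedged userspace sketch (not from this commit) of the ioctl interface above, enumerating the variable-range MTRRs through /proc/mtrr:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/mtrr.h>		/* the installed copy of this header */

int main(void)
{
	struct mtrr_gentry g;
	int fd = open("/proc/mtrr", O_RDONLY);

	if (fd < 0)
		return 1;
	memset(&g, 0, sizeof(g));
	for (g.regnum = 0; ioctl(fd, MTRRIOC_GET_ENTRY, &g) == 0; g.regnum++)
		if (g.size)
			printf("reg %u: base 0x%llx size 0x%x type %u\n",
			       (unsigned int)g.regnum,
			       (unsigned long long)g.base, (unsigned int)g.size,
			       (unsigned int)g.type);
	close(fd);
	return 0;
}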
/* These are the region types */
#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRCOMB 1
/*#define MTRR_TYPE_ 2*/
/*#define MTRR_TYPE_ 3*/
#define MTRR_TYPE_WRTHROUGH 4
#define MTRR_TYPE_WRPROT 5
#define MTRR_TYPE_WRBACK 6
#define MTRR_NUM_TYPES 7
#ifdef __KERNEL__
/* The following functions are for use by other drivers */
# ifdef CONFIG_MTRR
@@ -208,6 +119,4 @@ struct mtrr_gentry32 {
_IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry32)
#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */
#endif /* _ASM_X86_MTRR_H */
#ifdef __KERNEL__
# ifdef CONFIG_X86_32
# include <asm/posix_types_32.h>
# else
# include <asm/posix_types_64.h>
# endif
#else
# ifdef __i386__
# include <asm/posix_types_32.h>
# elif defined(__ILP32__)
# include <asm/posix_types_x32.h>
# else
# include <asm/posix_types_64.h>
# endif
#endif
#ifndef _ASM_X86_PROCESSOR_FLAGS_H
#define _ASM_X86_PROCESSOR_FLAGS_H
/* Various flags defined: can be included from assembler. */
#include <uapi/asm/processor-flags.h>
/*
* EFLAGS bits
*/
#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
#define X86_EFLAGS_BIT1 0x00000002 /* Bit 1 - always on */
#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
/*
* Basic CPU control in CR0
*/
#define X86_CR0_PE 0x00000001 /* Protection Enable */
#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor */
#define X86_CR0_EM 0x00000004 /* Emulation */
#define X86_CR0_TS 0x00000008 /* Task Switched */
#define X86_CR0_ET 0x00000010 /* Extension Type */
#define X86_CR0_NE 0x00000020 /* Numeric Error */
#define X86_CR0_WP 0x00010000 /* Write Protect */
#define X86_CR0_AM 0x00040000 /* Alignment Mask */
#define X86_CR0_NW 0x20000000 /* Not Write-through */
#define X86_CR0_CD 0x40000000 /* Cache Disable */
#define X86_CR0_PG 0x80000000 /* Paging */
/*
* Paging options in CR3
*/
#define X86_CR3_PWT 0x00000008 /* Page Write Through */
#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */
#define X86_CR3_PCID_MASK 0x00000fff /* PCID Mask */
/*
* Intel CPU features in CR4
*/
#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */
#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */
#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */
#define X86_CR4_DE 0x00000008 /* enable debugging extensions */
#define X86_CR4_PSE 0x00000010 /* enable page size extensions */
#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
#define X86_CR4_MCE 0x00000040 /* Machine check enable */
#define X86_CR4_PGE 0x00000080 /* enable global pages */
#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
#define X86_CR4_PCIDE 0x00020000 /* enable PCID support */
#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */
/*
* x86-64 Task Priority Register, CR8
*/
#define X86_CR8_TPR 0x0000000F /* task priority register */
/*
* AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
*/
/*
* NSC/Cyrix CPU configuration register indexes
*/
#define CX86_PCR0 0x20
#define CX86_GCR 0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc
#ifdef __KERNEL__
#ifdef CONFIG_VM86
#define X86_VM_MASK X86_EFLAGS_VM
#else
#define X86_VM_MASK 0 /* No VM86 support */
#endif
#endif
#endif /* _ASM_X86_PROCESSOR_FLAGS_H */
#ifndef _ASM_X86_PTRACE_H
#define _ASM_X86_PTRACE_H
#include <linux/compiler.h> /* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>
#ifdef __KERNEL__
#include <asm/segment.h>
#include <asm/page_types.h>
#endif
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
#ifdef __i386__
/* this struct defines the way the registers are stored on the
stack during a system call. */
#ifndef __KERNEL__
struct pt_regs {
long ebx;
long ecx;
long edx;
long esi;
long edi;
long ebp;
long eax;
int xds;
int xes;
int xfs;
int xgs;
long orig_eax;
long eip;
int xcs;
long eflags;
long esp;
int xss;
};
#else /* __KERNEL__ */
struct pt_regs {
unsigned long bx;
@@ -60,42 +28,8 @@ struct pt_regs {
unsigned long ss;
};
#endif /* __KERNEL__ */
#else /* __i386__ */
#ifndef __KERNEL__
struct pt_regs {
unsigned long r15;
unsigned long r14;
unsigned long r13;
unsigned long r12;
unsigned long rbp;
unsigned long rbx;
/* arguments: non interrupts/non tracing syscalls only save up to here*/
unsigned long r11;
unsigned long r10;
unsigned long r9;
unsigned long r8;
unsigned long rax;
unsigned long rcx;
unsigned long rdx;
unsigned long rsi;
unsigned long rdi;
unsigned long orig_rax;
/* end of arguments */
/* cpu exception frame or undefined */
unsigned long rip;
unsigned long cs;
unsigned long eflags;
unsigned long rsp;
unsigned long ss;
/* top of stack page */
};
#else /* __KERNEL__ */
struct pt_regs {
unsigned long r15;
unsigned long r14;
@@ -124,12 +58,8 @@ struct pt_regs {
/* top of stack page */
};
#endif /* __KERNEL__ */
#endif /* !__i386__ */
#ifdef __KERNEL__
#include <linux/init.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
@@ -301,8 +231,5 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info, int can_allocate);
#endif /* __KERNEL__ */
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PTRACE_H */
#ifndef _ASM_X86_SETUP_H
#define _ASM_X86_SETUP_H
#ifdef __KERNEL__
#include <uapi/asm/setup.h>
#define COMMAND_LINE_SIZE 2048
@@ -123,6 +124,4 @@ void __init x86_64_start_reservations(char *real_mode_data);
.size .brk.name,.-1b; \
.popsection
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_X86_SETUP_H */
#ifndef _ASM_X86_SIGCONTEXT_H
#define _ASM_X86_SIGCONTEXT_H
#include <linux/compiler.h>
#include <uapi/asm/sigcontext.h>
#include <linux/types.h>
#define FP_XSTATE_MAGIC1 0x46505853U
#define FP_XSTATE_MAGIC2 0x46505845U
#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
/*
* bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame
* are reserved for SW usage. On cpu's supporting xsave/xrstor, these bytes
* are used to extended the fpstate pointer in the sigcontext, which now
* includes the extended state information along with fpstate information.
*
* Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
* area and FP_XSTATE_MAGIC2 at the end of memory layout
* (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
* extended state information in the memory layout pointed by the fpstate
* pointer in sigcontext.
*/
struct _fpx_sw_bytes {
__u32 magic1; /* FP_XSTATE_MAGIC1 */
__u32 extended_size; /* total size of the layout referred by
* fpstate pointer in the sigcontext.
*/
__u64 xstate_bv;
/* feature bit mask (including fp/sse/extended
* state) that is present in the memory
* layout.
*/
__u32 xstate_size; /* actual xsave state size, based on the
* features saved in the layout.
* 'extended_size' will be greater than
* 'xstate_size'.
*/
__u32 padding[7]; /* for future use. */
};
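A brief sketch (not part of this commit, using the _fpstate layouts defined below): a signal handler can test whether extended state follows the legacy frame by checking the first magic, as the comment above describes.

static inline int fpstate_has_xstate(const struct _fpstate *fp)
{
	/* a full check would also verify FP_XSTATE_MAGIC2 at
	 * (extended_size - FP_XSTATE_MAGIC2_SIZE) */
	return fp && fp->sw_reserved.magic1 == FP_XSTATE_MAGIC1;
}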
#ifdef __i386__
/*
* As documented in the iBCS2 standard..
*
* The first part of "struct _fpstate" is just the normal i387
* hardware setup, the extra "status" word is used to save the
* coprocessor status word before entering the handler.
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*
* The FPU state data structure has had to grow to accommodate the
* extended FPU state required by the Streaming SIMD Extensions.
* There is no documented standard to accomplish this at the moment.
*/
struct _fpreg {
unsigned short significand[4];
unsigned short exponent;
};
struct _fpxreg {
unsigned short significand[4];
unsigned short exponent;
unsigned short padding[3];
};
struct _xmmreg {
unsigned long element[4];
};
struct _fpstate {
/* Regular FPU environment */
unsigned long cw;
unsigned long sw;
unsigned long tag;
unsigned long ipoff;
unsigned long cssel;
unsigned long dataoff;
unsigned long datasel;
struct _fpreg _st[8];
unsigned short status;
unsigned short magic; /* 0xffff = regular FPU data only */
/* FXSR FPU environment */
unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
unsigned long mxcsr;
unsigned long reserved;
struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
struct _xmmreg _xmm[8];
unsigned long padding1[44];
union {
unsigned long padding2[12];
struct _fpx_sw_bytes sw_reserved; /* represents the extended
* state info */
};
};
#define X86_FXSR_MAGIC 0x0000
#ifdef __KERNEL__
struct sigcontext {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
@@ -131,62 +36,7 @@ struct sigcontext {
unsigned long oldmask;
unsigned long cr2;
};
#else /* __KERNEL__ */
/*
* User-space might still rely on the old definition:
*/
struct sigcontext {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned long edi;
unsigned long esi;
unsigned long ebp;
unsigned long esp;
unsigned long ebx;
unsigned long edx;
unsigned long ecx;
unsigned long eax;
unsigned long trapno;
unsigned long err;
unsigned long eip;
unsigned short cs, __csh;
unsigned long eflags;
unsigned long esp_at_signal;
unsigned short ss, __ssh;
struct _fpstate __user *fpstate;
unsigned long oldmask;
unsigned long cr2;
};
#endif /* !__KERNEL__ */
#else /* __i386__ */
/* FXSAVE frame */
/* Note: reserved1/2 may someday contain valuable data. Always save/restore
them when you change signal frames. */
struct _fpstate {
__u16 cwd;
__u16 swd;
__u16 twd; /* Note this is not the same as the
32bit/x87/FSAVE twd */
__u16 fop;
__u64 rip;
__u64 rdp;
__u32 mxcsr;
__u32 mxcsr_mask;
__u32 st_space[32]; /* 8*16 bytes for each FP-reg */
__u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
__u32 reserved2[12];
union {
__u32 reserved3[12];
struct _fpx_sw_bytes sw_reserved; /* represents the extended
* state information */
};
};
#ifdef __KERNEL__
struct sigcontext {
unsigned long r8;
unsigned long r9;
@@ -225,69 +75,5 @@ struct sigcontext {
void __user *fpstate; /* zero when no FPU/extended context */
unsigned long reserved1[8];
};
#else /* __KERNEL__ */
/*
* User-space might still rely on the old definition:
*/
struct sigcontext {
__u64 r8;
__u64 r9;
__u64 r10;
__u64 r11;
__u64 r12;
__u64 r13;
__u64 r14;
__u64 r15;
__u64 rdi;
__u64 rsi;
__u64 rbp;
__u64 rbx;
__u64 rdx;
__u64 rax;
__u64 rcx;
__u64 rsp;
__u64 rip;
__u64 eflags; /* RFLAGS */
__u16 cs;
__u16 gs;
__u16 fs;
__u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;
__u64 cr2;
struct _fpstate __user *fpstate; /* zero when no FPU context */
#ifdef __ILP32__
__u32 __fpstate_pad;
#endif
__u64 reserved1[8];
};
#endif /* !__KERNEL__ */
#endif /* !__i386__ */
struct _xsave_hdr {
__u64 xstate_bv;
__u64 reserved1[2];
__u64 reserved2[5];
};
struct _ymmh_state {
/* 16 * 16 bytes for each YMMH-reg */
__u32 ymmh_space[64];
};
/*
* Extended state pointed by the fpstate pointer in the sigcontext.
* In addition to the fpstate, information encoded in the xstate_hdr
* indicates the presence of other extended state information
* supported by the processor and OS.
*/
struct _xstate {
struct _fpstate fpstate;
struct _xsave_hdr xstate_hdr;
struct _ymmh_state ymmh;
/* new processor state extensions go here */
};
#endif /* _ASM_X86_SIGCONTEXT_H */
@@ -2,14 +2,6 @@
#define _ASM_X86_SIGNAL_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/time.h>
#include <linux/compiler.h>
/* Avoid too many header ordering problems. */
struct siginfo;
#ifdef __KERNEL__
#include <linux/linkage.h>
/* Most things should be clean enough to redefine this at will, if care
@@ -35,102 +27,11 @@ typedef struct {
typedef sigset_t compat_sigset_t;
#endif
#else
/* Here we must cater to libcs that poke about in kernel headers. */
#define NSIG 32
typedef unsigned long sigset_t;
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#include <uapi/asm/signal.h>
#define SIGHUP 1
#define SIGINT 2
#define SIGQUIT 3
#define SIGILL 4
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT 6
#define SIGBUS 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGUSR1 10
#define SIGSEGV 11
#define SIGUSR2 12
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
#define SIGSTKFLT 16
#define SIGCHLD 17
#define SIGCONT 18
#define SIGSTOP 19
#define SIGTSTP 20
#define SIGTTIN 21
#define SIGTTOU 22
#define SIGURG 23
#define SIGXCPU 24
#define SIGXFSZ 25
#define SIGVTALRM 26
#define SIGPROF 27
#define SIGWINCH 28
#define SIGIO 29
#define SIGPOLL SIGIO
/*
#define SIGLOST 29
*/
#define SIGPWR 30
#define SIGSYS 31
#define SIGUNUSED 31
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
/*
* SA_FLAGS values:
*
* SA_ONSTACK indicates that a registered stack_t will be used.
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
* SA_RESETHAND clears the handler when the signal is delivered.
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
* SA_NODEFER prevents the current signal from being masked in the handler.
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
#define SA_NOCLDSTOP 0x00000001u
#define SA_NOCLDWAIT 0x00000002u
#define SA_SIGINFO 0x00000004u
#define SA_ONSTACK 0x08000000u
#define SA_RESTART 0x10000000u
#define SA_NODEFER 0x40000000u
#define SA_RESETHAND 0x80000000u
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_RESTORER 0x04000000
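A hedged userspace sketch (not from this commit) of the flags above in use — SA_SIGINFO selects the three-argument handler and SA_RESTART restarts interrupted syscalls:

#include <signal.h>
#include <string.h>

static void on_term(int sig, siginfo_t *si, void *uctx)
{
	(void)sig; (void)si; (void)uctx;	/* handler body omitted */
}

static int install_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_term;
	sa.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGTERM, &sa, NULL);
}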
/*
* sigaltstack controls
*/
#define SS_ONSTACK 1
#define SS_DISABLE 2
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
#include <asm-generic/signal-defs.h>
#ifndef __ASSEMBLY__
# ifdef __KERNEL__
extern void do_notify_resume(struct pt_regs *, void *, __u32);
# endif /* __KERNEL__ */
#ifdef __i386__
# ifdef __KERNEL__
struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
@@ -149,45 +50,8 @@ struct k_sigaction {
struct sigaction sa;
};
# else /* __KERNEL__ */
/* Here we must cater to libcs that poke about in kernel headers. */
struct sigaction {
union {
__sighandler_t _sa_handler;
void (*_sa_sigaction)(int, struct siginfo *, void *);
} _u;
sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
};
#define sa_handler _u._sa_handler
#define sa_sigaction _u._sa_sigaction
# endif /* ! __KERNEL__ */
#else /* __i386__ */
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
__sigrestore_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};
struct k_sigaction {
struct sigaction sa;
};
#endif /* !__i386__ */
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
#ifdef __KERNEL__
#include <asm/sigcontext.h>
#ifdef __i386__
@@ -260,7 +124,5 @@ struct pt_regs;
#endif /* !__i386__ */
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SIGNAL_H */
#ifndef __SVM_H
#define __SVM_H
#include <uapi/asm/svm.h>
#define SVM_EXIT_READ_CR0 0x000
#define SVM_EXIT_READ_CR3 0x003
#define SVM_EXIT_READ_CR4 0x004
#define SVM_EXIT_READ_CR8 0x008
#define SVM_EXIT_WRITE_CR0 0x010
#define SVM_EXIT_WRITE_CR3 0x013
#define SVM_EXIT_WRITE_CR4 0x014
#define SVM_EXIT_WRITE_CR8 0x018
#define SVM_EXIT_READ_DR0 0x020
#define SVM_EXIT_READ_DR1 0x021
#define SVM_EXIT_READ_DR2 0x022
#define SVM_EXIT_READ_DR3 0x023
#define SVM_EXIT_READ_DR4 0x024
#define SVM_EXIT_READ_DR5 0x025
#define SVM_EXIT_READ_DR6 0x026
#define SVM_EXIT_READ_DR7 0x027
#define SVM_EXIT_WRITE_DR0 0x030
#define SVM_EXIT_WRITE_DR1 0x031
#define SVM_EXIT_WRITE_DR2 0x032
#define SVM_EXIT_WRITE_DR3 0x033
#define SVM_EXIT_WRITE_DR4 0x034
#define SVM_EXIT_WRITE_DR5 0x035
#define SVM_EXIT_WRITE_DR6 0x036
#define SVM_EXIT_WRITE_DR7 0x037
#define SVM_EXIT_EXCP_BASE 0x040
#define SVM_EXIT_INTR 0x060
#define SVM_EXIT_NMI 0x061
#define SVM_EXIT_SMI 0x062
#define SVM_EXIT_INIT 0x063
#define SVM_EXIT_VINTR 0x064
#define SVM_EXIT_CR0_SEL_WRITE 0x065
#define SVM_EXIT_IDTR_READ 0x066
#define SVM_EXIT_GDTR_READ 0x067
#define SVM_EXIT_LDTR_READ 0x068
#define SVM_EXIT_TR_READ 0x069
#define SVM_EXIT_IDTR_WRITE 0x06a
#define SVM_EXIT_GDTR_WRITE 0x06b
#define SVM_EXIT_LDTR_WRITE 0x06c
#define SVM_EXIT_TR_WRITE 0x06d
#define SVM_EXIT_RDTSC 0x06e
#define SVM_EXIT_RDPMC 0x06f
#define SVM_EXIT_PUSHF 0x070
#define SVM_EXIT_POPF 0x071
#define SVM_EXIT_CPUID 0x072
#define SVM_EXIT_RSM 0x073
#define SVM_EXIT_IRET 0x074
#define SVM_EXIT_SWINT 0x075
#define SVM_EXIT_INVD 0x076
#define SVM_EXIT_PAUSE 0x077
#define SVM_EXIT_HLT 0x078
#define SVM_EXIT_INVLPG 0x079
#define SVM_EXIT_INVLPGA 0x07a
#define SVM_EXIT_IOIO 0x07b
#define SVM_EXIT_MSR 0x07c
#define SVM_EXIT_TASK_SWITCH 0x07d
#define SVM_EXIT_FERR_FREEZE 0x07e
#define SVM_EXIT_SHUTDOWN 0x07f
#define SVM_EXIT_VMRUN 0x080
#define SVM_EXIT_VMMCALL 0x081
#define SVM_EXIT_VMLOAD 0x082
#define SVM_EXIT_VMSAVE 0x083
#define SVM_EXIT_STGI 0x084
#define SVM_EXIT_CLGI 0x085
#define SVM_EXIT_SKINIT 0x086
#define SVM_EXIT_RDTSCP 0x087
#define SVM_EXIT_ICEBP 0x088
#define SVM_EXIT_WBINVD 0x089
#define SVM_EXIT_MONITOR 0x08a
#define SVM_EXIT_MWAIT 0x08b
#define SVM_EXIT_MWAIT_COND 0x08c
#define SVM_EXIT_XSETBV 0x08d
#define SVM_EXIT_NPF 0x400
#define SVM_EXIT_ERR -1
#define SVM_EXIT_REASONS \
{ SVM_EXIT_READ_CR0, "read_cr0" }, \
{ SVM_EXIT_READ_CR3, "read_cr3" }, \
{ SVM_EXIT_READ_CR4, "read_cr4" }, \
{ SVM_EXIT_READ_CR8, "read_cr8" }, \
{ SVM_EXIT_WRITE_CR0, "write_cr0" }, \
{ SVM_EXIT_WRITE_CR3, "write_cr3" }, \
{ SVM_EXIT_WRITE_CR4, "write_cr4" }, \
{ SVM_EXIT_WRITE_CR8, "write_cr8" }, \
{ SVM_EXIT_READ_DR0, "read_dr0" }, \
{ SVM_EXIT_READ_DR1, "read_dr1" }, \
{ SVM_EXIT_READ_DR2, "read_dr2" }, \
{ SVM_EXIT_READ_DR3, "read_dr3" }, \
{ SVM_EXIT_WRITE_DR0, "write_dr0" }, \
{ SVM_EXIT_WRITE_DR1, "write_dr1" }, \
{ SVM_EXIT_WRITE_DR2, "write_dr2" }, \
{ SVM_EXIT_WRITE_DR3, "write_dr3" }, \
{ SVM_EXIT_WRITE_DR5, "write_dr5" }, \
{ SVM_EXIT_WRITE_DR7, "write_dr7" }, \
{ SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \
{ SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \
{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
{ SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
{ SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
{ SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
{ SVM_EXIT_INTR, "interrupt" }, \
{ SVM_EXIT_NMI, "nmi" }, \
{ SVM_EXIT_SMI, "smi" }, \
{ SVM_EXIT_INIT, "init" }, \
{ SVM_EXIT_VINTR, "vintr" }, \
{ SVM_EXIT_CPUID, "cpuid" }, \
{ SVM_EXIT_INVD, "invd" }, \
{ SVM_EXIT_HLT, "hlt" }, \
{ SVM_EXIT_INVLPG, "invlpg" }, \
{ SVM_EXIT_INVLPGA, "invlpga" }, \
{ SVM_EXIT_IOIO, "io" }, \
{ SVM_EXIT_MSR, "msr" }, \
{ SVM_EXIT_TASK_SWITCH, "task_switch" }, \
{ SVM_EXIT_SHUTDOWN, "shutdown" }, \
{ SVM_EXIT_VMRUN, "vmrun" }, \
{ SVM_EXIT_VMMCALL, "hypercall" }, \
{ SVM_EXIT_VMLOAD, "vmload" }, \
{ SVM_EXIT_VMSAVE, "vmsave" }, \
{ SVM_EXIT_STGI, "stgi" }, \
{ SVM_EXIT_CLGI, "clgi" }, \
{ SVM_EXIT_SKINIT, "skinit" }, \
{ SVM_EXIT_WBINVD, "wbinvd" }, \
{ SVM_EXIT_MONITOR, "monitor" }, \
{ SVM_EXIT_MWAIT, "mwait" }, \
{ SVM_EXIT_XSETBV, "xsetbv" }, \
{ SVM_EXIT_NPF, "npf" }
#ifdef __KERNEL__
enum {
INTERCEPT_INTR,
@@ -403,5 +277,3 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
#endif
#endif
#ifndef _ASM_X86_UNISTD_H
#define _ASM_X86_UNISTD_H 1
#include <uapi/asm/unistd.h>
/* x32 syscall flag bit */
#define __X32_SYSCALL_BIT 0x40000000
#ifdef __KERNEL__
# ifdef CONFIG_X86_X32_ABI
# define __SYSCALL_MASK (~(__X32_SYSCALL_BIT))
@@ -63,14 +61,4 @@
*/
# define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
#else
# ifdef __i386__
# include <asm/unistd_32.h>
# elif defined(__ILP32__)
# include <asm/unistd_x32.h>
# else
# include <asm/unistd_64.h>
# endif
#endif
#endif /* _ASM_X86_UNISTD_H */
#ifndef _ASM_X86_VM86_H
#define _ASM_X86_VM86_H
/*
* I'm guessing at the VIF/VIP flag usage, but hope that this is how
* the Pentium uses them. Linux will return from vm86 mode when both
* VIF and VIP is set.
*
* On a Pentium, we could probably optimize the virtual flags directly
* in the eflags register instead of doing it "by hand" in vflags...
*
* Linus
*/
#include <asm/processor-flags.h>
#define BIOSSEG 0x0f000
#define CPU_086 0
#define CPU_186 1
#define CPU_286 2
#define CPU_386 3
#define CPU_486 4
#define CPU_586 5
/*
* Return values for the 'vm86()' system call
*/
#define VM86_TYPE(retval) ((retval) & 0xff)
#define VM86_ARG(retval) ((retval) >> 8)
#define VM86_SIGNAL 0 /* return due to signal */
#define VM86_UNKNOWN 1 /* unhandled GP fault
- IO-instruction or similar */
#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
#define VM86_STI 3 /* sti/popf/iret instruction enabled
virtual interrupts */
/*
* Additional return values when invoking new vm86()
*/
#define VM86_PICRETURN 4 /* return due to pending PIC request */
#define VM86_TRAP 6 /* return due to DOS-debugger request */
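A short sketch (not part of this commit) of decoding a vm86() return value with the macros above:

static void handle_vm86_return(int ret)
{
	switch (VM86_TYPE(ret)) {
	case VM86_SIGNAL:	/* came back because of a pending signal */
		break;
	case VM86_INTx:		/* guest executed "int n"; n == VM86_ARG(ret) */
		break;
	case VM86_STI:		/* guest re-enabled virtual interrupts */
		break;
	}
}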
/*
* function codes when invoking new vm86()
*/
#define VM86_PLUS_INSTALL_CHECK 0
#define VM86_ENTER 1
#define VM86_ENTER_NO_BYPASS 2
#define VM86_REQUEST_IRQ 3
#define VM86_FREE_IRQ 4
#define VM86_GET_IRQ_BITS 5
#define VM86_GET_AND_RESET_IRQ 6
/*
* This is the stack-layout seen by the user space program when we have
* done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
* is 'kernel_vm86_regs' (see below).
*/
struct vm86_regs {
/*
* normal regs, with special meaning for the segment descriptors..
*/
long ebx;
long ecx;
long edx;
long esi;
long edi;
long ebp;
long eax;
long __null_ds;
long __null_es;
long __null_fs;
long __null_gs;
long orig_eax;
long eip;
unsigned short cs, __csh;
long eflags;
long esp;
unsigned short ss, __ssh;
/*
* these are specific to v86 mode:
*/
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned short fs, __fsh;
unsigned short gs, __gsh;
};
struct revectored_struct {
unsigned long __map[8]; /* 256 bits */
};
struct vm86_struct {
struct vm86_regs regs;
unsigned long flags;
unsigned long screen_bitmap;
unsigned long cpu_type;
struct revectored_struct int_revectored;
struct revectored_struct int21_revectored;
};
/*
* flags masks
*/
#define VM86_SCREEN_BITMAP 0x0001
struct vm86plus_info_struct {
unsigned long force_return_for_pic:1;
unsigned long vm86dbg_active:1; /* for debugger */
unsigned long vm86dbg_TFpendig:1; /* for debugger */
unsigned long unused:28;
unsigned long is_vm86pus:1; /* for vm86 internal use */
unsigned char vm86dbg_intxxtab[32]; /* for debugger */
};
struct vm86plus_struct {
struct vm86_regs regs;
unsigned long flags;
unsigned long screen_bitmap;
unsigned long cpu_type;
struct revectored_struct int_revectored;
struct revectored_struct int21_revectored;
struct vm86plus_info_struct vm86plus;
};
#ifdef __KERNEL__
#include <asm/ptrace.h>
#include <uapi/asm/vm86.h>
/*
* This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
@@ -203,6 +79,4 @@ static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
#endif /* CONFIG_VM86 */
#endif /* __KERNEL__ */
#endif /* _ASM_X86_VM86_H */
#ifndef VMX_H
#define VMX_H
/*
* vmx.h: VMX Architecture related definitions
* Copyright (c) 2004, Intel Corporation.
@@ -24,90 +21,12 @@
* Yaniv Kamay <yaniv@qumranet.com>
*
*/
#ifndef VMX_H
#define VMX_H
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
#define EXIT_REASON_EXCEPTION_NMI 0
#define EXIT_REASON_EXTERNAL_INTERRUPT 1
#define EXIT_REASON_TRIPLE_FAULT 2
#define EXIT_REASON_PENDING_INTERRUPT 7
#define EXIT_REASON_NMI_WINDOW 8
#define EXIT_REASON_TASK_SWITCH 9
#define EXIT_REASON_CPUID 10
#define EXIT_REASON_HLT 12
#define EXIT_REASON_INVD 13
#define EXIT_REASON_INVLPG 14
#define EXIT_REASON_RDPMC 15
#define EXIT_REASON_RDTSC 16
#define EXIT_REASON_VMCALL 18
#define EXIT_REASON_VMCLEAR 19
#define EXIT_REASON_VMLAUNCH 20
#define EXIT_REASON_VMPTRLD 21
#define EXIT_REASON_VMPTRST 22
#define EXIT_REASON_VMREAD 23
#define EXIT_REASON_VMRESUME 24
#define EXIT_REASON_VMWRITE 25
#define EXIT_REASON_VMOFF 26
#define EXIT_REASON_VMON 27
#define EXIT_REASON_CR_ACCESS 28
#define EXIT_REASON_DR_ACCESS 29
#define EXIT_REASON_IO_INSTRUCTION 30
#define EXIT_REASON_MSR_READ 31
#define EXIT_REASON_MSR_WRITE 32
#define EXIT_REASON_INVALID_STATE 33
#define EXIT_REASON_MWAIT_INSTRUCTION 36
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION 40
#define EXIT_REASON_MCE_DURING_VMENTRY 41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS 44
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_WBINVD 54
#define EXIT_REASON_XSETBV 55
#define EXIT_REASON_INVPCID 58
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
{ EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
{ EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
{ EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
{ EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
{ EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
{ EXIT_REASON_CPUID, "CPUID" }, \
{ EXIT_REASON_HLT, "HLT" }, \
{ EXIT_REASON_INVLPG, "INVLPG" }, \
{ EXIT_REASON_RDPMC, "RDPMC" }, \
{ EXIT_REASON_RDTSC, "RDTSC" }, \
{ EXIT_REASON_VMCALL, "VMCALL" }, \
{ EXIT_REASON_VMCLEAR, "VMCLEAR" }, \
{ EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \
{ EXIT_REASON_VMPTRLD, "VMPTRLD" }, \
{ EXIT_REASON_VMPTRST, "VMPTRST" }, \
{ EXIT_REASON_VMREAD, "VMREAD" }, \
{ EXIT_REASON_VMRESUME, "VMRESUME" }, \
{ EXIT_REASON_VMWRITE, "VMWRITE" }, \
{ EXIT_REASON_VMOFF, "VMOFF" }, \
{ EXIT_REASON_VMON, "VMON" }, \
{ EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \
{ EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \
{ EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
{ EXIT_REASON_MSR_READ, "MSR_READ" }, \
{ EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
{ EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
{ EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
{ EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \
{ EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
{ EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
{ EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
{ EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
{ EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
{ EXIT_REASON_WBINVD, "WBINVD" }
#ifdef __KERNEL__
#include <linux/types.h>
#include <uapi/asm/vmx.h>
/*
* Definitions of Primary Processor-Based VM-Execution Controls.
@@ -526,5 +445,3 @@ enum vm_instruction_error_number {
};
#endif
#endif
#ifndef _ASM_X86_VSYSCALL_H
#define _ASM_X86_VSYSCALL_H
enum vsyscall_num {
__NR_vgettimeofday,
__NR_vtime,
__NR_vgetcpu,
};
#define VSYSCALL_START (-10UL << 20)
#define VSYSCALL_SIZE 1024
#define VSYSCALL_END (-2UL << 20)
#define VSYSCALL_MAPPED_PAGES 1
#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
#ifdef __KERNEL__
#include <linux/seqlock.h>
#include <uapi/asm/vsyscall.h>
#define VGETCPU_RDTSCP 1
#define VGETCPU_LSL 2
@@ -53,6 +41,4 @@ static inline unsigned int __getcpu(void)
}
#endif /* CONFIG_X86_64 */
#endif /* __KERNEL__ */
#endif /* _ASM_X86_VSYSCALL_H */
@@ -4,3 +4,61 @@ include include/uapi/asm-generic/Kbuild.asm
genhdr-y += unistd_32.h
genhdr-y += unistd_64.h
genhdr-y += unistd_x32.h
header-y += a.out.h
header-y += auxvec.h
header-y += bitsperlong.h
header-y += boot.h
header-y += bootparam.h
header-y += byteorder.h
header-y += debugreg.h
header-y += e820.h
header-y += errno.h
header-y += fcntl.h
header-y += hw_breakpoint.h
header-y += hyperv.h
header-y += ioctl.h
header-y += ioctls.h
header-y += ipcbuf.h
header-y += ist.h
header-y += kvm.h
header-y += kvm_para.h
header-y += ldt.h
header-y += mce.h
header-y += mman.h
header-y += msgbuf.h
header-y += msr-index.h
header-y += msr.h
header-y += mtrr.h
header-y += param.h
header-y += perf_regs.h
header-y += poll.h
header-y += posix_types.h
header-y += posix_types_32.h
header-y += posix_types_64.h
header-y += posix_types_x32.h
header-y += prctl.h
header-y += processor-flags.h
header-y += ptrace-abi.h
header-y += ptrace.h
header-y += resource.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h
header-y += sigcontext.h
header-y += sigcontext32.h
header-y += siginfo.h
header-y += signal.h
header-y += socket.h
header-y += sockios.h
header-y += stat.h
header-y += statfs.h
header-y += svm.h
header-y += swab.h
header-y += termbits.h
header-y += termios.h
header-y += types.h
header-y += ucontext.h
header-y += unistd.h
header-y += vm86.h
header-y += vmx.h
header-y += vsyscall.h
#ifndef _UAPI_ASM_X86_BOOT_H
#define _UAPI_ASM_X86_BOOT_H
/* Internal svga startup constants */
#define NORMAL_VGA 0xffff /* 80x25 mode */
#define EXTENDED_VGA 0xfffe /* 80x50 mode */
#define ASK_VGA 0xfffd /* ask for it at bootup */
#endif /* _UAPI_ASM_X86_BOOT_H */
#ifndef _UAPI_ASM_X86_DEBUGREG_H
#define _UAPI_ASM_X86_DEBUGREG_H
/* Indicate the register numbers for a number of the specific
debug registers. Registers 0-3 contain the addresses we wish to trap on */
#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
/* Define a few things for the status register. We can use this to determine
which debugging register was responsible for the trap. The other bits
are either reserved or not of interest to us. */
/* Define reserved bits in DR6 which are always set to 1 */
#define DR6_RESERVED (0xFFFF0FF0)
#define DR_TRAP0 (0x1) /* db0 */
#define DR_TRAP1 (0x2) /* db1 */
#define DR_TRAP2 (0x4) /* db2 */
#define DR_TRAP3 (0x8) /* db3 */
#define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)
#define DR_STEP (0x4000) /* single-step */
#define DR_SWITCH (0x8000) /* task switch */
/* Now define a bunch of things for manipulating the control register.
The top two bytes of the control register consist of 4 fields of 4
bits - each field corresponds to one of the four debug registers,
and indicates what types of access we trap on, and how large the data
field is that we are looking at */
#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
#define DR_RW_WRITE (0x1)
#define DR_RW_READ (0x3)
#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
#define DR_LEN_2 (0x4)
#define DR_LEN_4 (0xC)
#define DR_LEN_8 (0x8)
/* The low byte of the control register determines which registers are
enabled. There are 4 fields of two bits. One bit is "local", meaning
that the processor will reset the bit after a task switch and the other
is global meaning that we have to explicitly reset the bit. With linux,
you can use either one, since we explicitly zero the register when we enter
kernel mode. */
#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
#define DR_LOCAL_ENABLE (0x1) /* Local enable for reg 0 */
#define DR_GLOBAL_ENABLE (0x2) /* Global enable for reg 0 */
#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
/* The second byte to the control register has a few special things.
We can slow the instruction pipeline for instructions coming via the
gdt or the ldt if we want to. I am not sure why this is an advantage */
#ifdef __i386__
#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
#else
#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
#endif
#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
/*
* HW breakpoint additions
*/
#endif /* _UAPI_ASM_X86_DEBUGREG_H */
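As a hedged illustration of how the DR7 field macros above fit together (the helper name and the choice of register 0 are mine, not part of this header, and actually loading DR7 still requires ptrace or perf), a userspace sketch composing a 4-byte write watchpoint for debug register 0:

/* Hedged sketch, not part of this patch: build a DR7 value that arms
 * debug register 0 as a 4-byte write watchpoint, using the field
 * macros from <asm/debugreg.h> above. */
#include <stdio.h>
#include <asm/debugreg.h>

static unsigned long dr7_reg0_write4(void)
{
	unsigned long dr7 = 0;

	/* R/W and LEN field for register 0: bits 16-19. */
	dr7 |= (unsigned long)(DR_RW_WRITE | DR_LEN_4)
		<< (DR_CONTROL_SHIFT + 0 * DR_CONTROL_SIZE);
	/* Local enable bit for register 0 in the low byte. */
	dr7 |= DR_LOCAL_ENABLE << (0 * DR_ENABLE_SIZE);
	return dr7;
}

int main(void)
{
	printf("DR7 = %#lx\n", dr7_reg0_write4());	/* expect 0xd0001 */
	return 0;
}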
#ifndef _UAPI_ASM_X86_E820_H
#define _UAPI_ASM_X86_E820_H
#define E820MAP 0x2d0 /* our map */
#define E820MAX 128 /* number of entries in E820MAP */
/*
* Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
* constrained space in the zeropage. If we have more nodes than
* that, and if we've booted off EFI firmware, then the EFI tables
* passed us from the EFI firmware can list more nodes. Size our
* internal memory map tables to have room for these additional
* nodes, based on up to three entries per node for which the
* kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
* plus E820MAX, allowing space for the possible duplicate E820
* entries that might need room in the same arrays, prior to the
* call to sanitize_e820_map() to remove duplicates. The allowance
* of three memory map entries per node is "enough" entries for
* the initial hardware platform motivating this mechanism to make
* use of additional EFI map entries. Future platforms may want
* to allow more than three entries per node or otherwise refine
* this size.
*/
/*
* Odd: 'make headers_check' complains about numa.h if I try
* to collapse the next two #ifdef lines to a single line:
* #if defined(__KERNEL__) && defined(CONFIG_EFI)
*/
#ifndef __KERNEL__
#define E820_X_MAX E820MAX
#endif
#define E820NR 0x1e8 /* # entries in E820MAP */
#define E820_RAM 1
#define E820_RESERVED 2
#define E820_ACPI 3
#define E820_NVS 4
#define E820_UNUSABLE 5
/*
* reserved RAM used by kernel itself
* if CONFIG_INTEL_TXT is enabled, memory of this type will be
* included in the S3 integrity calculation and so should not include
* any memory that BIOS might alter over the S3 transition
*/
#define E820_RESERVED_KERN 128
#ifndef __ASSEMBLY__
#include <linux/types.h>
struct e820entry {
__u64 addr; /* start of memory segment */
__u64 size; /* size of memory segment */
__u32 type; /* type of memory segment */
} __attribute__((packed));
struct e820map {
__u32 nr_map;
struct e820entry map[E820_X_MAX];
};
#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000
#define BIOS_BEGIN 0x000a0000
#define BIOS_END 0x00100000
#define BIOS_ROM_BASE 0xffe00000
#define BIOS_ROM_END 0xffffffff
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_ASM_X86_E820_H */
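struct e820map above is the firmware memory map the boot path hands to the kernel; a hedged sketch (the two sample entries are invented) of walking it and totalling the E820_RAM bytes:

/* Hedged sketch, not part of this patch: total the usable RAM in an
 * e820 map using the types defined above. */
#include <stdio.h>
#include <asm/e820.h>

static unsigned long long e820_ram_bytes(const struct e820map *map)
{
	unsigned long long total = 0;
	__u32 i;

	for (i = 0; i < map->nr_map; i++)
		if (map->map[i].type == E820_RAM)
			total += map->map[i].size;
	return total;
}

int main(void)
{
	struct e820map map = { .nr_map = 2 };

	/* Invented entries: low memory plus ~2 GiB above 1 MiB. */
	map.map[0] = (struct e820entry){ .addr = 0, .size = 0x9fc00, .type = E820_RAM };
	map.map[1] = (struct e820entry){ .addr = 0x100000, .size = 0x7ff00000, .type = E820_RAM };
	printf("usable RAM: %llu bytes\n", e820_ram_bytes(&map));
	return 0;
}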
/*
* Include file for the interface to IST BIOS
* Copyright 2002 Andy Grover <andrew.grover@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef _UAPI_ASM_X86_IST_H
#define _UAPI_ASM_X86_IST_H
#include <linux/types.h>
struct ist_info {
__u32 signature;
__u32 command;
__u32 event;
__u32 perf_level;
};
#endif /* _UAPI_ASM_X86_IST_H */
#ifndef _UAPI_ASM_X86_KVM_PARA_H
#define _UAPI_ASM_X86_KVM_PARA_H
#include <linux/types.h>
#include <asm/hyperv.h>
/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
* should be used to determine that a VM is running under KVM.
*/
#define KVM_CPUID_SIGNATURE 0x40000000
/* This CPUID returns a feature bitmap in eax. Before enabling a particular
* paravirtualization, the appropriate feature bit should be checked.
*/
#define KVM_CPUID_FEATURES 0x40000001
#define KVM_FEATURE_CLOCKSOURCE 0
#define KVM_FEATURE_NOP_IO_DELAY 1
#define KVM_FEATURE_MMU_OP 2
/* This indicates that the new set of kvmclock msrs
* are available. The use of 0x11 and 0x12 is deprecated
*/
#define KVM_FEATURE_CLOCKSOURCE2 3
#define KVM_FEATURE_ASYNC_PF 4
#define KVM_FEATURE_STEAL_TIME 5
#define KVM_FEATURE_PV_EOI 6
/* The last 8 bits are used to indicate how to interpret the flags field
* in pvclock structure. If no bits are set, all flags are ignored.
*/
#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24
#define MSR_KVM_WALL_CLOCK 0x11
#define MSR_KVM_SYSTEM_TIME 0x12
#define KVM_MSR_ENABLED 1
/* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
#define MSR_KVM_WALL_CLOCK_NEW 0x4b564d00
#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
#define MSR_KVM_STEAL_TIME 0x4b564d03
#define MSR_KVM_PV_EOI_EN 0x4b564d04
struct kvm_steal_time {
__u64 steal;
__u32 version;
__u32 flags;
__u32 pad[12];
};
#define KVM_STEAL_ALIGNMENT_BITS 5
#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)
#define KVM_MAX_MMU_OP_BATCH 32
#define KVM_ASYNC_PF_ENABLED (1 << 0)
#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
#define KVM_MMU_OP_FLUSH_TLB 2
#define KVM_MMU_OP_RELEASE_PT 3
/* Payload for KVM_HC_MMU_OP */
struct kvm_mmu_op_header {
__u32 op;
__u32 pad;
};
struct kvm_mmu_op_write_pte {
struct kvm_mmu_op_header header;
__u64 pte_phys;
__u64 pte_val;
};
struct kvm_mmu_op_flush_tlb {
struct kvm_mmu_op_header header;
};
struct kvm_mmu_op_release_pt {
struct kvm_mmu_op_header header;
__u64 pt_phys;
};
#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
#define KVM_PV_REASON_PAGE_READY 2
struct kvm_vcpu_pv_apf_data {
__u32 reason;
__u8 pad[60];
__u32 enabled;
};
#define KVM_PV_EOI_BIT 0
#define KVM_PV_EOI_MASK (0x1 << KVM_PV_EOI_BIT)
#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
#define KVM_PV_EOI_DISABLED 0x0
#endif /* _UAPI_ASM_X86_KVM_PARA_H */
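The KVM_CPUID_SIGNATURE leaf described above is how a guest identifies KVM; a hedged sketch using the compiler-provided <cpuid.h> helper (that helper is a GCC/clang convenience, not something this header supplies):

/* Hedged sketch, not part of this patch: probe the KVM CPUID signature
 * leaf and report whether we look like a KVM guest. */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>
#include <asm/kvm_para.h>

static int running_on_kvm(void)
{
	unsigned int eax, ebx, ecx, edx;
	char sig[13];

	__cpuid(KVM_CPUID_SIGNATURE, eax, ebx, ecx, edx);
	memcpy(sig + 0, &ebx, 4);
	memcpy(sig + 4, &ecx, 4);
	memcpy(sig + 8, &edx, 4);
	sig[12] = '\0';
	return strcmp(sig, "KVMKVMKVM") == 0;
}

int main(void)
{
	printf("KVM guest: %s\n", running_on_kvm() ? "yes" : "no");
	return 0;
}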
#ifndef _UAPI_ASM_X86_MCE_H
#define _UAPI_ASM_X86_MCE_H
#include <linux/types.h>
#include <asm/ioctls.h>
/*
* Machine Check support for x86
*/
/* MCG_CAP register defines */
#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
#define MCG_EXT_CNT_SHIFT 16
#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
/* MCG_STATUS register defines */
#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
/* MCi_STATUS register defines */
#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
#define MCACOD 0xffff /* MCA Error Code */
/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
#define MCACOD_SCRUBMSK 0xfff0
#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
#define MCACOD_DATA 0x0134 /* Data Load */
#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
/* MCi_MISC register defines */
#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
#define MCI_MISC_ADDR_PHYS 2 /* physical address */
#define MCI_MISC_ADDR_MEM 3 /* memory address */
#define MCI_MISC_ADDR_GENERIC 7 /* generic */
/* CTL2 register defines */
#define MCI_CTL2_CMCI_EN (1ULL << 30)
#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
#define MCJ_CTX_MASK 3
#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
#define MCJ_CTX_RANDOM 0 /* inject context: random */
#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
#define MCJ_EXCEPTION 0x8 /* raise as exception */
#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
/* Fields are zero when not available */
struct mce {
__u64 status;
__u64 misc;
__u64 addr;
__u64 mcgstatus;
__u64 ip;
__u64 tsc; /* cpu time stamp counter */
__u64 time; /* wall time_t when error was detected */
__u8 cpuvendor; /* cpu vendor as encoded in system.h */
__u8 inject_flags; /* software inject flags */
__u16 pad;
__u32 cpuid; /* CPUID 1 EAX */
__u8 cs; /* code segment */
__u8 bank; /* machine check bank */
__u8 cpu; /* cpu number; obsolete; use extcpu now */
__u8 finished; /* entry is valid */
__u32 extcpu; /* linux cpu number that detected the error */
__u32 socketid; /* CPU socket ID */
__u32 apicid; /* CPU initial apic ID */
__u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
};
/*
* This structure contains all data related to the MCE log. Also
* carries a signature to make it easier to find from external
* debugging tools. Each entry is only valid when its finished flag
* is set.
*/
#define MCE_LOG_LEN 32
struct mce_log {
char signature[12]; /* "MACHINECHECK" */
unsigned len; /* = MCE_LOG_LEN */
unsigned next;
unsigned flags;
unsigned recordlen; /* length of struct mce */
struct mce entry[MCE_LOG_LEN];
};
#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
#define MCE_LOG_SIGNATURE "MACHINECHECK"
#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
#define MCE_GET_LOG_LEN _IOR('M', 2, int)
#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
/* Software defined banks */
#define MCE_EXTENDED_BANK 128
#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
#endif /* _UAPI_ASM_X86_MCE_H */
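The three ioctls above are the interface mcelog-style tools use against the machine-check log character device (the /dev/mcelog path and root requirement are assumptions about that driver, not part of this header); a minimal hedged sketch:

/* Hedged sketch, not part of this patch: ask the MCE log device for its
 * record and buffer sizes using the ioctls defined above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/mce.h>

int main(void)
{
	int fd = open("/dev/mcelog", O_RDONLY);
	int record_len = 0, log_len = 0;

	if (fd < 0) {
		perror("open /dev/mcelog");
		return 1;
	}
	if (ioctl(fd, MCE_GET_RECORD_LEN, &record_len) == 0 &&
	    ioctl(fd, MCE_GET_LOG_LEN, &log_len) == 0)
		printf("record %d bytes, log holds %d records\n",
		       record_len, log_len);
	close(fd);
	return 0;
}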
#ifndef _UAPI_ASM_X86_MSR_H
#define _UAPI_ASM_X86_MSR_H
#include <asm/msr-index.h>
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/ioctl.h>
#define X86_IOC_RDMSR_REGS _IOWR('c', 0xA0, __u32[8])
#define X86_IOC_WRMSR_REGS _IOWR('c', 0xA1, __u32[8])
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_ASM_X86_MSR_H */
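The MSR numbers exported through <asm/msr-index.h> are usually consumed from user space via the msr driver's per-CPU device nodes (the /dev/cpu/0/msr path and root requirement are assumptions about that driver, not something this header defines); a hedged sketch:

/* Hedged sketch, not part of this patch: read IA32_TSC through the msr
 * driver; the MSR index is passed as the pread() file offset. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/types.h>
#include <asm/msr-index.h>

int main(void)
{
	int fd = open("/dev/cpu/0/msr", O_RDONLY);
	__u64 tsc;

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	if (pread(fd, &tsc, sizeof(tsc), MSR_IA32_TSC) == sizeof(tsc))
		printf("IA32_TSC = %llu\n", (unsigned long long)tsc);
	close(fd);
	return 0;
}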
/* Generic MTRR (Memory Type Range Register) ioctls.
Copyright (C) 1997-1999 Richard Gooch
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Richard Gooch may be reached by email at rgooch@atnf.csiro.au
The postal address is:
Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
*/
#ifndef _UAPI_ASM_X86_MTRR_H
#define _UAPI_ASM_X86_MTRR_H
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/errno.h>
#define MTRR_IOCTL_BASE 'M'
/* Warning: this structure has a different order from i386
on x86-64. The 32bit emulation code takes care of that.
But you need to use this for 64bit, otherwise your X server
will break. */
#ifdef __i386__
struct mtrr_sentry {
unsigned long base; /* Base address */
unsigned int size; /* Size of region */
unsigned int type; /* Type of region */
};
struct mtrr_gentry {
unsigned int regnum; /* Register number */
unsigned long base; /* Base address */
unsigned int size; /* Size of region */
unsigned int type; /* Type of region */
};
#else /* __i386__ */
struct mtrr_sentry {
__u64 base; /* Base address */
__u32 size; /* Size of region */
__u32 type; /* Type of region */
};
struct mtrr_gentry {
__u64 base; /* Base address */
__u32 size; /* Size of region */
__u32 regnum; /* Register number */
__u32 type; /* Type of region */
__u32 _pad; /* Unused */
};
#endif /* !__i386__ */
struct mtrr_var_range {
__u32 base_lo;
__u32 base_hi;
__u32 mask_lo;
__u32 mask_hi;
};
/* In the Intel processor's MTRR interface, the MTRR type is always held in
an 8 bit field: */
typedef __u8 mtrr_type;
#define MTRR_NUM_FIXED_RANGES 88
#define MTRR_MAX_VAR_RANGES 256
struct mtrr_state_type {
struct mtrr_var_range var_ranges[MTRR_MAX_VAR_RANGES];
mtrr_type fixed_ranges[MTRR_NUM_FIXED_RANGES];
unsigned char enabled;
unsigned char have_fixed;
mtrr_type def_type;
};
#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
/* These are the various ioctls */
#define MTRRIOC_ADD_ENTRY _IOW(MTRR_IOCTL_BASE, 0, struct mtrr_sentry)
#define MTRRIOC_SET_ENTRY _IOW(MTRR_IOCTL_BASE, 1, struct mtrr_sentry)
#define MTRRIOC_DEL_ENTRY _IOW(MTRR_IOCTL_BASE, 2, struct mtrr_sentry)
#define MTRRIOC_GET_ENTRY _IOWR(MTRR_IOCTL_BASE, 3, struct mtrr_gentry)
#define MTRRIOC_KILL_ENTRY _IOW(MTRR_IOCTL_BASE, 4, struct mtrr_sentry)
#define MTRRIOC_ADD_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 5, struct mtrr_sentry)
#define MTRRIOC_SET_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 6, struct mtrr_sentry)
#define MTRRIOC_DEL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 7, struct mtrr_sentry)
#define MTRRIOC_GET_PAGE_ENTRY _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
#define MTRRIOC_KILL_PAGE_ENTRY _IOW(MTRR_IOCTL_BASE, 9, struct mtrr_sentry)
/* These are the region types */
#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRCOMB 1
/*#define MTRR_TYPE_ 2*/
/*#define MTRR_TYPE_ 3*/
#define MTRR_TYPE_WRTHROUGH 4
#define MTRR_TYPE_WRPROT 5
#define MTRR_TYPE_WRBACK 6
#define MTRR_NUM_TYPES 7
#endif /* _UAPI_ASM_X86_MTRR_H */
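The MTRRIOC_* ioctls above are issued against /proc/mtrr (that path, the sample base/size values, and the root requirement are assumptions, not part of this header); a hedged sketch marking an invented frame-buffer aperture write-combining:

/* Hedged sketch, not part of this patch: add a write-combining MTRR for
 * an invented 16 MiB frame-buffer aperture via /proc/mtrr. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/mtrr.h>

int main(void)
{
	struct mtrr_sentry sentry = {
		.base = 0xf0000000,	/* invented aperture base */
		.size = 0x01000000,	/* 16 MiB */
		.type = MTRR_TYPE_WRCOMB,
	};
	int fd = open("/proc/mtrr", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/mtrr");
		return 1;
	}
	if (ioctl(fd, MTRRIOC_ADD_ENTRY, &sentry) < 0)
		perror("MTRRIOC_ADD_ENTRY");
	close(fd);
	return 0;
}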
#ifndef __KERNEL__
# ifdef __i386__
# include <asm/posix_types_32.h>
# elif defined(__ILP32__)
# include <asm/posix_types_x32.h>
# else
# include <asm/posix_types_64.h>
# endif
#endif
#ifndef _UAPI_ASM_X86_PROCESSOR_FLAGS_H
#define _UAPI_ASM_X86_PROCESSOR_FLAGS_H
/* Various flags defined: can be included from assembler. */
/*
* EFLAGS bits
*/
#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
#define X86_EFLAGS_BIT1 0x00000002 /* Bit 1 - always on */
#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
/*
* Basic CPU control in CR0
*/
#define X86_CR0_PE 0x00000001 /* Protection Enable */
#define X86_CR0_MP 0x00000002 /* Monitor Coprocessor */
#define X86_CR0_EM 0x00000004 /* Emulation */
#define X86_CR0_TS 0x00000008 /* Task Switched */
#define X86_CR0_ET 0x00000010 /* Extension Type */
#define X86_CR0_NE 0x00000020 /* Numeric Error */
#define X86_CR0_WP 0x00010000 /* Write Protect */
#define X86_CR0_AM 0x00040000 /* Alignment Mask */
#define X86_CR0_NW 0x20000000 /* Not Write-through */
#define X86_CR0_CD 0x40000000 /* Cache Disable */
#define X86_CR0_PG 0x80000000 /* Paging */
/*
* Paging options in CR3
*/
#define X86_CR3_PWT 0x00000008 /* Page Write Through */
#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */
#define X86_CR3_PCID_MASK 0x00000fff /* PCID Mask */
/*
* Intel CPU features in CR4
*/
#define X86_CR4_VME 0x00000001 /* enable vm86 extensions */
#define X86_CR4_PVI 0x00000002 /* virtual interrupts flag enable */
#define X86_CR4_TSD 0x00000004 /* disable time stamp at ipl 3 */
#define X86_CR4_DE 0x00000008 /* enable debugging extensions */
#define X86_CR4_PSE 0x00000010 /* enable page size extensions */
#define X86_CR4_PAE 0x00000020 /* enable physical address extensions */
#define X86_CR4_MCE 0x00000040 /* Machine check enable */
#define X86_CR4_PGE 0x00000080 /* enable global pages */
#define X86_CR4_PCE 0x00000100 /* enable performance counters at ipl 3 */
#define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */
#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
#define X86_CR4_PCIDE 0x00020000 /* enable PCID support */
#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
#define X86_CR4_SMAP 0x00200000 /* enable SMAP support */
/*
* x86-64 Task Priority Register, CR8
*/
#define X86_CR8_TPR 0x0000000F /* task priority register */
/*
* AMD and Transmeta use MSRs for configuration; see <asm/msr-index.h>
*/
/*
* NSC/Cyrix CPU configuration register indexes
*/
#define CX86_PCR0 0x20
#define CX86_GCR 0xb8
#define CX86_CCR0 0xc0
#define CX86_CCR1 0xc1
#define CX86_CCR2 0xc2
#define CX86_CCR3 0xc3
#define CX86_CCR4 0xe8
#define CX86_CCR5 0xe9
#define CX86_CCR6 0xea
#define CX86_CCR7 0xeb
#define CX86_PCR1 0xf0
#define CX86_DIR0 0xfe
#define CX86_DIR1 0xff
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc
#endif /* _UAPI_ASM_X86_PROCESSOR_FLAGS_H */
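The X86_EFLAGS_* masks above are what you would use to pick apart a saved flags word (from ptrace, a core file, or a signal frame); a hedged sketch with an invented sample value:

/* Hedged sketch, not part of this patch: decode a few bits of a saved
 * EFLAGS word with the masks defined above. */
#include <stdio.h>
#include <asm/processor-flags.h>

int main(void)
{
	unsigned long eflags = 0x00000246;	/* invented sample value */

	printf("IF=%d ZF=%d TF=%d IOPL=%lu\n",
	       !!(eflags & X86_EFLAGS_IF),
	       !!(eflags & X86_EFLAGS_ZF),
	       !!(eflags & X86_EFLAGS_TF),
	       (eflags & X86_EFLAGS_IOPL) >> 12);
	return 0;
}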
#ifndef _UAPI_ASM_X86_PTRACE_H
#define _UAPI_ASM_X86_PTRACE_H
#include <linux/compiler.h> /* For __user */
#include <asm/ptrace-abi.h>
#include <asm/processor-flags.h>
#ifndef __ASSEMBLY__
#ifdef __i386__
/* this struct defines the way the registers are stored on the
stack during a system call. */
#ifndef __KERNEL__
struct pt_regs {
long ebx;
long ecx;
long edx;
long esi;
long edi;
long ebp;
long eax;
int xds;
int xes;
int xfs;
int xgs;
long orig_eax;
long eip;
int xcs;
long eflags;
long esp;
int xss;
};
#endif /* __KERNEL__ */
#else /* __i386__ */
#ifndef __KERNEL__
struct pt_regs {
unsigned long r15;
unsigned long r14;
unsigned long r13;
unsigned long r12;
unsigned long rbp;
unsigned long rbx;
/* arguments: non interrupts/non tracing syscalls only save up to here*/
unsigned long r11;
unsigned long r10;
unsigned long r9;
unsigned long r8;
unsigned long rax;
unsigned long rcx;
unsigned long rdx;
unsigned long rsi;
unsigned long rdi;
unsigned long orig_rax;
/* end of arguments */
/* cpu exception frame or undefined */
unsigned long rip;
unsigned long cs;
unsigned long eflags;
unsigned long rsp;
unsigned long ss;
/* top of stack page */
};
#endif /* __KERNEL__ */
#endif /* !__i386__ */
#endif /* !__ASSEMBLY__ */
#endif /* _UAPI_ASM_X86_PTRACE_H */
#ifndef _UAPI_ASM_X86_SIGCONTEXT_H
#define _UAPI_ASM_X86_SIGCONTEXT_H
#include <linux/compiler.h>
#include <linux/types.h>
#define FP_XSTATE_MAGIC1 0x46505853U
#define FP_XSTATE_MAGIC2 0x46505845U
#define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2)
/*
* bytes 464..511 in the current 512byte layout of fxsave/fxrstor frame
* are reserved for SW usage. On CPUs supporting xsave/xrstor, these bytes
* are used to extend the fpstate pointer in the sigcontext, which now
* includes the extended state information along with fpstate information.
*
* Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved
* area and FP_XSTATE_MAGIC2 at the end of memory layout
* (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the
* extended state information in the memory layout pointed by the fpstate
* pointer in sigcontext.
*/
struct _fpx_sw_bytes {
__u32 magic1; /* FP_XSTATE_MAGIC1 */
__u32 extended_size; /* total size of the layout referred by
* fpstate pointer in the sigcontext.
*/
__u64 xstate_bv;
/* feature bit mask (including fp/sse/extended
* state) that is present in the memory
* layout.
*/
__u32 xstate_size; /* actual xsave state size, based on the
* features saved in the layout.
* 'extended_size' will be greater than
* 'xstate_size'.
*/
__u32 padding[7]; /* for future use. */
};
#ifdef __i386__
/*
* As documented in the iBCS2 standard..
*
* The first part of "struct _fpstate" is just the normal i387
* hardware setup, the extra "status" word is used to save the
* coprocessor status word before entering the handler.
*
* Pentium III FXSR, SSE support
* Gareth Hughes <gareth@valinux.com>, May 2000
*
* The FPU state data structure has had to grow to accommodate the
* extended FPU state required by the Streaming SIMD Extensions.
* There is no documented standard to accomplish this at the moment.
*/
struct _fpreg {
unsigned short significand[4];
unsigned short exponent;
};
struct _fpxreg {
unsigned short significand[4];
unsigned short exponent;
unsigned short padding[3];
};
struct _xmmreg {
unsigned long element[4];
};
struct _fpstate {
/* Regular FPU environment */
unsigned long cw;
unsigned long sw;
unsigned long tag;
unsigned long ipoff;
unsigned long cssel;
unsigned long dataoff;
unsigned long datasel;
struct _fpreg _st[8];
unsigned short status;
unsigned short magic; /* 0xffff = regular FPU data only */
/* FXSR FPU environment */
unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
unsigned long mxcsr;
unsigned long reserved;
struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
struct _xmmreg _xmm[8];
unsigned long padding1[44];
union {
unsigned long padding2[12];
struct _fpx_sw_bytes sw_reserved; /* represents the extended
* state info */
};
};
#define X86_FXSR_MAGIC 0x0000
#ifndef __KERNEL__
/*
* User-space might still rely on the old definition:
*/
struct sigcontext {
unsigned short gs, __gsh;
unsigned short fs, __fsh;
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned long edi;
unsigned long esi;
unsigned long ebp;
unsigned long esp;
unsigned long ebx;
unsigned long edx;
unsigned long ecx;
unsigned long eax;
unsigned long trapno;
unsigned long err;
unsigned long eip;
unsigned short cs, __csh;
unsigned long eflags;
unsigned long esp_at_signal;
unsigned short ss, __ssh;
struct _fpstate __user *fpstate;
unsigned long oldmask;
unsigned long cr2;
};
#endif /* !__KERNEL__ */
#else /* __i386__ */
/* FXSAVE frame */
/* Note: reserved1/2 may someday contain valuable data. Always save/restore
them when you change signal frames. */
struct _fpstate {
__u16 cwd;
__u16 swd;
__u16 twd; /* Note this is not the same as the
32bit/x87/FSAVE twd */
__u16 fop;
__u64 rip;
__u64 rdp;
__u32 mxcsr;
__u32 mxcsr_mask;
__u32 st_space[32]; /* 8*16 bytes for each FP-reg */
__u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */
__u32 reserved2[12];
union {
__u32 reserved3[12];
struct _fpx_sw_bytes sw_reserved; /* represents the extended
* state information */
};
};
#ifndef __KERNEL__
/*
* User-space might still rely on the old definition:
*/
struct sigcontext {
__u64 r8;
__u64 r9;
__u64 r10;
__u64 r11;
__u64 r12;
__u64 r13;
__u64 r14;
__u64 r15;
__u64 rdi;
__u64 rsi;
__u64 rbp;
__u64 rbx;
__u64 rdx;
__u64 rax;
__u64 rcx;
__u64 rsp;
__u64 rip;
__u64 eflags; /* RFLAGS */
__u16 cs;
__u16 gs;
__u16 fs;
__u16 __pad0;
__u64 err;
__u64 trapno;
__u64 oldmask;
__u64 cr2;
struct _fpstate __user *fpstate; /* zero when no FPU context */
#ifdef __ILP32__
__u32 __fpstate_pad;
#endif
__u64 reserved1[8];
};
#endif /* !__KERNEL__ */
#endif /* !__i386__ */
struct _xsave_hdr {
__u64 xstate_bv;
__u64 reserved1[2];
__u64 reserved2[5];
};
struct _ymmh_state {
/* 16 * 16 bytes for each YMMH-reg */
__u32 ymmh_space[64];
};
/*
* Extended state pointed by the fpstate pointer in the sigcontext.
* In addition to the fpstate, information encoded in the xstate_hdr
* indicates the presence of other extended state information
* supported by the processor and OS.
*/
struct _xstate {
struct _fpstate fpstate;
struct _xsave_hdr xstate_hdr;
struct _ymmh_state ymmh;
/* new processor state extensions go here */
};
#endif /* _UAPI_ASM_X86_SIGCONTEXT_H */
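The FP_XSTATE_MAGIC1 protocol described in the comment above is how a signal handler can tell whether the fpstate area carries extended xsave state; a simplified hedged check (the full protocol also verifies FP_XSTATE_MAGIC2 at the end of the frame, which this sketch skips, and the sample values are invented):

/* Hedged sketch, not part of this patch: decide from the sw_reserved
 * bytes whether extended state follows the legacy fpstate image. */
#include <stdio.h>
#include <asm/sigcontext.h>

static int has_extended_state(const struct _fpx_sw_bytes *sw)
{
	return sw->magic1 == FP_XSTATE_MAGIC1 &&
	       sw->extended_size > sw->xstate_size;
}

int main(void)
{
	struct _fpx_sw_bytes sw = {
		.magic1 = FP_XSTATE_MAGIC1,
		.extended_size = 1024,		/* invented sizes */
		.xstate_size = 832,
	};

	printf("extended state present: %s\n",
	       has_extended_state(&sw) ? "yes" : "no");
	return 0;
}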
#ifndef _UAPI_ASM_X86_SIGNAL_H
#define _UAPI_ASM_X86_SIGNAL_H
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/time.h>
#include <linux/compiler.h>
/* Avoid too many header ordering problems. */
struct siginfo;
#ifndef __KERNEL__
/* Here we must cater to libcs that poke about in kernel headers. */
#define NSIG 32
typedef unsigned long sigset_t;
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#define SIGHUP 1
#define SIGINT 2
#define SIGQUIT 3
#define SIGILL 4
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT 6
#define SIGBUS 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGUSR1 10
#define SIGSEGV 11
#define SIGUSR2 12
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
#define SIGSTKFLT 16
#define SIGCHLD 17
#define SIGCONT 18
#define SIGSTOP 19
#define SIGTSTP 20
#define SIGTTIN 21
#define SIGTTOU 22
#define SIGURG 23
#define SIGXCPU 24
#define SIGXFSZ 25
#define SIGVTALRM 26
#define SIGPROF 27
#define SIGWINCH 28
#define SIGIO 29
#define SIGPOLL SIGIO
/*
#define SIGLOST 29
*/
#define SIGPWR 30
#define SIGSYS 31
#define SIGUNUSED 31
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
/*
* SA_FLAGS values:
*
* SA_ONSTACK indicates that a registered stack_t will be used.
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
* SA_RESETHAND clears the handler when the signal is delivered.
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
* SA_NODEFER prevents the current signal from being masked in the handler.
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
#define SA_NOCLDSTOP 0x00000001u
#define SA_NOCLDWAIT 0x00000002u
#define SA_SIGINFO 0x00000004u
#define SA_ONSTACK 0x08000000u
#define SA_RESTART 0x10000000u
#define SA_NODEFER 0x40000000u
#define SA_RESETHAND 0x80000000u
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_RESTORER 0x04000000
/*
* sigaltstack controls
*/
#define SS_ONSTACK 1
#define SS_DISABLE 2
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
#include <asm-generic/signal-defs.h>
#ifndef __ASSEMBLY__
#ifdef __i386__
# ifndef __KERNEL__
/* Here we must cater to libcs that poke about in kernel headers. */
struct sigaction {
union {
__sighandler_t _sa_handler;
void (*_sa_sigaction)(int, struct siginfo *, void *);
} _u;
sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
};
#define sa_handler _u._sa_handler
#define sa_sigaction _u._sa_sigaction
# endif /* ! __KERNEL__ */
#else /* __i386__ */
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
__sigrestore_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};
struct k_sigaction {
struct sigaction sa;
};
#endif /* !__i386__ */
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
#endif /* __ASSEMBLY__ */
#endif /* _UAPI_ASM_X86_SIGNAL_H */
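The SA_* bits above are the same ABI values glibc's sigaction(2) wrapper hands to the kernel; a hedged sketch using the libc interface rather than the raw structures in this header:

/* Hedged sketch, not part of this patch: install a SIGCHLD handler with
 * SA_SIGINFO | SA_RESTART | SA_NOCLDSTOP via the libc wrapper. */
#include <stdio.h>
#include <string.h>
#include <signal.h>

static void on_chld(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)info; (void)ctx;	/* async-signal-safe work only */
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = on_chld;
	sa.sa_flags = SA_SIGINFO | SA_RESTART | SA_NOCLDSTOP;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGCHLD, &sa, NULL) < 0) {
		perror("sigaction");
		return 1;
	}
	printf("SIGCHLD handler installed\n");
	return 0;
}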
#ifndef _UAPI__SVM_H
#define _UAPI__SVM_H
#define SVM_EXIT_READ_CR0 0x000
#define SVM_EXIT_READ_CR3 0x003
#define SVM_EXIT_READ_CR4 0x004
#define SVM_EXIT_READ_CR8 0x008
#define SVM_EXIT_WRITE_CR0 0x010
#define SVM_EXIT_WRITE_CR3 0x013
#define SVM_EXIT_WRITE_CR4 0x014
#define SVM_EXIT_WRITE_CR8 0x018
#define SVM_EXIT_READ_DR0 0x020
#define SVM_EXIT_READ_DR1 0x021
#define SVM_EXIT_READ_DR2 0x022
#define SVM_EXIT_READ_DR3 0x023
#define SVM_EXIT_READ_DR4 0x024
#define SVM_EXIT_READ_DR5 0x025
#define SVM_EXIT_READ_DR6 0x026
#define SVM_EXIT_READ_DR7 0x027
#define SVM_EXIT_WRITE_DR0 0x030
#define SVM_EXIT_WRITE_DR1 0x031
#define SVM_EXIT_WRITE_DR2 0x032
#define SVM_EXIT_WRITE_DR3 0x033
#define SVM_EXIT_WRITE_DR4 0x034
#define SVM_EXIT_WRITE_DR5 0x035
#define SVM_EXIT_WRITE_DR6 0x036
#define SVM_EXIT_WRITE_DR7 0x037
#define SVM_EXIT_EXCP_BASE 0x040
#define SVM_EXIT_INTR 0x060
#define SVM_EXIT_NMI 0x061
#define SVM_EXIT_SMI 0x062
#define SVM_EXIT_INIT 0x063
#define SVM_EXIT_VINTR 0x064
#define SVM_EXIT_CR0_SEL_WRITE 0x065
#define SVM_EXIT_IDTR_READ 0x066
#define SVM_EXIT_GDTR_READ 0x067
#define SVM_EXIT_LDTR_READ 0x068
#define SVM_EXIT_TR_READ 0x069
#define SVM_EXIT_IDTR_WRITE 0x06a
#define SVM_EXIT_GDTR_WRITE 0x06b
#define SVM_EXIT_LDTR_WRITE 0x06c
#define SVM_EXIT_TR_WRITE 0x06d
#define SVM_EXIT_RDTSC 0x06e
#define SVM_EXIT_RDPMC 0x06f
#define SVM_EXIT_PUSHF 0x070
#define SVM_EXIT_POPF 0x071
#define SVM_EXIT_CPUID 0x072
#define SVM_EXIT_RSM 0x073
#define SVM_EXIT_IRET 0x074
#define SVM_EXIT_SWINT 0x075
#define SVM_EXIT_INVD 0x076
#define SVM_EXIT_PAUSE 0x077
#define SVM_EXIT_HLT 0x078
#define SVM_EXIT_INVLPG 0x079
#define SVM_EXIT_INVLPGA 0x07a
#define SVM_EXIT_IOIO 0x07b
#define SVM_EXIT_MSR 0x07c
#define SVM_EXIT_TASK_SWITCH 0x07d
#define SVM_EXIT_FERR_FREEZE 0x07e
#define SVM_EXIT_SHUTDOWN 0x07f
#define SVM_EXIT_VMRUN 0x080
#define SVM_EXIT_VMMCALL 0x081
#define SVM_EXIT_VMLOAD 0x082
#define SVM_EXIT_VMSAVE 0x083
#define SVM_EXIT_STGI 0x084
#define SVM_EXIT_CLGI 0x085
#define SVM_EXIT_SKINIT 0x086
#define SVM_EXIT_RDTSCP 0x087
#define SVM_EXIT_ICEBP 0x088
#define SVM_EXIT_WBINVD 0x089
#define SVM_EXIT_MONITOR 0x08a
#define SVM_EXIT_MWAIT 0x08b
#define SVM_EXIT_MWAIT_COND 0x08c
#define SVM_EXIT_XSETBV 0x08d
#define SVM_EXIT_NPF 0x400
#define SVM_EXIT_ERR -1
#define SVM_EXIT_REASONS \
{ SVM_EXIT_READ_CR0, "read_cr0" }, \
{ SVM_EXIT_READ_CR3, "read_cr3" }, \
{ SVM_EXIT_READ_CR4, "read_cr4" }, \
{ SVM_EXIT_READ_CR8, "read_cr8" }, \
{ SVM_EXIT_WRITE_CR0, "write_cr0" }, \
{ SVM_EXIT_WRITE_CR3, "write_cr3" }, \
{ SVM_EXIT_WRITE_CR4, "write_cr4" }, \
{ SVM_EXIT_WRITE_CR8, "write_cr8" }, \
{ SVM_EXIT_READ_DR0, "read_dr0" }, \
{ SVM_EXIT_READ_DR1, "read_dr1" }, \
{ SVM_EXIT_READ_DR2, "read_dr2" }, \
{ SVM_EXIT_READ_DR3, "read_dr3" }, \
{ SVM_EXIT_WRITE_DR0, "write_dr0" }, \
{ SVM_EXIT_WRITE_DR1, "write_dr1" }, \
{ SVM_EXIT_WRITE_DR2, "write_dr2" }, \
{ SVM_EXIT_WRITE_DR3, "write_dr3" }, \
{ SVM_EXIT_WRITE_DR5, "write_dr5" }, \
{ SVM_EXIT_WRITE_DR7, "write_dr7" }, \
{ SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \
{ SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \
{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
{ SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
{ SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
{ SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
{ SVM_EXIT_INTR, "interrupt" }, \
{ SVM_EXIT_NMI, "nmi" }, \
{ SVM_EXIT_SMI, "smi" }, \
{ SVM_EXIT_INIT, "init" }, \
{ SVM_EXIT_VINTR, "vintr" }, \
{ SVM_EXIT_CPUID, "cpuid" }, \
{ SVM_EXIT_INVD, "invd" }, \
{ SVM_EXIT_HLT, "hlt" }, \
{ SVM_EXIT_INVLPG, "invlpg" }, \
{ SVM_EXIT_INVLPGA, "invlpga" }, \
{ SVM_EXIT_IOIO, "io" }, \
{ SVM_EXIT_MSR, "msr" }, \
{ SVM_EXIT_TASK_SWITCH, "task_switch" }, \
{ SVM_EXIT_SHUTDOWN, "shutdown" }, \
{ SVM_EXIT_VMRUN, "vmrun" }, \
{ SVM_EXIT_VMMCALL, "hypercall" }, \
{ SVM_EXIT_VMLOAD, "vmload" }, \
{ SVM_EXIT_VMSAVE, "vmsave" }, \
{ SVM_EXIT_STGI, "stgi" }, \
{ SVM_EXIT_CLGI, "clgi" }, \
{ SVM_EXIT_SKINIT, "skinit" }, \
{ SVM_EXIT_WBINVD, "wbinvd" }, \
{ SVM_EXIT_MONITOR, "monitor" }, \
{ SVM_EXIT_MWAIT, "mwait" }, \
{ SVM_EXIT_XSETBV, "xsetbv" }, \
{ SVM_EXIT_NPF, "npf" }
#endif /* _UAPI__SVM_H */
#ifndef _UAPI_ASM_X86_UNISTD_H
#define _UAPI_ASM_X86_UNISTD_H
/* x32 syscall flag bit */
#define __X32_SYSCALL_BIT 0x40000000
#ifndef __KERNEL__
# ifdef __i386__
# include <asm/unistd_32.h>
# elif defined(__ILP32__)
# include <asm/unistd_x32.h>
# else
# include <asm/unistd_64.h>
# endif
#endif
#endif /* _UAPI_ASM_X86_UNISTD_H */
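__X32_SYSCALL_BIT above is the tag that distinguishes x32 syscall numbers from native 64-bit ones; a small hedged sketch just making that composition visible (the plain number 1 is illustrative, and on a real x32 build <asm/unistd_x32.h> already encodes the bit):

/* Hedged sketch, not part of this patch: show how the x32 flag bit tags
 * a syscall number. */
#include <stdio.h>
#include <asm/unistd.h>

int main(void)
{
	unsigned long nr = 1;				/* illustrative number */
	unsigned long x32_nr = nr | __X32_SYSCALL_BIT;

	printf("x32-tagged number: %#lx\n", x32_nr);
	printf("carries x32 bit:   %d\n", !!(x32_nr & __X32_SYSCALL_BIT));
	return 0;
}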
#ifndef _UAPI_ASM_X86_VM86_H
#define _UAPI_ASM_X86_VM86_H
/*
* I'm guessing at the VIF/VIP flag usage, but hope that this is how
* the Pentium uses them. Linux will return from vm86 mode when both
* VIF and VIP are set.
*
* On a Pentium, we could probably optimize the virtual flags directly
* in the eflags register instead of doing it "by hand" in vflags...
*
* Linus
*/
#include <asm/processor-flags.h>
#define BIOSSEG 0x0f000
#define CPU_086 0
#define CPU_186 1
#define CPU_286 2
#define CPU_386 3
#define CPU_486 4
#define CPU_586 5
/*
* Return values for the 'vm86()' system call
*/
#define VM86_TYPE(retval) ((retval) & 0xff)
#define VM86_ARG(retval) ((retval) >> 8)
#define VM86_SIGNAL 0 /* return due to signal */
#define VM86_UNKNOWN 1 /* unhandled GP fault
- IO-instruction or similar */
#define VM86_INTx 2 /* int3/int x instruction (ARG = x) */
#define VM86_STI 3 /* sti/popf/iret instruction enabled
virtual interrupts */
/*
* Additional return values when invoking new vm86()
*/
#define VM86_PICRETURN 4 /* return due to pending PIC request */
#define VM86_TRAP 6 /* return due to DOS-debugger request */
/*
* function codes when invoking new vm86()
*/
#define VM86_PLUS_INSTALL_CHECK 0
#define VM86_ENTER 1
#define VM86_ENTER_NO_BYPASS 2
#define VM86_REQUEST_IRQ 3
#define VM86_FREE_IRQ 4
#define VM86_GET_IRQ_BITS 5
#define VM86_GET_AND_RESET_IRQ 6
/*
* This is the stack-layout seen by the user space program when we have
* done a translation of "SAVE_ALL" from vm86 mode. The real kernel layout
* is 'kernel_vm86_regs' (see below).
*/
struct vm86_regs {
/*
* normal regs, with special meaning for the segment descriptors..
*/
long ebx;
long ecx;
long edx;
long esi;
long edi;
long ebp;
long eax;
long __null_ds;
long __null_es;
long __null_fs;
long __null_gs;
long orig_eax;
long eip;
unsigned short cs, __csh;
long eflags;
long esp;
unsigned short ss, __ssh;
/*
* these are specific to v86 mode:
*/
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned short fs, __fsh;
unsigned short gs, __gsh;
};
struct revectored_struct {
unsigned long __map[8]; /* 256 bits */
};
struct vm86_struct {
struct vm86_regs regs;
unsigned long flags;
unsigned long screen_bitmap;
unsigned long cpu_type;
struct revectored_struct int_revectored;
struct revectored_struct int21_revectored;
};
/*
* flags masks
*/
#define VM86_SCREEN_BITMAP 0x0001
struct vm86plus_info_struct {
unsigned long force_return_for_pic:1;
unsigned long vm86dbg_active:1; /* for debugger */
unsigned long vm86dbg_TFpendig:1; /* for debugger */
unsigned long unused:28;
unsigned long is_vm86pus:1; /* for vm86 internal use */
unsigned char vm86dbg_intxxtab[32]; /* for debugger */
};
struct vm86plus_struct {
struct vm86_regs regs;
unsigned long flags;
unsigned long screen_bitmap;
unsigned long cpu_type;
struct revectored_struct int_revectored;
struct revectored_struct int21_revectored;
struct vm86plus_info_struct vm86plus;
};
#endif /* _UAPI_ASM_X86_VM86_H */
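VM86_TYPE() and VM86_ARG() above are how callers decode what made vm86() return; a hedged sketch with an invented return value:

/* Hedged sketch, not part of this patch: split a vm86() return value
 * into its type and argument. */
#include <stdio.h>
#include <asm/vm86.h>

int main(void)
{
	int ret = (0x21 << 8) | VM86_INTx;	/* pretend: INT 0x21 trapped */

	switch (VM86_TYPE(ret)) {
	case VM86_SIGNAL:
		printf("returned because of a signal\n");
		break;
	case VM86_INTx:
		printf("software interrupt 0x%x trapped\n", VM86_ARG(ret));
		break;
	default:
		printf("type %d, arg 0x%x\n", VM86_TYPE(ret), VM86_ARG(ret));
	}
	return 0;
}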
/*
* vmx.h: VMX Architecture related definitions
* Copyright (c) 2004, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* A few random additions are:
* Copyright (C) 2006 Qumranet
* Avi Kivity <avi@qumranet.com>
* Yaniv Kamay <yaniv@qumranet.com>
*
*/
#ifndef _UAPIVMX_H
#define _UAPIVMX_H
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
#define EXIT_REASON_EXCEPTION_NMI 0
#define EXIT_REASON_EXTERNAL_INTERRUPT 1
#define EXIT_REASON_TRIPLE_FAULT 2
#define EXIT_REASON_PENDING_INTERRUPT 7
#define EXIT_REASON_NMI_WINDOW 8
#define EXIT_REASON_TASK_SWITCH 9
#define EXIT_REASON_CPUID 10
#define EXIT_REASON_HLT 12
#define EXIT_REASON_INVD 13
#define EXIT_REASON_INVLPG 14
#define EXIT_REASON_RDPMC 15
#define EXIT_REASON_RDTSC 16
#define EXIT_REASON_VMCALL 18
#define EXIT_REASON_VMCLEAR 19
#define EXIT_REASON_VMLAUNCH 20
#define EXIT_REASON_VMPTRLD 21
#define EXIT_REASON_VMPTRST 22
#define EXIT_REASON_VMREAD 23
#define EXIT_REASON_VMRESUME 24
#define EXIT_REASON_VMWRITE 25
#define EXIT_REASON_VMOFF 26
#define EXIT_REASON_VMON 27
#define EXIT_REASON_CR_ACCESS 28
#define EXIT_REASON_DR_ACCESS 29
#define EXIT_REASON_IO_INSTRUCTION 30
#define EXIT_REASON_MSR_READ 31
#define EXIT_REASON_MSR_WRITE 32
#define EXIT_REASON_INVALID_STATE 33
#define EXIT_REASON_MWAIT_INSTRUCTION 36
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION 40
#define EXIT_REASON_MCE_DURING_VMENTRY 41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS 44
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_WBINVD 54
#define EXIT_REASON_XSETBV 55
#define EXIT_REASON_INVPCID 58
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
{ EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
{ EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
{ EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
{ EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
{ EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
{ EXIT_REASON_CPUID, "CPUID" }, \
{ EXIT_REASON_HLT, "HLT" }, \
{ EXIT_REASON_INVLPG, "INVLPG" }, \
{ EXIT_REASON_RDPMC, "RDPMC" }, \
{ EXIT_REASON_RDTSC, "RDTSC" }, \
{ EXIT_REASON_VMCALL, "VMCALL" }, \
{ EXIT_REASON_VMCLEAR, "VMCLEAR" }, \
{ EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \
{ EXIT_REASON_VMPTRLD, "VMPTRLD" }, \
{ EXIT_REASON_VMPTRST, "VMPTRST" }, \
{ EXIT_REASON_VMREAD, "VMREAD" }, \
{ EXIT_REASON_VMRESUME, "VMRESUME" }, \
{ EXIT_REASON_VMWRITE, "VMWRITE" }, \
{ EXIT_REASON_VMOFF, "VMOFF" }, \
{ EXIT_REASON_VMON, "VMON" }, \
{ EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \
{ EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \
{ EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
{ EXIT_REASON_MSR_READ, "MSR_READ" }, \
{ EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
{ EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
{ EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
{ EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \
{ EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
{ EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
{ EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
{ EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
{ EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
{ EXIT_REASON_WBINVD, "WBINVD" }
#endif /* _UAPIVMX_H */
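The VMX_EXIT_REASONS pairs above are intended for trace-event pretty-printing; the same initializer can seed an ordinary lookup table, as in this hedged sketch:

/* Hedged sketch, not part of this patch: map a VMX exit reason number
 * to its name using the table macro defined above. */
#include <stdio.h>
#include <asm/vmx.h>

static const struct {
	unsigned int reason;
	const char *name;
} vmx_exit_names[] = { VMX_EXIT_REASONS };

static const char *vmx_exit_name(unsigned int reason)
{
	unsigned int i;

	for (i = 0; i < sizeof(vmx_exit_names) / sizeof(vmx_exit_names[0]); i++)
		if (vmx_exit_names[i].reason == reason)
			return vmx_exit_names[i].name;
	return "UNKNOWN";
}

int main(void)
{
	printf("exit %d is %s\n", EXIT_REASON_EPT_VIOLATION,
	       vmx_exit_name(EXIT_REASON_EPT_VIOLATION));
	return 0;
}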
#ifndef _UAPI_ASM_X86_VSYSCALL_H
#define _UAPI_ASM_X86_VSYSCALL_H
enum vsyscall_num {
__NR_vgettimeofday,
__NR_vtime,
__NR_vgetcpu,
};
#define VSYSCALL_START (-10UL << 20)
#define VSYSCALL_SIZE 1024
#define VSYSCALL_END (-2UL << 20)
#define VSYSCALL_MAPPED_PAGES 1
#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
#endif /* _UAPI_ASM_X86_VSYSCALL_H */
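The legacy vsyscall page sits at a fixed address computed from the macros above (the values printed below are for an x86-64 build; the page is not meaningful on 32-bit); a hedged sketch:

/* Hedged sketch, not part of this patch: print the fixed addresses of
 * the three legacy vsyscall entry points. */
#include <stdio.h>
#include <asm/vsyscall.h>

int main(void)
{
	printf("vgettimeofday at %#lx\n", VSYSCALL_ADDR(__NR_vgettimeofday));
	printf("vtime         at %#lx\n", VSYSCALL_ADDR(__NR_vtime));
	printf("vgetcpu       at %#lx\n", VSYSCALL_ADDR(__NR_vgetcpu));
	return 0;
}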