Commit efdfce2b authored by Linus Torvalds

Merge tag 'please-pull-paravirt' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux

Pull ia64 paravirt removal from Tony Luck:
 "Nobody cares about paravirtualization on ia64 anymore"

* tag 'please-pull-paravirt' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux:
  ia64: remove paravirt code
parents df687709 e55645ec
arch/ia64/Kconfig
...
@@ -137,29 +137,6 @@ config AUDIT_ARCH
	bool
	default y
-menuconfig PARAVIRT_GUEST
-	bool "Paravirtualized guest support"
-	depends on BROKEN
-	help
-	  Say Y here to get to see options related to running Linux under
-	  various hypervisors. This option alone does not add any kernel code.
-	  If you say N, all options in this submenu will be skipped and disabled.
-if PARAVIRT_GUEST
-config PARAVIRT
-	bool "Enable paravirtualization code"
-	depends on PARAVIRT_GUEST
-	default y
-	help
-	  This changes the kernel so it can modify itself when it is run
-	  under a hypervisor, potentially improving performance significantly
-	  over full virtualization. However, when run without a hypervisor
-	  the kernel is theoretically slower and slightly larger.
-endif
choice
	prompt "System type"
	default IA64_GENERIC
...
arch/ia64/include/asm/hw_irq.h
...
@@ -15,11 +15,7 @@
#include <asm/ptrace.h>
#include <asm/smp.h>
-#ifndef CONFIG_PARAVIRT
typedef u8 ia64_vector;
-#else
-typedef u16 ia64_vector;
-#endif
/*
 * 0 special
...
@@ -114,15 +110,11 @@ DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
extern struct irq_chip irq_type_ia64_lsapic;	/* CPU-internal interrupt controller */
-#ifdef CONFIG_PARAVIRT_GUEST
-#include <asm/paravirt.h>
-#else
#define ia64_register_ipi	ia64_native_register_ipi
#define assign_irq_vector	ia64_native_assign_irq_vector
#define free_irq_vector		ia64_native_free_irq_vector
#define register_percpu_irq	ia64_native_register_percpu_irq
#define ia64_resend_irq		ia64_native_resend_irq
-#endif
extern void ia64_native_register_ipi(void);
extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
...
arch/ia64/include/asm/intrinsics.h
...
@@ -7,19 +7,6 @@
#ifndef _ASM_IA64_INTRINSICS_H
#define _ASM_IA64_INTRINSICS_H
-#include <asm/paravirt_privop.h>
#include <uapi/asm/intrinsics.h>
-#ifndef __ASSEMBLY__
-#if defined(CONFIG_PARAVIRT)
-# undef IA64_INTRINSIC_API
-# undef IA64_INTRINSIC_MACRO
-# ifdef ASM_SUPPORTED
-#  define IA64_INTRINSIC_API(name)	paravirt_ ## name
-# else
-#  define IA64_INTRINSIC_API(name)	pv_cpu_ops.name
-# endif
-#define IA64_INTRINSIC_MACRO(name)	paravirt_ ## name
-#endif
-#endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_INTRINSICS_H */
arch/ia64/include/asm/iosapic.h
...
@@ -55,14 +55,10 @@
#define NR_IOSAPICS			256
-#ifdef CONFIG_PARAVIRT_GUEST
-#include <asm/paravirt.h>
-#else
#define iosapic_pcat_compat_init	ia64_native_iosapic_pcat_compat_init
#define __iosapic_read			__ia64_native_iosapic_read
#define __iosapic_write			__ia64_native_iosapic_write
#define iosapic_get_irq_chip		ia64_native_iosapic_get_irq_chip
-#endif
extern void __init ia64_native_iosapic_pcat_compat_init(void);
extern struct irq_chip *ia64_native_iosapic_get_irq_chip(unsigned long trigger);
...
arch/ia64/include/asm/module.h
...
@@ -18,12 +18,6 @@ struct mod_arch_specific {
	struct elf64_shdr *got;		/* global offset table */
	struct elf64_shdr *opd;		/* official procedure descriptors */
	struct elf64_shdr *unwind;	/* unwind-table section */
-#ifdef CONFIG_PARAVIRT
-	struct elf64_shdr *paravirt_bundles;
-					/* paravirt_alt_bundle_patch table */
-	struct elf64_shdr *paravirt_insts;
-					/* paravirt_alt_inst_patch table */
-#endif
	unsigned long gp;		/* global-pointer for module */
	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
...
arch/ia64/include/asm/native/inst.h
...
@@ -22,32 +22,6 @@
#define DO_SAVE_MIN		IA64_NATIVE_DO_SAVE_MIN
-#define __paravirt_switch_to			ia64_native_switch_to
-#define __paravirt_leave_syscall		ia64_native_leave_syscall
-#define __paravirt_work_processed_syscall	ia64_native_work_processed_syscall
-#define __paravirt_leave_kernel			ia64_native_leave_kernel
-#define __paravirt_pending_syscall_end		ia64_work_pending_syscall_end
-#define __paravirt_work_processed_syscall_target \
-						ia64_work_processed_syscall
-#define paravirt_fsyscall_table			ia64_native_fsyscall_table
-#define paravirt_fsys_bubble_down		ia64_native_fsys_bubble_down
-#ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
-# define PARAVIRT_POISON	0xdeadbeefbaadf00d
-# define CLOBBER(clob)			\
-	;;				\
-	movl clob = PARAVIRT_POISON;	\
-	;;
-# define CLOBBER_PRED(pred_clob)	\
-	;;				\
-	cmp.eq pred_clob, p0 = r0, r0	\
-	;;
-#else
-# define CLOBBER(clob)			/* nothing */
-# define CLOBBER_PRED(pred_clob)	/* nothing */
-#endif
#define MOV_FROM_IFA(reg)	\
	mov reg = cr.ifa
...
@@ -70,106 +44,76 @@
	mov reg = cr.iip
#define MOV_FROM_IVR(reg, clob)	\
-	mov reg = cr.ivr	\
-	CLOBBER(clob)
+	mov reg = cr.ivr
#define MOV_FROM_PSR(pred, reg, clob)	\
-(pred)	mov reg = psr		\
-	CLOBBER(clob)
+(pred)	mov reg = psr
#define MOV_FROM_ITC(pred, pred_clob, reg, clob)	\
-(pred)	mov reg = ar.itc	\
-	CLOBBER(clob)		\
-	CLOBBER_PRED(pred_clob)
+(pred)	mov reg = ar.itc
#define MOV_TO_IFA(reg, clob)	\
-	mov cr.ifa = reg	\
-	CLOBBER(clob)
+	mov cr.ifa = reg
#define MOV_TO_ITIR(pred, reg, clob)	\
-(pred)	mov cr.itir = reg	\
-	CLOBBER(clob)
+(pred)	mov cr.itir = reg
#define MOV_TO_IHA(pred, reg, clob)	\
-(pred)	mov cr.iha = reg	\
-	CLOBBER(clob)
+(pred)	mov cr.iha = reg
#define MOV_TO_IPSR(pred, reg, clob)	\
-(pred)	mov cr.ipsr = reg	\
-	CLOBBER(clob)
+(pred)	mov cr.ipsr = reg
#define MOV_TO_IFS(pred, reg, clob)	\
-(pred)	mov cr.ifs = reg	\
-	CLOBBER(clob)
+(pred)	mov cr.ifs = reg
#define MOV_TO_IIP(reg, clob)	\
-	mov cr.iip = reg	\
-	CLOBBER(clob)
+	mov cr.iip = reg
#define MOV_TO_KR(kr, reg, clob0, clob1)	\
-	mov IA64_KR(kr) = reg	\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)
+	mov IA64_KR(kr) = reg
#define ITC_I(pred, reg, clob)	\
-(pred)	itc.i reg		\
-	CLOBBER(clob)
+(pred)	itc.i reg
#define ITC_D(pred, reg, clob)	\
-(pred)	itc.d reg		\
-	CLOBBER(clob)
+(pred)	itc.d reg
#define ITC_I_AND_D(pred_i, pred_d, reg, clob)	\
(pred_i) itc.i reg;		\
-(pred_d) itc.d reg		\
-	CLOBBER(clob)
+(pred_d) itc.d reg
#define THASH(pred, reg0, reg1, clob)	\
-(pred)	thash reg0 = reg1	\
-	CLOBBER(clob)
+(pred)	thash reg0 = reg1
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1)	\
	ssm psr.ic | PSR_DEFAULT_BITS	\
-	CLOBBER(clob0)	\
-	CLOBBER(clob1)	\
	;;		\
	srlz.i		/* guarantee that interruption collection is on */ \
	;;
#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1)	\
	ssm psr.ic	\
-	CLOBBER(clob0)	\
-	CLOBBER(clob1)	\
	;;		\
	srlz.d
#define RSM_PSR_IC(clob)	\
-	rsm psr.ic	\
-	CLOBBER(clob)
+	rsm psr.ic
#define SSM_PSR_I(pred, pred_clob, clob)	\
-(pred)	ssm psr.i	\
-	CLOBBER(clob)	\
-	CLOBBER_PRED(pred_clob)
+(pred)	ssm psr.i
#define RSM_PSR_I(pred, clob0, clob1)	\
-(pred)	rsm psr.i	\
-	CLOBBER(clob0)	\
-	CLOBBER(clob1)
+(pred)	rsm psr.i
#define RSM_PSR_I_IC(clob0, clob1, clob2)	\
-	rsm psr.i | psr.ic	\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)		\
-	CLOBBER(clob2)
+	rsm psr.i | psr.ic
#define RSM_PSR_DT	\
	rsm psr.dt
#define RSM_PSR_BE_I(clob0, clob1)	\
-	rsm psr.be | psr.i	\
-	CLOBBER(clob0)		\
-	CLOBBER(clob1)
+	rsm psr.be | psr.i
#define SSM_PSR_DT_AND_SRLZ_I	\
	ssm psr.dt	\
...
@@ -177,15 +121,10 @@
	srlz.i
#define BSW_0(clob0, clob1, clob2)	\
-	bsw.0	\
-	CLOBBER(clob0)	\
-	CLOBBER(clob1)	\
-	CLOBBER(clob2)
+	bsw.0
#define BSW_1(clob0, clob1)	\
-	bsw.1	\
-	CLOBBER(clob0)	\
-	CLOBBER(clob1)
+	bsw.1
#define COVER	\
	cover
...
arch/ia64/include/asm/native/pvchk_inst.h (deleted)

#ifndef _ASM_NATIVE_PVCHK_INST_H
#define _ASM_NATIVE_PVCHK_INST_H
/******************************************************************************
* arch/ia64/include/asm/native/pvchk_inst.h
* Checker for paravirtualizations of privileged operations.
*
* Copyright (C) 2005 Hewlett-Packard Co
* Dan Magenheimer <dan.magenheimer@hp.com>
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/**********************************************
* Instructions paravirtualized for correctness
**********************************************/
/* "fc" and "thash" are privilege-sensitive instructions, meaning they
* may have different semantics depending on whether they are executed
* at PL0 vs PL!=0. When paravirtualized, these instructions mustn't
* be allowed to execute directly, lest incorrect semantics result.
*/
#define fc .error "fc should not be used directly."
#define thash .error "thash should not be used directly."
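/* Editorial illustration (not part of the original header): with the
 * two defines above in effect, cpp rewrites any direct use of the
 * mnemonic -- e.g. a stray "fc r16" in a checked .S file -- into the
 * ".error" directive, so the build stops at assembly time instead of
 * silently emitting a privilege-sensitive operation.
 */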
/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
* is not currently used (though it may be in a long-format VHPT system!)
* and the semantics of cover only change if psr.ic is off which is very
* rare (and currently non-existent outside of assembly code
*/
#define ttag .error "ttag should not be used directly."
#define cover .error "cover should not be used directly."
/* There are also privilege-sensitive registers. These registers are
* readable at any privilege level but only writable at PL0.
*/
#define cpuid .error "cpuid should not be used directly."
#define pmd .error "pmd should not be used directly."
/*
* mov ar.eflag =
* mov = ar.eflag
*/
/**********************************************
* Instructions paravirtualized for performance
**********************************************/
/*
* Those instructions include '.', which can't be handled by cpp
* (or at least can't be handled easily).
* They are handled by sed instead of cpp.
*/
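/* Editorial illustration: the kernel Makefile runs the native .S files
 * through arch/ia64/scripts/pvcheck.sed before assembling them against
 * this header.  A rule of roughly this shape (hypothetical, shown for
 * illustration only):
 *
 *	s/ssm[ \t]*psr\.ic[ \t]*$/SSM_PSR_IC_AND_SRLZ_D(r2, r3)/
 *
 * turns the raw instruction into its checker macro, so the IS_*
 * argument probes defined below run over the real code paths.
 */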
/* for .S
* itc.i
* itc.d
*
* bsw.0
* bsw.1
*
* ssm psr.ic | PSR_DEFAULT_BITS
* ssm psr.ic
* rsm psr.ic
* ssm psr.i
* rsm psr.i
* rsm psr.i | psr.ic
* rsm psr.dt
* ssm psr.dt
*
* mov = cr.ifa
* mov = cr.itir
* mov = cr.isr
* mov = cr.iha
* mov = cr.ipsr
* mov = cr.iim
* mov = cr.iip
* mov = cr.ivr
* mov = psr
*
* mov cr.ifa =
* mov cr.itir =
* mov cr.iha =
* mov cr.ipsr =
* mov cr.ifs =
* mov cr.iip =
* mov cr.kr =
*/
/* for intrinsics
* ssm psr.i
* rsm psr.i
* mov = psr
* mov = ivr
* mov = tpr
* mov cr.itm =
* mov eoi =
* mov rr[] =
* mov = rr[]
* mov = kr
* mov kr =
* ptc.ga
*/
/*************************************************************
* define paravirtualized instruction macros as nop to ignore,
* and check whether arguments are appropriate.
*************************************************************/
/* check whether reg is a regular register */
.macro is_rreg_in reg
.ifc "\reg", "r0"
nop 0
.exitm
.endif
;;
mov \reg = r0
;;
.endm
#define IS_RREG_IN(reg) is_rreg_in reg ;
#define IS_RREG_OUT(reg) \
;; \
mov reg = r0 \
;;
#define IS_RREG_CLOB(reg) IS_RREG_OUT(reg)
/* check whether pred is a predicate register */
#define IS_PRED_IN(pred) \
;; \
(pred) nop 0 \
;;
#define IS_PRED_OUT(pred) \
;; \
cmp.eq pred, p0 = r0, r0 \
;;
#define IS_PRED_CLOB(pred) IS_PRED_OUT(pred)
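/* Editorial note: the probes above are compile-time only.  For example,
 * IS_RREG_OUT(reg) expands to "mov reg = r0", which assembles only if
 * the argument really is a writable general register; handing a macro a
 * predicate register or a literal instead makes the pvchk build fail.
 */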
#define DO_SAVE_MIN(__COVER, SAVE_IFS, EXTRA, WORKAROUND) \
nop 0
#define MOV_FROM_IFA(reg) \
IS_RREG_OUT(reg)
#define MOV_FROM_ITIR(reg) \
IS_RREG_OUT(reg)
#define MOV_FROM_ISR(reg) \
IS_RREG_OUT(reg)
#define MOV_FROM_IHA(reg) \
IS_RREG_OUT(reg)
#define MOV_FROM_IPSR(pred, reg) \
IS_PRED_IN(pred) \
IS_RREG_OUT(reg)
#define MOV_FROM_IIM(reg) \
IS_RREG_OUT(reg)
#define MOV_FROM_IIP(reg) \
IS_RREG_OUT(reg)
#define MOV_FROM_IVR(reg, clob) \
IS_RREG_OUT(reg) \
IS_RREG_CLOB(clob)
#define MOV_FROM_PSR(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_OUT(reg) \
IS_RREG_CLOB(clob)
#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
IS_PRED_IN(pred) \
IS_PRED_CLOB(pred_clob) \
IS_RREG_OUT(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_IFA(reg, clob) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_ITIR(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_IHA(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_IPSR(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_IFS(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_IIP(reg, clob) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define MOV_TO_KR(kr, reg, clob0, clob1) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1)
#define ITC_I(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define ITC_D(pred, reg, clob) \
IS_PRED_IN(pred) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
IS_PRED_IN(pred_i) \
IS_PRED_IN(pred_d) \
IS_RREG_IN(reg) \
IS_RREG_CLOB(clob)
#define THASH(pred, reg0, reg1, clob) \
IS_PRED_IN(pred) \
IS_RREG_OUT(reg0) \
IS_RREG_IN(reg1) \
IS_RREG_CLOB(clob)
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1)
#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1)
#define RSM_PSR_IC(clob) \
IS_RREG_CLOB(clob)
#define SSM_PSR_I(pred, pred_clob, clob) \
IS_PRED_IN(pred) \
IS_PRED_CLOB(pred_clob) \
IS_RREG_CLOB(clob)
#define RSM_PSR_I(pred, clob0, clob1) \
IS_PRED_IN(pred) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1)
#define RSM_PSR_I_IC(clob0, clob1, clob2) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1) \
IS_RREG_CLOB(clob2)
#define RSM_PSR_DT \
nop 0
#define RSM_PSR_BE_I(clob0, clob1) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1)
#define SSM_PSR_DT_AND_SRLZ_I \
nop 0
#define BSW_0(clob0, clob1, clob2) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1) \
IS_RREG_CLOB(clob2)
#define BSW_1(clob0, clob1) \
IS_RREG_CLOB(clob0) \
IS_RREG_CLOB(clob1)
#define COVER \
nop 0
#define RFI \
br.ret.sptk.many rp /* defining nop causes dependency error */
#endif /* _ASM_NATIVE_PVCHK_INST_H */
arch/ia64/include/asm/paravirt.h (deleted)

/******************************************************************************
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef __ASM_PARAVIRT_H
#define __ASM_PARAVIRT_H
#ifndef __ASSEMBLY__
/******************************************************************************
* fsys related addresses
*/
struct pv_fsys_data {
unsigned long *fsyscall_table;
void *fsys_bubble_down;
};
extern struct pv_fsys_data pv_fsys_data;
unsigned long *paravirt_get_fsyscall_table(void);
char *paravirt_get_fsys_bubble_down(void);
/******************************************************************************
* patchlist addresses for gate page
*/
enum pv_gate_patchlist {
PV_GATE_START_FSYSCALL,
PV_GATE_END_FSYSCALL,
PV_GATE_START_BRL_FSYS_BUBBLE_DOWN,
PV_GATE_END_BRL_FSYS_BUBBLE_DOWN,
PV_GATE_START_VTOP,
PV_GATE_END_VTOP,
PV_GATE_START_MCKINLEY_E9,
PV_GATE_END_MCKINLEY_E9,
};
struct pv_patchdata {
unsigned long start_fsyscall_patchlist;
unsigned long end_fsyscall_patchlist;
unsigned long start_brl_fsys_bubble_down_patchlist;
unsigned long end_brl_fsys_bubble_down_patchlist;
unsigned long start_vtop_patchlist;
unsigned long end_vtop_patchlist;
unsigned long start_mckinley_e9_patchlist;
unsigned long end_mckinley_e9_patchlist;
void *gate_section;
};
extern struct pv_patchdata pv_patchdata;
unsigned long paravirt_get_gate_patchlist(enum pv_gate_patchlist type);
void *paravirt_get_gate_section(void);
#endif
#ifdef CONFIG_PARAVIRT_GUEST
#define PARAVIRT_HYPERVISOR_TYPE_DEFAULT 0
#ifndef __ASSEMBLY__
#include <asm/hw_irq.h>
#include <asm/meminit.h>
/******************************************************************************
* general info
*/
struct pv_info {
unsigned int kernel_rpl;
int paravirt_enabled;
const char *name;
};
extern struct pv_info pv_info;
static inline int paravirt_enabled(void)
{
return pv_info.paravirt_enabled;
}
static inline unsigned int get_kernel_rpl(void)
{
return pv_info.kernel_rpl;
}
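/*
 * Editorial sketch (hypothetical, not part of this header): a guest
 * port was expected to install its own description during early boot,
 * along these lines (names and field values are illustrative only):
 *
 *	static const struct pv_info myhv_info = {
 *		.kernel_rpl	  = 2,
 *		.paravirt_enabled = 1,
 *		.name		  = "myhv",
 *	};
 *
 *	void __init myhv_setup(void)
 *	{
 *		pv_info = myhv_info;
 *	}
 *
 * after which paravirt_enabled() and get_kernel_rpl() above report the
 * hypervisor's values everywhere else in the kernel.
 */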
/******************************************************************************
* initialization hooks.
*/
struct rsvd_region;
struct pv_init_ops {
void (*banner)(void);
int (*reserve_memory)(struct rsvd_region *region);
void (*arch_setup_early)(void);
void (*arch_setup_console)(char **cmdline_p);
int (*arch_setup_nomca)(void);
void (*post_smp_prepare_boot_cpu)(void);
#ifdef ASM_SUPPORTED
unsigned long (*patch_bundle)(void *sbundle, void *ebundle,
unsigned long type);
unsigned long (*patch_inst)(unsigned long stag, unsigned long etag,
unsigned long type);
#endif
void (*patch_branch)(unsigned long tag, unsigned long type);
};
extern struct pv_init_ops pv_init_ops;
static inline void paravirt_banner(void)
{
if (pv_init_ops.banner)
pv_init_ops.banner();
}
static inline int paravirt_reserve_memory(struct rsvd_region *region)
{
if (pv_init_ops.reserve_memory)
return pv_init_ops.reserve_memory(region);
return 0;
}
static inline void paravirt_arch_setup_early(void)
{
if (pv_init_ops.arch_setup_early)
pv_init_ops.arch_setup_early();
}
static inline void paravirt_arch_setup_console(char **cmdline_p)
{
if (pv_init_ops.arch_setup_console)
pv_init_ops.arch_setup_console(cmdline_p);
}
static inline int paravirt_arch_setup_nomca(void)
{
if (pv_init_ops.arch_setup_nomca)
return pv_init_ops.arch_setup_nomca();
return 0;
}
static inline void paravirt_post_smp_prepare_boot_cpu(void)
{
if (pv_init_ops.post_smp_prepare_boot_cpu)
pv_init_ops.post_smp_prepare_boot_cpu();
}
/******************************************************************************
* replacement of iosapic operations.
*/
struct pv_iosapic_ops {
void (*pcat_compat_init)(void);
struct irq_chip *(*__get_irq_chip)(unsigned long trigger);
unsigned int (*__read)(char __iomem *iosapic, unsigned int reg);
void (*__write)(char __iomem *iosapic, unsigned int reg, u32 val);
};
extern struct pv_iosapic_ops pv_iosapic_ops;
static inline void
iosapic_pcat_compat_init(void)
{
if (pv_iosapic_ops.pcat_compat_init)
pv_iosapic_ops.pcat_compat_init();
}
static inline struct irq_chip*
iosapic_get_irq_chip(unsigned long trigger)
{
return pv_iosapic_ops.__get_irq_chip(trigger);
}
static inline unsigned int
__iosapic_read(char __iomem *iosapic, unsigned int reg)
{
return pv_iosapic_ops.__read(iosapic, reg);
}
static inline void
__iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
{
return pv_iosapic_ops.__write(iosapic, reg, val);
}
/******************************************************************************
* replacement of irq operations.
*/
struct pv_irq_ops {
void (*register_ipi)(void);
int (*assign_irq_vector)(int irq);
void (*free_irq_vector)(int vector);
void (*register_percpu_irq)(ia64_vector vec,
struct irqaction *action);
void (*resend_irq)(unsigned int vector);
};
extern struct pv_irq_ops pv_irq_ops;
static inline void
ia64_register_ipi(void)
{
pv_irq_ops.register_ipi();
}
static inline int
assign_irq_vector(int irq)
{
return pv_irq_ops.assign_irq_vector(irq);
}
static inline void
free_irq_vector(int vector)
{
return pv_irq_ops.free_irq_vector(vector);
}
static inline void
register_percpu_irq(ia64_vector vec, struct irqaction *action)
{
pv_irq_ops.register_percpu_irq(vec, action);
}
static inline void
ia64_resend_irq(unsigned int vector)
{
pv_irq_ops.resend_irq(vector);
}
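/*
 * Editorial sketch (hypothetical): a hypervisor overrides the native
 * behaviour by repointing entries of this table before the interrupt
 * subsystem comes up, e.g.
 *
 *	static void myhv_register_ipi(void) { ... }
 *
 *	pv_irq_ops.register_ipi = myhv_register_ipi;
 *
 * The inline wrappers above then dispatch through the new pointer with
 * no change at any call site.
 */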
/******************************************************************************
* replacement of time operations.
*/
extern struct itc_jitter_data_t itc_jitter_data;
extern volatile int time_keeper_id;
struct pv_time_ops {
void (*init_missing_ticks_accounting)(int cpu);
int (*do_steal_accounting)(unsigned long *new_itm);
void (*clocksource_resume)(void);
unsigned long long (*sched_clock)(void);
};
extern struct pv_time_ops pv_time_ops;
static inline void
paravirt_init_missing_ticks_accounting(int cpu)
{
if (pv_time_ops.init_missing_ticks_accounting)
pv_time_ops.init_missing_ticks_accounting(cpu);
}
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
static inline int
paravirt_do_steal_accounting(unsigned long *new_itm)
{
return pv_time_ops.do_steal_accounting(new_itm);
}
static inline unsigned long long paravirt_sched_clock(void)
{
return pv_time_ops.sched_clock();
}
#endif /* !__ASSEMBLY__ */
#else
/* fallback for native case */
#ifndef __ASSEMBLY__
#define paravirt_banner() do { } while (0)
#define paravirt_reserve_memory(region) 0
#define paravirt_arch_setup_early() do { } while (0)
#define paravirt_arch_setup_console(cmdline_p) do { } while (0)
#define paravirt_arch_setup_nomca() 0
#define paravirt_post_smp_prepare_boot_cpu() do { } while (0)
#define paravirt_init_missing_ticks_accounting(cpu) do { } while (0)
#define paravirt_do_steal_accounting(new_itm) 0
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_GUEST */
#endif /* __ASM_PARAVIRT_H */
arch/ia64/include/asm/paravirt_patch.h (deleted)

/******************************************************************************
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifndef __ASM_PARAVIRT_PATCH_H
#define __ASM_PARAVIRT_PATCH_H
#ifdef __ASSEMBLY__
.section .paravirt_branches, "a"
.previous
#define PARAVIRT_PATCH_SITE_BR(type) \
{ \
[1:] ; \
br.cond.sptk.many 2f ; \
nop.b 0 ; \
nop.b 0;; ; \
} ; \
2: \
.xdata8 ".paravirt_branches", 1b, type
#else
#include <linux/stringify.h>
#include <asm/intrinsics.h>
/* for binary patch */
struct paravirt_patch_site_bundle {
void *sbundle;
void *ebundle;
unsigned long type;
};
/* a label marks the beginning of a new bundle */
#define paravirt_alt_bundle(instr, privop) \
"\t998:\n" \
"\t" instr "\n" \
"\t999:\n" \
"\t.pushsection .paravirt_bundles, \"a\"\n" \
"\t.popsection\n" \
"\t.xdata8 \".paravirt_bundles\", 998b, 999b, " \
__stringify(privop) "\n"
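/*
 * Editorial sketch of intended use (hypothetical; TYPE is a stand-in,
 * not a constant named by this patch):
 *
 *	asm volatile (paravirt_alt_bundle("mov %0 = psr", TYPE)
 *		      : "=r"(val));
 *
 * The .xdata8 record stores the 998b/999b bundle boundaries plus the
 * type, so the boot-time patcher can find the bundle and rewrite it in
 * place for the hypervisor it detects.
 */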
struct paravirt_patch_bundle_elem {
const void *sbundle;
const void *ebundle;
unsigned long type;
};
struct paravirt_patch_site_inst {
unsigned long stag;
unsigned long etag;
unsigned long type;
};
#define paravirt_alt_inst(instr, privop) \
"\t[998:]\n" \
"\t" instr "\n" \
"\t[999:]\n" \
"\t.pushsection .paravirt_insts, \"a\"\n" \
"\t.popsection\n" \
"\t.xdata8 \".paravirt_insts\", 998b, 999b, " \
__stringify(privop) "\n"
struct paravirt_patch_site_branch {
unsigned long tag;
unsigned long type;
};
struct paravirt_patch_branch_target {
const void *entry;
unsigned long type;
};
void
__paravirt_patch_apply_branch(
unsigned long tag, unsigned long type,
const struct paravirt_patch_branch_target *entries,
unsigned int nr_entries);
void
paravirt_patch_reloc_br(unsigned long tag, const void *target);
void
paravirt_patch_reloc_brl(unsigned long tag, const void *target);
#if defined(ASM_SUPPORTED) && defined(CONFIG_PARAVIRT)
unsigned long
ia64_native_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
unsigned long
__paravirt_patch_apply_bundle(void *sbundle, void *ebundle, unsigned long type,
const struct paravirt_patch_bundle_elem *elems,
unsigned long nelems,
const struct paravirt_patch_bundle_elem **found);
void
paravirt_patch_apply_bundle(const struct paravirt_patch_site_bundle *start,
const struct paravirt_patch_site_bundle *end);
void
paravirt_patch_apply_inst(const struct paravirt_patch_site_inst *start,
const struct paravirt_patch_site_inst *end);
void paravirt_patch_apply(void);
#else
#define paravirt_patch_apply_bundle(start, end) do { } while (0)
#define paravirt_patch_apply_inst(start, end) do { } while (0)
#define paravirt_patch_apply() do { } while (0)
#endif
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PARAVIRT_PATCH_H */
/*
* Local variables:
* mode: C
* c-set-style: "linux"
* c-basic-offset: 8
* tab-width: 8
* indent-tabs-mode: t
* End:
*/
arch/ia64/include/asm/paravirt_privop.h (deleted; large diff collapsed)
arch/ia64/kernel/Makefile
...
@@ -9,7 +9,7 @@ endif
extra-y	:= head.o init_task.o vmlinux.lds
obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o	\
-	 irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \
+	 irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \
	 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
	 unwind.o mca.o mca_asm.o topology.o dma-mapping.o
...
@@ -35,9 +35,6 @@ mca_recovery-y			+= mca_drv.o mca_drv_asm.o
obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
-obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o \
-				   paravirt_patch.o
obj-$(CONFIG_IA64_ESI)		+= esi.o
ifneq ($(CONFIG_IA64_ESI),)
obj-y				+= esi_stub.o	# must be in kernel proper
...
@@ -52,8 +49,6 @@ CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
# The gate DSO image is built using a special linker script.
include $(src)/Makefile.gate
-# tell compiled for native
-CPPFLAGS_gate.lds += -D__IA64_GATE_PARAVIRTUALIZED_NATIVE
# Calculate NR_IRQ = max(IA64_NATIVE_NR_IRQS, XEN_NR_IRQS, ...) based on config
define sed-y
...
@@ -84,30 +79,3 @@ arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
	$(Q)mkdir -p $(dir $@)
	$(call cmd,nr_irqs)
-#
-# native ivt.S, entry.S and fsys.S
-#
-ASM_PARAVIRT_OBJS = ivt.o entry.o fsys.o
-define paravirtualized_native
-AFLAGS_$(1) += -D__IA64_ASM_PARAVIRTUALIZED_NATIVE
-AFLAGS_pvchk-sed-$(1) += -D__IA64_ASM_PARAVIRTUALIZED_PVCHECK
-extra-y += pvchk-$(1)
-endef
-$(foreach obj,$(ASM_PARAVIRT_OBJS),$(eval $(call paravirtualized_native,$(obj))))
-#
-# Checker for paravirtualizations of privileged operations.
-#
-quiet_cmd_pv_check_sed = PVCHK	$@
-define cmd_pv_check_sed
-	sed -f $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed $< > $@
-endef
-$(obj)/pvchk-sed-%.s: $(src)/%.S $(srctree)/arch/$(SRCARCH)/scripts/pvcheck.sed FORCE
-	$(call if_changed_dep,as_s_S)
-$(obj)/pvchk-%.s: $(obj)/pvchk-sed-%.s FORCE
-	$(call if_changed,pv_check_sed)
-$(obj)/pvchk-%.o: $(obj)/pvchk-%.s FORCE
-	$(call if_changed,as_o_S)
-.PRECIOUS: $(obj)/pvchk-sed-%.s $(obj)/pvchk-%.s $(obj)/pvchk-%.o
arch/ia64/kernel/efi.c
...
@@ -464,7 +464,6 @@ efi_map_pal_code (void)
			 GRANULEROUNDDOWN((unsigned long) pal_vaddr),
			 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
			 IA64_GRANULE_SHIFT);
-	paravirt_dv_serialize_data();
	ia64_set_psr(psr);		/* restore psr */
}
...
arch/ia64/kernel/entry.S
...
@@ -51,7 +51,6 @@
#include "minstate.h"
-#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
/*
 * execve() is special because in case of success, we need to
 * setup a null register window frame.
...
@@ -161,7 +160,6 @@ GLOBAL_ENTRY(sys_clone)
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone)
-#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
/*
 * prev_task <- ia64_switch_to(struct task_struct *next)
...
@@ -169,7 +167,7 @@ END(sys_clone)
 * called.  The code starting at .map relies on this.  The rest of the code
 * doesn't care about the interrupt masking status.
 */
-GLOBAL_ENTRY(__paravirt_switch_to)
+GLOBAL_ENTRY(ia64_switch_to)
	.prologue
	alloc r16=ar.pfs,1,0,0,0
	DO_SAVE_SWITCH_STACK
...
@@ -221,9 +219,8 @@ GLOBAL_ENTRY(__paravirt_switch_to)
	itr.d dtr[r25]=r23		// wire in new mapping...
	SSM_PSR_IC_AND_SRLZ_D(r8, r9)	// reenable the psr.ic bit
	br.cond.sptk .done
-END(__paravirt_switch_to)
+END(ia64_switch_to)
-#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
/*
 * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
 * means that we may get an interrupt with "sp" pointing to the new kernel stack while
...
@@ -639,16 +636,8 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	mov r10=r0				// clear error indication in r10
(p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
-#ifdef CONFIG_PARAVIRT
-	;;
-	br.cond.sptk.few ia64_leave_syscall
-	;;
-#endif /* CONFIG_PARAVIRT */
END(ia64_ret_from_syscall)
-#ifndef CONFIG_PARAVIRT
	// fall through
-#endif
-#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
/*
 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
...
@@ -694,7 +683,7 @@ END(ia64_ret_from_syscall)
 *	      ar.csd: cleared
 *	      ar.ssd: cleared
 */
-GLOBAL_ENTRY(__paravirt_leave_syscall)
+GLOBAL_ENTRY(ia64_leave_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
...
@@ -722,8 +711,8 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
#endif
-.global __paravirt_work_processed_syscall;
-__paravirt_work_processed_syscall:
+.global ia64_work_processed_syscall;
+ia64_work_processed_syscall:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	adds r2=PT(LOADRS)+16,r12
	MOV_FROM_ITC(pUStk, p9, r22, r19)	// fetch time at leave
...
@@ -836,9 +825,9 @@ __paravirt_work_processed_syscall:
	mov.m ar.ssd=r0			// M2   clear ar.ssd
	mov f11=f0			// F    clear f11
	br.cond.sptk.many rbs_switch	// B
-END(__paravirt_leave_syscall)
+END(ia64_leave_syscall)
-GLOBAL_ENTRY(__paravirt_leave_kernel)
+GLOBAL_ENTRY(ia64_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
...
@@ -1171,26 +1160,25 @@ skip_rbs_switch:
(p6)	br.cond.sptk.few .notify
	br.call.spnt.many rp=preempt_schedule_irq
.ret9:	cmp.eq p6,p0=r0,r0		// p6 <- 1 (re-check)
-(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
+(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel
.notify:
(pUStk)	br.call.spnt.many rp=notify_resume_user
.ret10:	cmp.ne p6,p0=r0,r0		// p6 <- 0 (don't re-check)
-(pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
+(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel
-.global __paravirt_pending_syscall_end;
-__paravirt_pending_syscall_end:
+.global ia64_work_pending_syscall_end;
+ia64_work_pending_syscall_end:
	adds r2=PT(R8)+16,r12
	adds r3=PT(R10)+16,r12
	;;
	ld8 r8=[r2]
	ld8 r10=[r3]
-	br.cond.sptk.many __paravirt_work_processed_syscall_target
+	br.cond.sptk.many ia64_work_processed_syscall
-END(__paravirt_leave_kernel)
+END(ia64_leave_kernel)
-#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
ENTRY(handle_syscall_error)
	/*
	 * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
...
@@ -1294,7 +1282,7 @@ ENTRY(sys_rt_sigreturn)
	adds sp=16,sp
	;;
	ld8 r9=[sp]				// load new ar.unat
-	mov.sptk b7=r8,ia64_native_leave_kernel
+	mov.sptk b7=r8,ia64_leave_kernel
	;;
	mov ar.unat=r9
	br.many b7
...
@@ -1782,4 +1770,3 @@ sys_call_table:
	data8 sys_execveat
	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
-#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
arch/ia64/kernel/fsys.S
...
@@ -24,7 +24,7 @@
#include <asm/unistd.h>
#include "entry.h"
-#include "paravirt_inst.h"
+#include <asm/native/inst.h>
/*
 * See Documentation/ia64/fsys.txt for details on fsyscalls.
...
@@ -402,7 +402,7 @@ ENTRY(fsys_fallback_syscall)
	mov r26=ar.pfs
END(fsys_fallback_syscall)
	/* FALL THROUGH */
-GLOBAL_ENTRY(paravirt_fsys_bubble_down)
+GLOBAL_ENTRY(fsys_bubble_down)
	.prologue
	.altrp b6
	.body
...
@@ -440,7 +440,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 *
 *	PSR.BE : already is turned off in __kernel_syscall_via_epc()
 *	PSR.AC : don't care (kernel normally turns PSR.AC on)
- *	PSR.I  : already turned off by the time paravirt_fsys_bubble_down gets
+ *	PSR.I  : already turned off by the time fsys_bubble_down gets
 *		 invoked
 *	PSR.DFL: always 0 (kernel never turns it on)
 *	PSR.DFH: don't care --- kernel never touches f32-f127 on its own
...
@@ -450,7 +450,7 @@
 *	PSR.DB : don't care --- kernel never enables kernel-level
 *		 breakpoints
 *	PSR.TB : must be 0 already; if it wasn't zero on entry to
- *		 __kernel_syscall_via_epc, the branch to paravirt_fsys_bubble_down
+ *		 __kernel_syscall_via_epc, the branch to fsys_bubble_down
 *		 will trigger a taken branch; the taken-trap-handler then
 *		 converts the syscall into a break-based system-call.
 */
...
@@ -541,14 +541,14 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
	nop.m 0
(p8)	br.call.sptk.many b6=b6		// B	(ignore return address)
	br.cond.spnt ia64_trace_syscall	// B
-END(paravirt_fsys_bubble_down)
+END(fsys_bubble_down)
	.rodata
	.align 8
-	.globl paravirt_fsyscall_table
-	data8 paravirt_fsys_bubble_down
-paravirt_fsyscall_table:
+	.globl fsyscall_table
+	data8 fsys_bubble_down
+fsyscall_table:
	data8 fsys_ni_syscall
	data8 0				// exit			// 1025
	data8 0				// read
...
@@ -833,4 +833,4 @@ paravirt_fsyscall_table:
	// fill in zeros for the remaining entries
.zero:
-	.space paravirt_fsyscall_table + 8*NR_syscalls - .zero, 0
+	.space fsyscall_table + 8*NR_syscalls - .zero, 0
arch/ia64/kernel/gate.S
...
@@ -14,7 +14,7 @@
#include <asm/unistd.h>
#include <asm/kregs.h>
#include <asm/page.h>
-#include "paravirt_inst.h"
+#include <asm/native/inst.h>
/*
 * We can't easily refer to symbols inside the kernel.  To avoid full runtime relocation,
...
@@ -376,11 +376,4 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
(p9)	mov r8=ENOSYS
	FSYS_RETURN
-#ifdef CONFIG_PARAVIRT
-	/*
-	 * padd to make the size of this symbol constant
-	 * independent of paravirtualization.
-	 */
-	.align PAGE_SIZE / 8
-#endif
END(__kernel_syscall_via_epc)
arch/ia64/kernel/gate.lds.S
...
@@ -6,7 +6,6 @@
 */
#include <asm/page.h>
-#include "paravirt_patchlist.h"
SECTIONS
{
...
@@ -33,21 +32,21 @@ SECTIONS
	. = GATE_ADDR + 0x600;
	.data..patch		: {
-		__paravirt_start_gate_mckinley_e9_patchlist = .;
+		__start_gate_mckinley_e9_patchlist = .;
		*(.data..patch.mckinley_e9)
-		__paravirt_end_gate_mckinley_e9_patchlist = .;
+		__end_gate_mckinley_e9_patchlist = .;
-		__paravirt_start_gate_vtop_patchlist = .;
+		__start_gate_vtop_patchlist = .;
		*(.data..patch.vtop)
-		__paravirt_end_gate_vtop_patchlist = .;
+		__end_gate_vtop_patchlist = .;
-		__paravirt_start_gate_fsyscall_patchlist = .;
+		__start_gate_fsyscall_patchlist = .;
		*(.data..patch.fsyscall_table)
-		__paravirt_end_gate_fsyscall_patchlist = .;
+		__end_gate_fsyscall_patchlist = .;
-		__paravirt_start_gate_brl_fsys_bubble_down_patchlist = .;
+		__start_gate_brl_fsys_bubble_down_patchlist = .;
		*(.data..patch.brl_fsys_bubble_down)
-		__paravirt_end_gate_brl_fsys_bubble_down_patchlist = .;
+		__end_gate_brl_fsys_bubble_down_patchlist = .;
	} :readable
.IA_64.unwind_info		: { *(.IA_64.unwind_info*) }
...
arch/ia64/kernel/head.S
...
@@ -26,7 +26,6 @@
#include <asm/mmu_context.h>
#include <asm/asm-offsets.h>
#include <asm/pal.h>
-#include <asm/paravirt.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
...
@@ -394,41 +393,6 @@ start_ap:
	;;
(isBP)	st8 [r2]=r28		// save the address of the boot param area passed by the bootloader
-#ifdef CONFIG_PARAVIRT
-	movl r14=hypervisor_setup_hooks
-	movl r15=hypervisor_type
-	mov r16=num_hypervisor_hooks
-	;;
-	ld8 r2=[r15]
-	;;
-	cmp.ltu p7,p0=r2,r16	// array size check
-	shladd r8=r2,3,r14
-	;;
-(p7)	ld8 r9=[r8]
-	;;
-(p7)	mov b1=r9
-(p7)	cmp.ne.unc p7,p0=r9,r0	// no actual branch to NULL
-	;;
-(p7)	br.call.sptk.many rp=b1
-	__INITDATA
-default_setup_hook = 0		// Currently nothing needs to be done.
-	.global hypervisor_type
-hypervisor_type:
-	data8 PARAVIRT_HYPERVISOR_TYPE_DEFAULT
-	// must have the same order with PARAVIRT_HYPERVISOR_TYPE_xxx
-hypervisor_setup_hooks:
-	data8 default_setup_hook
-num_hypervisor_hooks = (. - hypervisor_setup_hooks) / 8
-	.previous
-#endif
#ifdef CONFIG_SMP
(isAP)	br.call.sptk.many rp=start_secondary
.ret0:
...
@@ -1063,12 +1027,6 @@ GLOBAL_ENTRY(ia64_native_sched_clock)
	shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
	br.ret.sptk.many rp
END(ia64_native_sched_clock)
-#ifndef CONFIG_PARAVIRT
-//unsigned long long
-//sched_clock(void) __attribute__((alias("ia64_native_sched_clock")));
-.global sched_clock
-sched_clock = ia64_native_sched_clock
-#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
GLOBAL_ENTRY(cycle_to_cputime)
...
arch/ia64/kernel/ivt.S
...
@@ -937,7 +937,6 @@ END(interrupt)
 *	- ar.fpsr: set to kernel settings
 *	-  b6: preserved (same as on entry)
 */
-#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0
# error This code assumes that b6 is the first field in pt_regs.
...
@@ -1029,7 +1028,6 @@ GLOBAL_ENTRY(ia64_syscall_setup)
(p10)	mov r8=-EINVAL
	br.ret.sptk.many b7
END(ia64_syscall_setup)
-#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
	.org ia64_ivt+0x3c00
/////////////////////////////////////////////////////////////////////////////////////////
...
@@ -1043,7 +1041,7 @@ END(ia64_syscall_setup)
	DBG_FAULT(16)
	FAULT(16)
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
	/*
	 * There is no particular reason for this code to be here, other than
	 * that there happens to be space here that would go unused otherwise.
...
arch/ia64/kernel/minstate.h
...
@@ -2,7 +2,7 @@
#include <asm/cache.h>
#include "entry.h"
-#include "paravirt_inst.h"
+#include <asm/native/inst.h>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* read ar.itc in advance, and use it before leaving bank 0 */
...
arch/ia64/kernel/module.c
...
@@ -439,14 +439,6 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
			mod->arch.opd = s;
		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
			mod->arch.unwind = s;
-#ifdef CONFIG_PARAVIRT
-		else if (strcmp(".paravirt_bundles",
-				secstrings + s->sh_name) == 0)
-			mod->arch.paravirt_bundles = s;
-		else if (strcmp(".paravirt_insts",
-				secstrings + s->sh_name) == 0)
-			mod->arch.paravirt_insts = s;
-#endif
	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
		printk(KERN_ERR "%s: sections missing\n", mod->name);
...
@@ -914,30 +906,6 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
	if (mod->arch.unwind)
		register_unwind_table(mod);
-#ifdef CONFIG_PARAVIRT
-	if (mod->arch.paravirt_bundles) {
-		struct paravirt_patch_site_bundle *start =
-			(struct paravirt_patch_site_bundle *)
-			mod->arch.paravirt_bundles->sh_addr;
-		struct paravirt_patch_site_bundle *end =
-			(struct paravirt_patch_site_bundle *)
-			(mod->arch.paravirt_bundles->sh_addr +
-			 mod->arch.paravirt_bundles->sh_size);
-		paravirt_patch_apply_bundle(start, end);
-	}
-	if (mod->arch.paravirt_insts) {
-		struct paravirt_patch_site_inst *start =
-			(struct paravirt_patch_site_inst *)
-			mod->arch.paravirt_insts->sh_addr;
-		struct paravirt_patch_site_inst *end =
-			(struct paravirt_patch_site_inst *)
-			(mod->arch.paravirt_insts->sh_addr +
-			 mod->arch.paravirt_insts->sh_size);
-		paravirt_patch_apply_inst(start, end);
-	}
-#endif
	return 0;
}
...
arch/ia64/kernel/paravirt.c (deleted; large diff collapsed)
arch/ia64/kernel/paravirt_inst.h (deleted)

/******************************************************************************
* linux/arch/ia64/xen/paravirt_inst.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#ifdef __IA64_ASM_PARAVIRTUALIZED_PVCHECK
#include <asm/native/pvchk_inst.h>
#else
#include <asm/native/inst.h>
#endif
arch/ia64/kernel/paravirt_patch.c (deleted; large diff collapsed)
arch/ia64/kernel/paravirt_patchlist.c (deleted)

/******************************************************************************
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/paravirt.h>
#define DECLARE(name) \
extern unsigned long \
__ia64_native_start_gate_##name##_patchlist[]; \
extern unsigned long \
__ia64_native_end_gate_##name##_patchlist[]
DECLARE(fsyscall);
DECLARE(brl_fsys_bubble_down);
DECLARE(vtop);
DECLARE(mckinley_e9);
extern unsigned long __start_gate_section[];
#define ASSIGN(name) \
.start_##name##_patchlist = \
(unsigned long)__ia64_native_start_gate_##name##_patchlist, \
.end_##name##_patchlist = \
(unsigned long)__ia64_native_end_gate_##name##_patchlist
struct pv_patchdata pv_patchdata __initdata = {
ASSIGN(fsyscall),
ASSIGN(brl_fsys_bubble_down),
ASSIGN(vtop),
ASSIGN(mckinley_e9),
.gate_section = (void*)__start_gate_section,
};
unsigned long __init
paravirt_get_gate_patchlist(enum pv_gate_patchlist type)
{
#define CASE(NAME, name) \
case PV_GATE_START_##NAME: \
return pv_patchdata.start_##name##_patchlist; \
case PV_GATE_END_##NAME: \
return pv_patchdata.end_##name##_patchlist; \
switch (type) {
CASE(FSYSCALL, fsyscall);
CASE(BRL_FSYS_BUBBLE_DOWN, brl_fsys_bubble_down);
CASE(VTOP, vtop);
CASE(MCKINLEY_E9, mckinley_e9);
default:
BUG();
break;
}
return 0;
}
void * __init
paravirt_get_gate_section(void)
{
return pv_patchdata.gate_section;
}
arch/ia64/kernel/paravirt_patchlist.h (deleted)

/******************************************************************************
* linux/arch/ia64/xen/paravirt_patchlist.h
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <asm/native/patchlist.h>
arch/ia64/kernel/paravirtentry.S (deleted)

/******************************************************************************
* linux/arch/ia64/xen/paravirtentry.S
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <asm/asmmacro.h>
#include <asm/asm-offsets.h>
#include <asm/paravirt_privop.h>
#include <asm/paravirt_patch.h>
#include "entry.h"
#define DATA8(sym, init_value) \
.pushsection .data..read_mostly ; \
.align 8 ; \
.global sym ; \
sym: ; \
data8 init_value ; \
.popsection
#define BRANCH(targ, reg, breg, type) \
PARAVIRT_PATCH_SITE_BR(PARAVIRT_PATCH_TYPE_BR_ ## type) ; \
;; \
movl reg=targ ; \
;; \
ld8 reg=[reg] ; \
;; \
mov breg=reg ; \
br.cond.sptk.many breg
#define BRANCH_PROC(sym, reg, breg, type) \
DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
GLOBAL_ENTRY(paravirt_ ## sym) ; \
BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \
END(paravirt_ ## sym)
#define BRANCH_PROC_UNWINFO(sym, reg, breg, type) \
DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ; \
GLOBAL_ENTRY(paravirt_ ## sym) ; \
PT_REGS_UNWIND_INFO(0) ; \
BRANCH(paravirt_ ## sym ## _targ, reg, breg, type) ; \
END(paravirt_ ## sym)
BRANCH_PROC(switch_to, r22, b7, SWITCH_TO)
BRANCH_PROC_UNWINFO(leave_syscall, r22, b7, LEAVE_SYSCALL)
BRANCH_PROC(work_processed_syscall, r2, b7, WORK_PROCESSED_SYSCALL)
BRANCH_PROC_UNWINFO(leave_kernel, r22, b7, LEAVE_KERNEL)
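// Editorial note: each BRANCH_PROC above expands to a patchable stub.
// For example, BRANCH_PROC(switch_to, r22, b7, SWITCH_TO) emits a data8
// word paravirt_switch_to_targ initialized to ia64_native_switch_to,
// plus a paravirt_switch_to entry that does
//	movl r22 = paravirt_switch_to_targ ;; ld8 r22 = [r22] ;;
//	mov b7 = r22 ; br.cond.sptk.many b7
// so a hypervisor can retarget it either by rewriting the data word or
// by binary-patching the PARAVIRT_PATCH_SITE_BR-marked branch site.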
#ifdef CONFIG_MODULES
#define __INIT_OR_MODULE .text
#define __INITDATA_OR_MODULE .data
#else
#define __INIT_OR_MODULE __INIT
#define __INITDATA_OR_MODULE __INITDATA
#endif /* CONFIG_MODULES */
__INIT_OR_MODULE
GLOBAL_ENTRY(paravirt_fc_i)
fc.i r32
br.ret.sptk.many rp
END(paravirt_fc_i)
__FINIT
__INIT_OR_MODULE
.align 32
GLOBAL_ENTRY(paravirt_nop_b_inst_bundle)
{
nop.b 0
nop.b 0
nop.b 0
}
END(paravirt_nop_b_inst_bundle)
__FINIT
/* NOTE: nop.[mfi] has same format */
__INIT_OR_MODULE
GLOBAL_ENTRY(paravirt_nop_mfi_inst_bundle)
{
nop.m 0
nop.f 0
nop.i 0
}
END(paravirt_nop_mfi_inst_bundle)
__FINIT
__INIT_OR_MODULE
GLOBAL_ENTRY(paravirt_nop_bundle)
paravirt_nop_bundle_start:
{
nop 0
nop 0
nop 0
}
paravirt_nop_bundle_end:
END(paravirt_nop_bundle)
__FINIT
__INITDATA_OR_MODULE
.align 8
.global paravirt_nop_bundle_size
paravirt_nop_bundle_size:
data8 paravirt_nop_bundle_end - paravirt_nop_bundle_start
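The nop bundles and paravirt_nop_bundle_size above exist for the patcher: when a replacement sequence is shorter than the code at a patch site, the remainder is padded with whole 16-byte nop bundles copied from these templates. A hedged C sketch of that padding step; the nop_bundle encoding here is a placeholder standing in for the real template between paravirt_nop_bundle_start and paravirt_nop_bundle_end:

#include <stdint.h>
#include <string.h>

/* Hypothetical 16-byte ia64 bundle of three nops (placeholder encoding). */
static const uint8_t nop_bundle[16] = { 0 };

/*
 * Pad a patch site with whole nop bundles. The real patcher also
 * flushes the i-cache (fc.i) and serializes (srlz.i) afterwards.
 */
static void pad_with_nops(uint8_t *site, size_t len)
{
	size_t i;

	for (i = 0; i + sizeof(nop_bundle) <= len; i += sizeof(nop_bundle))
		memcpy(site + i, nop_bundle, sizeof(nop_bundle));
}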
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/string.h> #include <linux/string.h>
#include <asm/paravirt.h>
#include <asm/patch.h> #include <asm/patch.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/sections.h> #include <asm/sections.h>
...@@ -169,35 +168,16 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) ...@@ -169,35 +168,16 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end)
ia64_srlz_i(); ia64_srlz_i();
} }
extern unsigned long ia64_native_fsyscall_table[NR_syscalls];
extern char ia64_native_fsys_bubble_down[];
struct pv_fsys_data pv_fsys_data __initdata = {
.fsyscall_table = (unsigned long *)ia64_native_fsyscall_table,
.fsys_bubble_down = (void *)ia64_native_fsys_bubble_down,
};
unsigned long * __init
paravirt_get_fsyscall_table(void)
{
return pv_fsys_data.fsyscall_table;
}
char * __init
paravirt_get_fsys_bubble_down(void)
{
return pv_fsys_data.fsys_bubble_down;
}
static void __init static void __init
patch_fsyscall_table (unsigned long start, unsigned long end) patch_fsyscall_table (unsigned long start, unsigned long end)
{ {
u64 fsyscall_table = (u64)paravirt_get_fsyscall_table(); extern unsigned long fsyscall_table[NR_syscalls];
s32 *offp = (s32 *) start; s32 *offp = (s32 *) start;
u64 ip; u64 ip;
while (offp < (s32 *) end) { while (offp < (s32 *) end) {
ip = (u64) ia64_imva((char *) offp + *offp); ip = (u64) ia64_imva((char *) offp + *offp);
ia64_patch_imm64(ip, fsyscall_table); ia64_patch_imm64(ip, (u64) fsyscall_table);
ia64_fc((void *) ip); ia64_fc((void *) ip);
++offp; ++offp;
} }
...@@ -208,7 +188,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end) ...@@ -208,7 +188,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end)
static void __init static void __init
patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
{ {
u64 fsys_bubble_down = (u64)paravirt_get_fsys_bubble_down(); extern char fsys_bubble_down[];
s32 *offp = (s32 *) start; s32 *offp = (s32 *) start;
u64 ip; u64 ip;
...@@ -226,13 +206,13 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) ...@@ -226,13 +206,13 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end)
void __init void __init
ia64_patch_gate (void) ia64_patch_gate (void)
{ {
# define START(name) paravirt_get_gate_patchlist(PV_GATE_START_##name) # define START(name) ((unsigned long) __start_gate_##name##_patchlist)
# define END(name) paravirt_get_gate_patchlist(PV_GATE_END_##name) # define END(name) ((unsigned long)__end_gate_##name##_patchlist)
patch_fsyscall_table(START(FSYSCALL), END(FSYSCALL)); patch_fsyscall_table(START(fsyscall), END(fsyscall));
patch_brl_fsys_bubble_down(START(BRL_FSYS_BUBBLE_DOWN), END(BRL_FSYS_BUBBLE_DOWN)); patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
ia64_patch_vtop(START(VTOP), END(VTOP)); ia64_patch_vtop(START(vtop), END(vtop));
ia64_patch_mckinley_e9(START(MCKINLEY_E9), END(MCKINLEY_E9)); ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
} }
void ia64_patch_phys_stack_reg(unsigned long val) void ia64_patch_phys_stack_reg(unsigned long val)
......
...@@ -50,8 +50,6 @@ ...@@ -50,8 +50,6 @@
#include <asm/mca.h> #include <asm/mca.h>
#include <asm/meminit.h> #include <asm/meminit.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/paravirt_patch.h>
#include <asm/patch.h> #include <asm/patch.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -360,8 +358,6 @@ reserve_memory (void) ...@@ -360,8 +358,6 @@ reserve_memory (void)
rsvd_region[n].end = (unsigned long) ia64_imva(_end); rsvd_region[n].end = (unsigned long) ia64_imva(_end);
n++; n++;
n += paravirt_reserve_memory(&rsvd_region[n]);
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
if (ia64_boot_param->initrd_start) { if (ia64_boot_param->initrd_start) {
rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
...@@ -528,10 +524,7 @@ setup_arch (char **cmdline_p) ...@@ -528,10 +524,7 @@ setup_arch (char **cmdline_p)
{ {
unw_init(); unw_init();
paravirt_arch_setup_early();
ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist); ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
paravirt_patch_apply();
*cmdline_p = __va(ia64_boot_param->command_line); *cmdline_p = __va(ia64_boot_param->command_line);
strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
...@@ -594,9 +587,6 @@ setup_arch (char **cmdline_p) ...@@ -594,9 +587,6 @@ setup_arch (char **cmdline_p)
cpu_init(); /* initialize the bootstrap CPU */ cpu_init(); /* initialize the bootstrap CPU */
mmu_context_init(); /* initialize context_id bitmap */ mmu_context_init(); /* initialize context_id bitmap */
paravirt_banner();
paravirt_arch_setup_console(cmdline_p);
#ifdef CONFIG_VT #ifdef CONFIG_VT
if (!conswitchp) { if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE) # if defined(CONFIG_DUMMY_CONSOLE)
...@@ -616,8 +606,6 @@ setup_arch (char **cmdline_p) ...@@ -616,8 +606,6 @@ setup_arch (char **cmdline_p)
#endif #endif
/* enable IA-64 Machine Check Abort Handling unless disabled */ /* enable IA-64 Machine Check Abort Handling unless disabled */
if (paravirt_arch_setup_nomca())
nomca = 1;
if (!nomca) if (!nomca)
ia64_mca_init(); ia64_mca_init();
......
...@@ -49,7 +49,6 @@ ...@@ -49,7 +49,6 @@
#include <asm/machvec.h> #include <asm/machvec.h>
#include <asm/mca.h> #include <asm/mca.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/processor.h> #include <asm/processor.h>
...@@ -569,7 +568,6 @@ void smp_prepare_boot_cpu(void) ...@@ -569,7 +568,6 @@ void smp_prepare_boot_cpu(void)
cpumask_set_cpu(smp_processor_id(), &cpu_callin_map); cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
set_numa_node(cpu_to_node_map[smp_processor_id()]); set_numa_node(cpu_to_node_map[smp_processor_id()]);
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
paravirt_post_smp_prepare_boot_cpu();
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
......
...@@ -25,7 +25,6 @@ ...@@ -25,7 +25,6 @@
#include <asm/machvec.h> #include <asm/machvec.h>
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/hw_irq.h> #include <asm/hw_irq.h>
#include <asm/paravirt.h>
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/sal.h> #include <asm/sal.h>
#include <asm/sections.h> #include <asm/sections.h>
...@@ -47,33 +46,12 @@ EXPORT_SYMBOL(last_cli_ip); ...@@ -47,33 +46,12 @@ EXPORT_SYMBOL(last_cli_ip);
#endif #endif
#ifdef CONFIG_PARAVIRT
/* We need to define a real function for sched_clock, to override the
weak default version */
unsigned long long sched_clock(void)
{
return paravirt_sched_clock();
}
#endif
#ifdef CONFIG_PARAVIRT
static void
paravirt_clocksource_resume(struct clocksource *cs)
{
if (pv_time_ops.clocksource_resume)
pv_time_ops.clocksource_resume();
}
#endif
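The deleted sched_clock() block relied on the kernel's weak-symbol convention: the generic code provides a weak default, and any strong definition elsewhere wins at link time, which is how the paravirt clock (with its stolen-time adjustment) was installed. A minimal two-file sketch of the mechanism with hypothetical names; note the two definitions must live in separate translation units:

/* default.c - weak default, as the generic code provides: */
__attribute__((weak)) unsigned long long my_sched_clock(void)
{
	return 0;	/* placeholder time source */
}

/* override.c - strong definition; the linker prefers it over the weak
 * default, the same way the paravirt sched_clock() took effect: */
unsigned long long my_sched_clock(void)
{
	return 42;	/* e.g. hypervisor time minus stolen ticks */
}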
static struct clocksource clocksource_itc = { static struct clocksource clocksource_itc = {
.name = "itc", .name = "itc",
.rating = 350, .rating = 350,
.read = itc_get_cycles, .read = itc_get_cycles,
.mask = CLOCKSOURCE_MASK(64), .mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS, .flags = CLOCK_SOURCE_IS_CONTINUOUS,
#ifdef CONFIG_PARAVIRT
.resume = paravirt_clocksource_resume,
#endif
}; };
static struct clocksource *itc_clocksource; static struct clocksource *itc_clocksource;
...@@ -164,9 +142,6 @@ timer_interrupt (int irq, void *dev_id) ...@@ -164,9 +142,6 @@ timer_interrupt (int irq, void *dev_id)
profile_tick(CPU_PROFILING); profile_tick(CPU_PROFILING);
if (paravirt_do_steal_accounting(&new_itm))
goto skip_process_time_accounting;
while (1) { while (1) {
update_process_times(user_mode(get_irq_regs())); update_process_times(user_mode(get_irq_regs()));
...@@ -187,8 +162,6 @@ timer_interrupt (int irq, void *dev_id) ...@@ -187,8 +162,6 @@ timer_interrupt (int irq, void *dev_id)
local_irq_disable(); local_irq_disable();
} }
skip_process_time_accounting:
do { do {
/* /*
* If we're too close to the next clock tick for * If we're too close to the next clock tick for
...@@ -337,8 +310,6 @@ void ia64_init_itm(void) ...@@ -337,8 +310,6 @@ void ia64_init_itm(void)
*/ */
clocksource_itc.rating = 50; clocksource_itc.rating = 50;
paravirt_init_missing_ticks_accounting(smp_processor_id());
/* avoid softlock up message when cpu is unplug and plugged again. */ /* avoid softlock up message when cpu is unplug and plugged again. */
touch_softlockup_watchdog(); touch_softlockup_watchdog();
......
...@@ -136,27 +136,6 @@ SECTIONS { ...@@ -136,27 +136,6 @@ SECTIONS {
__end___mckinley_e9_bundles = .; __end___mckinley_e9_bundles = .;
} }
#if defined(CONFIG_PARAVIRT)
. = ALIGN(16);
.paravirt_bundles : AT(ADDR(.paravirt_bundles) - LOAD_OFFSET) {
__start_paravirt_bundles = .;
*(.paravirt_bundles)
__stop_paravirt_bundles = .;
}
. = ALIGN(16);
.paravirt_insts : AT(ADDR(.paravirt_insts) - LOAD_OFFSET) {
__start_paravirt_insts = .;
*(.paravirt_insts)
__stop_paravirt_insts = .;
}
. = ALIGN(16);
.paravirt_branches : AT(ADDR(.paravirt_branches) - LOAD_OFFSET) {
__start_paravirt_branches = .;
*(.paravirt_branches)
__stop_paravirt_branches = .;
}
#endif
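The deleted output sections follow the kernel's usual build-time-table pattern: assembly macros append fixed-size records to a named section, the linker script brackets it with __start_*/__stop_* symbols, and the patcher iterates the records in between. A self-contained C sketch of the pattern using GNU ld's automatic __start_/__stop_ symbols; the record layout and section name here are hypothetical, not the ia64 ones:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-site record; the real tables carried bundle
 * addresses and patch types. */
struct patch_record {
	uint64_t addr;
	uint32_t type;
};

/* Emit a record into a named section, as the asm macros did for
 * .paravirt_bundles and friends. */
#define PATCH_SITE(a, t)					\
	static const struct patch_record site_##t		\
	__attribute__((section("patchsites"), used)) = { (a), (t) }

PATCH_SITE(0x1000, 1);
PATCH_SITE(0x2000, 2);

/* GNU ld defines these automatically for sections whose names are
 * valid C identifiers; the linker script above spelled them out as
 * __start_paravirt_bundles/__stop_paravirt_bundles instead. */
extern const struct patch_record __start_patchsites[];
extern const struct patch_record __stop_patchsites[];

int main(void)
{
	const struct patch_record *p;

	for (p = __start_patchsites; p < __stop_patchsites; ++p)
		printf("site %#llx type %u\n",
		       (unsigned long long)p->addr, p->type);
	return 0;
}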
#if defined(CONFIG_IA64_GENERIC) #if defined(CONFIG_IA64_GENERIC)
/* Machine Vector */ /* Machine Vector */
. = ALIGN(16); . = ALIGN(16);
......
...@@ -34,7 +34,6 @@ ...@@ -34,7 +34,6 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/unistd.h> #include <asm/unistd.h>
#include <asm/mca.h> #include <asm/mca.h>
#include <asm/paravirt.h>
extern void ia64_tlb_init (void); extern void ia64_tlb_init (void);
...@@ -244,7 +243,6 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) ...@@ -244,7 +243,6 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
static void __init static void __init
setup_gate (void) setup_gate (void)
{ {
void *gate_section;
struct page *page; struct page *page;
/* /*
...@@ -252,11 +250,10 @@ setup_gate (void) ...@@ -252,11 +250,10 @@ setup_gate (void)
* headers etc. and once execute-only page to enable * headers etc. and once execute-only page to enable
* privilege-promotion via "epc": * privilege-promotion via "epc":
*/ */
gate_section = paravirt_get_gate_section(); page = virt_to_page(ia64_imva(__start_gate_section));
page = virt_to_page(ia64_imva(gate_section));
put_kernel_page(page, GATE_ADDR, PAGE_READONLY); put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL #ifdef HAVE_BUGGY_SEGREL
page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE)); page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE); put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else #else
put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE); put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
...@@ -642,8 +639,8 @@ mem_init (void) ...@@ -642,8 +639,8 @@ mem_init (void)
* code can tell them apart. * code can tell them apart.
*/ */
for (i = 0; i < NR_syscalls; ++i) { for (i = 0; i < NR_syscalls; ++i) {
extern unsigned long fsyscall_table[NR_syscalls];
extern unsigned long sys_call_table[NR_syscalls]; extern unsigned long sys_call_table[NR_syscalls];
unsigned long *fsyscall_table = paravirt_get_fsyscall_table();
if (!fsyscall_table[i] || nolwsys) if (!fsyscall_table[i] || nolwsys)
fsyscall_table[i] = sys_call_table[i] | 1; fsyscall_table[i] = sys_call_table[i] | 1;
......
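The retained loop above tags syscalls that lack a light-weight handler by storing the heavy-weight entry with bit 0 set, so the fsyscall entry path can test a single bit to decide whether to bubble down to the full syscall path; this works because ia64 code addresses are bundle-aligned, leaving the low bits free. A minimal C sketch of the tag-and-test idea, with hypothetical addresses:

#include <stdint.h>
#include <stdio.h>

static inline uint64_t tag_heavy(uint64_t addr)   { return addr | 1; }
static inline int      is_heavy(uint64_t entry)   { return entry & 1; }
static inline uint64_t entry_addr(uint64_t entry) { return entry & ~1ULL; }

int main(void)
{
	uint64_t light = 0x4000;		/* has a light-weight handler */
	uint64_t heavy = tag_heavy(0x8000);	/* falls back to heavy path */

	printf("light: heavy=%d addr=%#llx\n",
	       is_heavy(light), (unsigned long long)entry_addr(light));
	printf("heavy: heavy=%d addr=%#llx\n",
	       is_heavy(heavy), (unsigned long long)entry_addr(heavy));
	return 0;
}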
#
# Checker for paravirtualizations of privileged operations.
#
s/ssm.*psr\.ic.*/.warning \"ssm psr.ic should not be used directly\"/g
s/rsm.*psr\.ic.*/.warning \"rsm psr.ic should not be used directly\"/g
s/ssm.*psr\.i.*/.warning \"ssm psr.i should not be used directly\"/g
s/rsm.*psr\.i.*/.warning \"rsm psr.i should not be used directly\"/g
s/ssm.*psr\.dt.*/.warning \"ssm psr.dt should not be used directly\"/g
s/rsm.*psr\.dt.*/.warning \"rsm psr.dt should not be used directly\"/g
s/mov.*=.*cr\.ifa/.warning \"cr.ifa should not be used directly\"/g
s/mov.*=.*cr\.itir/.warning \"cr.itir should not be used directly\"/g
s/mov.*=.*cr\.isr/.warning \"cr.isr should not be used directly\"/g
s/mov.*=.*cr\.iha/.warning \"cr.iha should not be used directly\"/g
s/mov.*=.*cr\.ipsr/.warning \"cr.ipsr should not be used directly\"/g
s/mov.*=.*cr\.iim/.warning \"cr.iim should not be used directly\"/g
s/mov.*=.*cr\.iip/.warning \"cr.iip should not be used directly\"/g
s/mov.*=.*cr\.ivr/.warning \"cr.ivr should not be used directly\"/g
s/mov.*=[^\.]*psr/.warning \"psr should not be used directly\"/g # avoid ar.fpsr
s/mov.*=.*ar\.eflags/.warning \"ar.eflags should not be used directly\"/g
s/mov.*=.*ar\.itc.*/.warning \"ar.itc should not be used directly\"/g
s/mov.*cr\.ifa.*=.*/.warning \"cr.ifa should not be used directly\"/g
s/mov.*cr\.itir.*=.*/.warning \"cr.itir should not be used directly\"/g
s/mov.*cr\.iha.*=.*/.warning \"cr.iha should not be used directly\"/g
s/mov.*cr\.ipsr.*=.*/.warning \"cr.ipsr should not be used directly\"/g
s/mov.*cr\.ifs.*=.*/.warning \"cr.ifs should not be used directly\"/g
s/mov.*cr\.iip.*=.*/.warning \"cr.iip should not be used directly\"/g
s/mov.*cr\.kr.*=.*/.warning \"cr.kr should not be used directly\"/g
s/mov.*ar\.eflags.*=.*/.warning \"ar.eflags should not be used directly\"/g
s/itc\.i.*/.warning \"itc.i should not be used directly.\"/g
s/itc\.d.*/.warning \"itc.d should not be used directly.\"/g
s/bsw\.0/.warning \"bsw.0 should not be used directly.\"/g
s/bsw\.1/.warning \"bsw.1 should not be used directly.\"/g
s/ptc\.ga.*/.warning \"ptc.ga should not be used directly.\"/g