Commit 4df8d22b authored by Isaku Yamahata, committed by Tony Luck

[IA64] pvops: paravirtualize entry.S

Paravirtualize ia64_switch_to, ia64_leave_syscall and ia64_leave_kernel.
These routines include privilege-sensitive or performance-critical
privileged instructions, so they need paravirtualization.
To paravirtualize them from a single source compiled multiple times,
they are converted into indirect jumps, and each pv instance defines
its own jump targets.

Cc: Keith Owens <kaos@ocs.com.au>
Cc: "Dong, Eddie" <eddie.dong@intel.com>
Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 498c5170
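
In C terms, the runtime half of this scheme is a writable pointer slot per entry point: the slot defaults to the native routine, callers always go through the slot, and a pv instance repoints the slot at boot. A minimal, self-contained sketch of the idea (illustrative names only, not the kernel code itself):

#include <stdio.h>

/* One patchable slot per entry point, defaulting to the native code. */
static void native_leave_kernel(void) { puts("native leave_kernel"); }
static void pv_leave_kernel(void)     { puts("pv leave_kernel"); }

static void (*leave_kernel_targ)(void) = native_leave_kernel;

/* The single shared entry point only forwards through the slot,
 * mirroring what the BRANCH_PROC stubs below do in assembly. */
static void paravirt_leave_kernel(void) { leave_kernel_targ(); }

int main(void)
{
	paravirt_leave_kernel();             /* native path */
	leave_kernel_targ = pv_leave_kernel; /* a pv instance registers */
	paravirt_leave_kernel();             /* paravirtualized path */
	return 0;
}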
@@ -36,7 +36,7 @@ obj-$(CONFIG_PCI_MSI)	+= msi_ia64.o
 mca_recovery-y			+= mca_drv.o mca_drv_asm.o
 obj-$(CONFIG_IA64_MC_ERR_INJECT)+= err_inject.o
 
-obj-$(CONFIG_PARAVIRT)		+= paravirt.o
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o paravirtentry.o
 
 obj-$(CONFIG_IA64_ESI)		+= esi.o
 ifneq ($(CONFIG_IA64_ESI),)
 ...
[large diff collapsed in this view]
@@ -286,3 +286,22 @@ struct pv_cpu_ops pv_cpu_ops = {
 		= ia64_native_intrin_local_irq_restore_func,
 };
 EXPORT_SYMBOL(pv_cpu_ops);
+
+/******************************************************************************
+ * replacement of hand written assembly codes.
+ */
+
+void
+paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch)
+{
+	extern unsigned long paravirt_switch_to_targ;
+	extern unsigned long paravirt_leave_syscall_targ;
+	extern unsigned long paravirt_work_processed_syscall_targ;
+	extern unsigned long paravirt_leave_kernel_targ;
+
+	paravirt_switch_to_targ = cpu_asm_switch->switch_to;
+	paravirt_leave_syscall_targ = cpu_asm_switch->leave_syscall;
+	paravirt_work_processed_syscall_targ =
+		cpu_asm_switch->work_processed_syscall;
+	paravirt_leave_kernel_targ = cpu_asm_switch->leave_kernel;
+}
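
A pv instance would pass its own entry points to paravirt_cpu_asm_init() early at boot so the paravirt_* stubs branch into its handlers. A hedged sketch of such a caller (the xen_* names are illustrative; this commit itself adds no caller):

/* Hypothetical pv-instance registration: the xen_* symbols stand in
 * for that instance's assembly entry points. */
extern void xen_switch_to(void);
extern void xen_leave_syscall(void);
extern void xen_work_processed_syscall(void);
extern void xen_leave_kernel(void);

static const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
	.switch_to		= (unsigned long) &xen_switch_to,
	.leave_syscall		= (unsigned long) &xen_leave_syscall,
	.work_processed_syscall	= (unsigned long) &xen_work_processed_syscall,
	.leave_kernel		= (unsigned long) &xen_leave_kernel,
};

void xen_setup_pv_ops(void)
{
	paravirt_cpu_asm_init(&xen_cpu_asm_switch);
}

The new file below, whose header names linux/arch/ia64/xen/paravirtentry.S, supplies the stubs and the *_targ slots that this initializer patches.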
/******************************************************************************
* linux/arch/ia64/xen/paravirtentry.S
*
* Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
* VA Linux Systems Japan K.K.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <asm/asmmacro.h>
#include <asm/asm-offsets.h>
#include "entry.h"

/* Define an 8-byte, globally visible data slot in .data.read_mostly,
 * pre-initialized to init_value. */
#define DATA8(sym, init_value)			\
	.pushsection .data.read_mostly ;	\
	.align 8 ;				\
	.global sym ;				\
	sym: ;					\
	data8 init_value ;			\
	.popsection

/* Load the current target address from the data slot and branch to it
 * indirectly, making the slot a patchable jump target. */
#define BRANCH(targ, reg, breg)		\
	movl reg=targ ;			\
	;;				\
	ld8 reg=[reg] ;			\
	;;				\
	mov breg=reg ;			\
	br.cond.sptk.many breg

/* Define paravirt_<sym>: a stub that jumps through paravirt_<sym>_targ,
 * which defaults to the native implementation ia64_native_<sym>. */
#define BRANCH_PROC(sym, reg, breg)					\
	DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ;	\
	GLOBAL_ENTRY(paravirt_ ## sym) ;				\
		BRANCH(paravirt_ ## sym ## _targ, reg, breg) ;		\
	END(paravirt_ ## sym)

/* Same as BRANCH_PROC, but emits pt_regs-based unwind info to match
 * the unwind state of the routines these stubs replace. */
#define BRANCH_PROC_UNWINFO(sym, reg, breg)				\
	DATA8(paravirt_ ## sym ## _targ, ia64_native_ ## sym) ;	\
	GLOBAL_ENTRY(paravirt_ ## sym) ;				\
		PT_REGS_UNWIND_INFO(0) ;				\
		BRANCH(paravirt_ ## sym ## _targ, reg, breg) ;		\
	END(paravirt_ ## sym)

BRANCH_PROC(switch_to, r22, b7)
BRANCH_PROC_UNWINFO(leave_syscall, r22, b7)
BRANCH_PROC(work_processed_syscall, r2, b7)
BRANCH_PROC_UNWINFO(leave_kernel, r22, b7)
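
Each BRANCH_PROC(sym, reg, breg) above therefore amounts to the following C rendering: a pointer-sized global initialized to the native routine, plus a stub that forwards through it (illustrative only; the real stubs are the assembly above):

typedef void (*entry_fn)(void);

extern void ia64_native_switch_to(void);

/* The DATA8 slot: one 8-byte cell, preset to the native entry. */
entry_fn paravirt_switch_to_targ = ia64_native_switch_to;

/* The GLOBAL_ENTRY/BRANCH pair: load the slot, jump indirectly. */
void paravirt_switch_to(void)
{
	paravirt_switch_to_targ();
}

paravirt_cpu_asm_init() from paravirt.c overwrites these slots, redirecting every later ia64_switch_to, ia64_leave_syscall, ia64_work_processed_syscall and ia64_leave_kernel into the registered pv instance.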
@@ -22,6 +22,14 @@
 #define DO_SAVE_MIN		IA64_NATIVE_DO_SAVE_MIN
 
+#define __paravirt_switch_to			ia64_native_switch_to
+#define __paravirt_leave_syscall		ia64_native_leave_syscall
+#define __paravirt_work_processed_syscall	ia64_native_work_processed_syscall
+#define __paravirt_leave_kernel			ia64_native_leave_kernel
+#define __paravirt_pending_syscall_end		ia64_work_pending_syscall_end
+#define __paravirt_work_processed_syscall_target \
+						ia64_work_processed_syscall
+
 #ifdef CONFIG_PARAVIRT_GUEST_ASM_CLOBBER_CHECK
 # define PARAVIRT_POISON	0xdeadbeefbaadf00d
 # define CLOBBER(clob)	\
 ...
@@ -80,12 +80,35 @@ extern unsigned long ia64_native_getreg_func(int regnum);
 		ia64_native_rsm(mask);	\
 } while (0)
 
+/******************************************************************************
+ * replacement of hand written assembly codes.
+ */
+struct pv_cpu_asm_switch {
+	unsigned long switch_to;
+	unsigned long leave_syscall;
+	unsigned long work_processed_syscall;
+	unsigned long leave_kernel;
+};
+void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
+
 #endif /* __ASSEMBLY__ */
 
+#define IA64_PARAVIRT_ASM_FUNC(name)	paravirt_ ## name
+
 #else
 
 /* fallback for native case */
+#define IA64_PARAVIRT_ASM_FUNC(name)	ia64_native_ ## name
 
 #endif /* CONFIG_PARAVIRT */
 
+/* these routines utilize privilege-sensitive or performance-sensitive
+ * privileged instructions so the code must be replaced with
+ * paravirtualized versions */
+#define ia64_switch_to			IA64_PARAVIRT_ASM_FUNC(switch_to)
+#define ia64_leave_syscall		IA64_PARAVIRT_ASM_FUNC(leave_syscall)
+#define ia64_work_processed_syscall	\
+	IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
+#define ia64_leave_kernel		IA64_PARAVIRT_ASM_FUNC(leave_kernel)
 
 #endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */
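
The net effect of IA64_PARAVIRT_ASM_FUNC is decided entirely at preprocessing time: with CONFIG_PARAVIRT, ia64_leave_kernel and friends name the indirect stubs from paravirtentry.S; without it, they name the native routines and cost nothing extra. A small stand-alone demonstration of the token-pasting (stand-in functions, not kernel code):

#include <stdio.h>

#define CONFIG_PARAVIRT 1	/* flip to 0 to take the native branch */

void paravirt_leave_kernel(void)    { puts("indirect paravirt stub"); }
void ia64_native_leave_kernel(void) { puts("native fast path"); }

#if CONFIG_PARAVIRT
# define IA64_PARAVIRT_ASM_FUNC(name)	paravirt_ ## name
#else
# define IA64_PARAVIRT_ASM_FUNC(name)	ia64_native_ ## name
#endif

#define ia64_leave_kernel	IA64_PARAVIRT_ASM_FUNC(leave_kernel)

int main(void)
{
	ia64_leave_kernel();	/* expands to paravirt_leave_kernel() here */
	return 0;
}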