Commit 003f7de6 authored by Paolo Bonzini

KVM: ia64: remove

KVM for ia64 has been marked as broken not just once, but twice even,
and the last patch from the maintainer is now roughly 5 years old.
Time for it to rest in peace.
Acked-by: Gleb Natapov <gleb@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 86619e7b
@@ -5352,15 +5352,6 @@ S: Supported
F: arch/powerpc/include/asm/kvm*
F: arch/powerpc/kvm/
KERNEL VIRTUAL MACHINE For Itanium (KVM/IA64)
M: Xiantao Zhang <xiantao.zhang@intel.com>
L: kvm-ia64@vger.kernel.org
W: http://kvm.qumranet.com
S: Supported
F: Documentation/ia64/kvm.txt
F: arch/ia64/include/asm/kvm*
F: arch/ia64/kvm/
KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
M: Christian Borntraeger <borntraeger@de.ibm.com>
M: Cornelia Huck <cornelia.huck@de.ibm.com>
......
@@ -21,7 +21,6 @@ config IA64
select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
select HAVE_FUNCTION_TRACER
select HAVE_DMA_ATTRS
select HAVE_KVM
select TTY
select HAVE_ARCH_TRACEHOOK
select HAVE_DMA_API_DEBUG
@@ -640,8 +639,6 @@ source "security/Kconfig"
source "crypto/Kconfig"
source "arch/ia64/kvm/Kconfig"
source "lib/Kconfig"
config IOMMU_HELPER
......
@@ -53,7 +53,6 @@ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
core-$(CONFIG_IA64_SGI_UV) += arch/ia64/uv/
core-$(CONFIG_KVM) += arch/ia64/kvm/
drivers-$(CONFIG_PCI) += arch/ia64/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
......
This diff is collapsed.
/*
* same structure to x86's
* Hopefully asm-x86/pvclock-abi.h would be moved to somewhere more generic.
* For now, define same duplicated definitions.
*/
#ifndef _ASM_IA64__PVCLOCK_ABI_H
#define _ASM_IA64__PVCLOCK_ABI_H
#ifndef __ASSEMBLY__
/*
* These structs MUST NOT be changed.
* They are the ABI between hypervisor and guest OS.
* KVM is using this.
*
* pvclock_vcpu_time_info holds the system time and the tsc timestamp
* of the last update. So the guest can use the tsc delta to get a
* more precise system time. There is one per virtual cpu.
*
* pvclock_wall_clock references the point in time when the system
* time was zero (usually boot time), thus the guest calculates the
* current wall clock by adding the system time.
*
* Protocol for the "version" fields is: hypervisor raises it (making
* it uneven) before it starts updating the fields and raises it again
* (making it even) when it is done. Thus the guest can make sure the
* time values it got are consistent by checking the version before
* and after reading them.
*/
struct pvclock_vcpu_time_info {
u32 version;
u32 pad0;
u64 tsc_timestamp;
u64 system_time;
u32 tsc_to_system_mul;
s8 tsc_shift;
u8 pad[3];
} __attribute__((__packed__)); /* 32 bytes */
struct pvclock_wall_clock {
u32 version;
u32 sec;
u32 nsec;
} __attribute__((__packed__));
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64__PVCLOCK_ABI_H */
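For reference, a guest consumes these fields with a seqlock-style retry loop driven by the version protocol described in the comment above. A minimal reader-side sketch, where get_cycles() and scale_delta() are assumed stand-in helpers (an itc/tsc read and the tsc_to_system_mul/tsc_shift fixed-point scaling), not part of this ABI:

/* Reader-side sketch of the version protocol described above.
 * get_cycles() and scale_delta() are assumed stand-ins, not part
 * of this ABI. rmb() is the usual kernel read barrier. */
static u64 pvclock_read_sketch(volatile struct pvclock_vcpu_time_info *ti)
{
	u32 version;
	u64 time;

	do {
		version = ti->version;	/* odd: update in progress */
		rmb();			/* read the payload after the version */
		time = ti->system_time +
			scale_delta(get_cycles() - ti->tsc_timestamp,
				    ti->tsc_to_system_mul, ti->tsc_shift);
		rmb();			/* re-check the version last */
	} while ((version & 1) || version != ti->version);

	return time;
}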
#ifndef __ASM_IA64_KVM_H
#define __ASM_IA64_KVM_H
/*
* kvm structure definitions for ia64
*
* Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
*/
#include <linux/types.h>
#include <linux/ioctl.h>
/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_IOAPIC
#define __KVM_HAVE_IRQ_LINE
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
#define KVM_IOAPIC_NUM_PINS 48
struct kvm_ioapic_state {
__u64 base_address;
__u32 ioregsel;
__u32 id;
__u32 irr;
__u32 pad;
union {
__u64 bits;
struct {
__u8 vector;
__u8 delivery_mode:3;
__u8 dest_mode:1;
__u8 delivery_status:1;
__u8 polarity:1;
__u8 remote_irr:1;
__u8 trig_mode:1;
__u8 mask:1;
__u8 reserve:7;
__u8 reserved[4];
__u8 dest_id;
} fields;
} redirtbl[KVM_IOAPIC_NUM_PINS];
};
#define KVM_IRQCHIP_PIC_MASTER 0
#define KVM_IRQCHIP_PIC_SLAVE 1
#define KVM_IRQCHIP_IOAPIC 2
#define KVM_NR_IRQCHIPS 3
#define KVM_CONTEXT_SIZE 8*1024
struct kvm_fpreg {
union {
unsigned long bits[2];
long double __dummy; /* force 16-byte alignment */
} u;
};
union context {
/* 8K size */
char dummy[KVM_CONTEXT_SIZE];
struct {
unsigned long psr;
unsigned long pr;
unsigned long caller_unat;
unsigned long pad;
unsigned long gr[32];
unsigned long ar[128];
unsigned long br[8];
unsigned long cr[128];
unsigned long rr[8];
unsigned long ibr[8];
unsigned long dbr[8];
unsigned long pkr[8];
struct kvm_fpreg fr[128];
};
};
struct thash_data {
union {
struct {
unsigned long p : 1; /* 0 */
unsigned long rv1 : 1; /* 1 */
unsigned long ma : 3; /* 2-4 */
unsigned long a : 1; /* 5 */
unsigned long d : 1; /* 6 */
unsigned long pl : 2; /* 7-8 */
unsigned long ar : 3; /* 9-11 */
unsigned long ppn : 38; /* 12-49 */
unsigned long rv2 : 2; /* 50-51 */
unsigned long ed : 1; /* 52 */
unsigned long ig1 : 11; /* 53-63 */
};
struct {
unsigned long __rv1 : 53; /* 0-52 */
unsigned long contiguous : 1; /*53 */
unsigned long tc : 1; /* 54 TR or TC */
unsigned long cl : 1;
/* 55 I side or D side cache line */
unsigned long len : 4; /* 56-59 */
unsigned long io : 1; /* 60 entry is for io or not */
unsigned long nomap : 1;
/* 61 entry can't be inserted into machine TLB. */
unsigned long checked : 1;
/* 62 for VTLB/VHPT sanity check */
unsigned long invalid : 1;
/* 63 invalid entry */
};
unsigned long page_flags;
}; /* same for VHPT and TLB */
union {
struct {
unsigned long rv3 : 2;
unsigned long ps : 6;
unsigned long key : 24;
unsigned long rv4 : 32;
};
unsigned long itir;
};
union {
struct {
unsigned long ig2 : 12;
unsigned long vpn : 49;
unsigned long vrn : 3;
};
unsigned long ifa;
unsigned long vadr;
struct {
unsigned long tag : 63;
unsigned long ti : 1;
};
unsigned long etag;
};
union {
struct thash_data *next;
unsigned long rid;
unsigned long gpaddr;
};
};
#define NITRS 8
#define NDTRS 8
struct saved_vpd {
unsigned long vhpi;
unsigned long vgr[16];
unsigned long vbgr[16];
unsigned long vnat;
unsigned long vbnat;
unsigned long vcpuid[5];
unsigned long vpsr;
unsigned long vpr;
union {
unsigned long vcr[128];
struct {
unsigned long dcr;
unsigned long itm;
unsigned long iva;
unsigned long rsv1[5];
unsigned long pta;
unsigned long rsv2[7];
unsigned long ipsr;
unsigned long isr;
unsigned long rsv3;
unsigned long iip;
unsigned long ifa;
unsigned long itir;
unsigned long iipa;
unsigned long ifs;
unsigned long iim;
unsigned long iha;
unsigned long rsv4[38];
unsigned long lid;
unsigned long ivr;
unsigned long tpr;
unsigned long eoi;
unsigned long irr[4];
unsigned long itv;
unsigned long pmv;
unsigned long cmcv;
unsigned long rsv5[5];
unsigned long lrr0;
unsigned long lrr1;
unsigned long rsv6[46];
};
};
};
struct kvm_regs {
struct saved_vpd vpd;
/*Arch-regs*/
int mp_state;
unsigned long vmm_rr;
/* TR and TC. */
struct thash_data itrs[NITRS];
struct thash_data dtrs[NDTRS];
/* Bit is set if there is a tr/tc for the region. */
unsigned char itr_regions;
unsigned char dtr_regions;
unsigned char tc_regions;
char irq_check;
unsigned long saved_itc;
unsigned long itc_check;
unsigned long timer_check;
unsigned long timer_pending;
unsigned long last_itc;
unsigned long vrr[8];
unsigned long ibr[8];
unsigned long dbr[8];
unsigned long insvc[4]; /* Interrupt in service. */
unsigned long xtp;
unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
unsigned long metaphysical_saved_rr0; /* from kvm_arch */
unsigned long metaphysical_saved_rr4; /* from kvm_arch */
unsigned long fp_psr; /* used for lazy float register */
unsigned long saved_gp;
/* for physical emulation */
union context saved_guest;
unsigned long reserved[64]; /* for future use */
};
struct kvm_sregs {
};
struct kvm_fpu {
};
#define KVM_IA64_VCPU_STACK_SHIFT 16
#define KVM_IA64_VCPU_STACK_SIZE (1UL << KVM_IA64_VCPU_STACK_SHIFT)
struct kvm_ia64_vcpu_stack {
unsigned char stack[KVM_IA64_VCPU_STACK_SIZE];
};
struct kvm_debug_exit_arch {
};
/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
};
/* definition of registers in kvm_run */
struct kvm_sync_regs {
};
#endif
#
# KVM configuration
#
source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
depends on HAVE_KVM || IA64
default y
---help---
Say Y here to get to see options for using your Linux host to run other
operating systems inside virtual machines (guests).
This option alone does not add any kernel code.
If you say N, all options in this submenu will be skipped and disabled.
if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on BROKEN
depends on HAVE_KVM && MODULES
depends on BROKEN
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
select KVM_APIC_ARCHITECTURE
select KVM_MMIO
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
processor equipped with virtualization extensions. You will also
need to select one or more of the processor modules below.
This module provides access to the hardware capabilities through
a character device node named /dev/kvm.
To compile this as a module, choose M here: the module
will be called kvm.
If unsure, say N.
config KVM_INTEL
tristate "KVM for Intel Itanium 2 processors support"
depends on KVM && m
---help---
Provides support for KVM on Itanium 2 processors equipped with the VT
extensions.
config KVM_DEVICE_ASSIGNMENT
bool "KVM legacy PCI device assignment support"
depends on KVM && PCI && IOMMU_API
default y
---help---
Provide support for legacy PCI device assignment through KVM. The
kernel now also supports a full featured userspace device driver
framework through VFIO, which supersedes much of this support.
If unsure, say Y.
source drivers/vhost/Kconfig
endif # VIRTUALIZATION
# This makefile is used to generate asm-offsets.h and build the source.
#
# Generate asm-offsets.h for the vmm module build
offsets-file := asm-offsets.h
always := $(offsets-file)
targets := $(offsets-file)
targets += arch/ia64/kvm/asm-offsets.s
# Default sed regexp - multiline due to syntax constraints
define sed-y
"/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
endef
quiet_cmd_offsets = GEN $@
define cmd_offsets
(set -e; \
echo "#ifndef __ASM_KVM_OFFSETS_H__"; \
echo "#define __ASM_KVM_OFFSETS_H__"; \
echo "/*"; \
echo " * DO NOT MODIFY."; \
echo " *"; \
echo " * This file was generated by Makefile"; \
echo " *"; \
echo " */"; \
echo ""; \
sed -ne $(sed-y) $<; \
echo ""; \
echo "#endif" ) > $@
endef
# We use internal rules to avoid the "is up to date" message from make
arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c \
$(wildcard $(srctree)/arch/ia64/include/asm/*.h)\
$(wildcard $(srctree)/include/linux/*.h)
$(call if_changed_dep,cc_s_c)
$(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
$(call cmd,offsets)
FORCE : $(obj)/$(offsets-file)
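To make the sed rule above concrete: each DEFINE() in asm-offsets.c makes the compiler emit a marker line into asm-offsets.s, which sed-y rewrites into a preprocessor define. Assuming sizeof(struct kvm_vcpu) came out to 2048 bytes (an illustrative number only), a line in asm-offsets.s such as

	->VMM_TASK_SIZE 2048 sizeof(struct kvm_vcpu)

would become, in the generated asm-offsets.h:

	#define VMM_TASK_SIZE 2048 /* sizeof(struct kvm_vcpu) */

The [\$$#]* part of the pattern (with $$ being a Makefile-escaped $) strips any immediate-operand prefix the assembler dialect may add before the value.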
#
# Makefile for Kernel-based Virtual Machine module
#
ccflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
asflags-y := -Ivirt/kvm -Iarch/ia64/kvm/
KVM := ../../../virt/kvm
common-objs = $(KVM)/kvm_main.o $(KVM)/ioapic.o \
$(KVM)/coalesced_mmio.o $(KVM)/irq_comm.o
ifeq ($(CONFIG_KVM_DEVICE_ASSIGNMENT),y)
common-objs += $(KVM)/assigned-dev.o $(KVM)/iommu.o
endif
kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
obj-$(CONFIG_KVM) += kvm.o
CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
vtlb.o process.o kvm_lib.o
# Link in memcpy and memset to avoid unresolved references from possible structure assignments
kvm-intel-objs += memcpy.o memset.o
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
/*
* asm-offsets.c Generate definitions needed by assembly language modules.
* This code generates raw asm output which is post-processed
* to extract and format the required data.
*
* Anthony Xu <anthony.xu@intel.com>
* Xiantao Zhang <xiantao.zhang@intel.com>
* Copyright (c) 2007 Intel Corporation KVM support.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
*/
#include <linux/kvm_host.h>
#include <linux/kbuild.h>
#include "vcpu.h"
void foo(void)
{
DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu));
DEFINE(VMM_PT_REGS_SIZE, sizeof(struct kvm_pt_regs));
BLANK();
DEFINE(VMM_VCPU_META_RR0_OFFSET,
offsetof(struct kvm_vcpu, arch.metaphysical_rr0));
DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
offsetof(struct kvm_vcpu,
arch.metaphysical_saved_rr0));
DEFINE(VMM_VCPU_VRR0_OFFSET,
offsetof(struct kvm_vcpu, arch.vrr[0]));
DEFINE(VMM_VPD_IRR0_OFFSET,
offsetof(struct vpd, irr[0]));
DEFINE(VMM_VCPU_ITC_CHECK_OFFSET,
offsetof(struct kvm_vcpu, arch.itc_check));
DEFINE(VMM_VCPU_IRQ_CHECK_OFFSET,
offsetof(struct kvm_vcpu, arch.irq_check));
DEFINE(VMM_VPD_VHPI_OFFSET,
offsetof(struct vpd, vhpi));
DEFINE(VMM_VCPU_VSA_BASE_OFFSET,
offsetof(struct kvm_vcpu, arch.vsa_base));
DEFINE(VMM_VCPU_VPD_OFFSET,
offsetof(struct kvm_vcpu, arch.vpd));
DEFINE(VMM_VCPU_IRQ_CHECK,
offsetof(struct kvm_vcpu, arch.irq_check));
DEFINE(VMM_VCPU_TIMER_PENDING,
offsetof(struct kvm_vcpu, arch.timer_pending));
DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
offsetof(struct kvm_vcpu, arch.metaphysical_saved_rr0));
DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
offsetof(struct kvm_vcpu, arch.mode_flags));
DEFINE(VMM_VCPU_ITC_OFS_OFFSET,
offsetof(struct kvm_vcpu, arch.itc_offset));
DEFINE(VMM_VCPU_LAST_ITC_OFFSET,
offsetof(struct kvm_vcpu, arch.last_itc));
DEFINE(VMM_VCPU_SAVED_GP_OFFSET,
offsetof(struct kvm_vcpu, arch.saved_gp));
BLANK();
DEFINE(VMM_PT_REGS_B6_OFFSET,
offsetof(struct kvm_pt_regs, b6));
DEFINE(VMM_PT_REGS_B7_OFFSET,
offsetof(struct kvm_pt_regs, b7));
DEFINE(VMM_PT_REGS_AR_CSD_OFFSET,
offsetof(struct kvm_pt_regs, ar_csd));
DEFINE(VMM_PT_REGS_AR_SSD_OFFSET,
offsetof(struct kvm_pt_regs, ar_ssd));
DEFINE(VMM_PT_REGS_R8_OFFSET,
offsetof(struct kvm_pt_regs, r8));
DEFINE(VMM_PT_REGS_R9_OFFSET,
offsetof(struct kvm_pt_regs, r9));
DEFINE(VMM_PT_REGS_R10_OFFSET,
offsetof(struct kvm_pt_regs, r10));
DEFINE(VMM_PT_REGS_R11_OFFSET,
offsetof(struct kvm_pt_regs, r11));
DEFINE(VMM_PT_REGS_CR_IPSR_OFFSET,
offsetof(struct kvm_pt_regs, cr_ipsr));
DEFINE(VMM_PT_REGS_CR_IIP_OFFSET,
offsetof(struct kvm_pt_regs, cr_iip));
DEFINE(VMM_PT_REGS_CR_IFS_OFFSET,
offsetof(struct kvm_pt_regs, cr_ifs));
DEFINE(VMM_PT_REGS_AR_UNAT_OFFSET,
offsetof(struct kvm_pt_regs, ar_unat));
DEFINE(VMM_PT_REGS_AR_PFS_OFFSET,
offsetof(struct kvm_pt_regs, ar_pfs));
DEFINE(VMM_PT_REGS_AR_RSC_OFFSET,
offsetof(struct kvm_pt_regs, ar_rsc));
DEFINE(VMM_PT_REGS_AR_RNAT_OFFSET,
offsetof(struct kvm_pt_regs, ar_rnat));
DEFINE(VMM_PT_REGS_AR_BSPSTORE_OFFSET,
offsetof(struct kvm_pt_regs, ar_bspstore));
DEFINE(VMM_PT_REGS_PR_OFFSET,
offsetof(struct kvm_pt_regs, pr));
DEFINE(VMM_PT_REGS_B0_OFFSET,
offsetof(struct kvm_pt_regs, b0));
DEFINE(VMM_PT_REGS_LOADRS_OFFSET,
offsetof(struct kvm_pt_regs, loadrs));
DEFINE(VMM_PT_REGS_R1_OFFSET,
offsetof(struct kvm_pt_regs, r1));
DEFINE(VMM_PT_REGS_R12_OFFSET,
offsetof(struct kvm_pt_regs, r12));
DEFINE(VMM_PT_REGS_R13_OFFSET,
offsetof(struct kvm_pt_regs, r13));
DEFINE(VMM_PT_REGS_AR_FPSR_OFFSET,
offsetof(struct kvm_pt_regs, ar_fpsr));
DEFINE(VMM_PT_REGS_R15_OFFSET,
offsetof(struct kvm_pt_regs, r15));
DEFINE(VMM_PT_REGS_R14_OFFSET,
offsetof(struct kvm_pt_regs, r14));
DEFINE(VMM_PT_REGS_R2_OFFSET,
offsetof(struct kvm_pt_regs, r2));
DEFINE(VMM_PT_REGS_R3_OFFSET,
offsetof(struct kvm_pt_regs, r3));
DEFINE(VMM_PT_REGS_R16_OFFSET,
offsetof(struct kvm_pt_regs, r16));
DEFINE(VMM_PT_REGS_R17_OFFSET,
offsetof(struct kvm_pt_regs, r17));
DEFINE(VMM_PT_REGS_R18_OFFSET,
offsetof(struct kvm_pt_regs, r18));
DEFINE(VMM_PT_REGS_R19_OFFSET,
offsetof(struct kvm_pt_regs, r19));
DEFINE(VMM_PT_REGS_R20_OFFSET,
offsetof(struct kvm_pt_regs, r20));
DEFINE(VMM_PT_REGS_R21_OFFSET,
offsetof(struct kvm_pt_regs, r21));
DEFINE(VMM_PT_REGS_R22_OFFSET,
offsetof(struct kvm_pt_regs, r22));
DEFINE(VMM_PT_REGS_R23_OFFSET,
offsetof(struct kvm_pt_regs, r23));
DEFINE(VMM_PT_REGS_R24_OFFSET,
offsetof(struct kvm_pt_regs, r24));
DEFINE(VMM_PT_REGS_R25_OFFSET,
offsetof(struct kvm_pt_regs, r25));
DEFINE(VMM_PT_REGS_R26_OFFSET,
offsetof(struct kvm_pt_regs, r26));
DEFINE(VMM_PT_REGS_R27_OFFSET,
offsetof(struct kvm_pt_regs, r27));
DEFINE(VMM_PT_REGS_R28_OFFSET,
offsetof(struct kvm_pt_regs, r28));
DEFINE(VMM_PT_REGS_R29_OFFSET,
offsetof(struct kvm_pt_regs, r29));
DEFINE(VMM_PT_REGS_R30_OFFSET,
offsetof(struct kvm_pt_regs, r30));
DEFINE(VMM_PT_REGS_R31_OFFSET,
offsetof(struct kvm_pt_regs, r31));
DEFINE(VMM_PT_REGS_AR_CCV_OFFSET,
offsetof(struct kvm_pt_regs, ar_ccv));
DEFINE(VMM_PT_REGS_F6_OFFSET,
offsetof(struct kvm_pt_regs, f6));
DEFINE(VMM_PT_REGS_F7_OFFSET,
offsetof(struct kvm_pt_regs, f7));
DEFINE(VMM_PT_REGS_F8_OFFSET,
offsetof(struct kvm_pt_regs, f8));
DEFINE(VMM_PT_REGS_F9_OFFSET,
offsetof(struct kvm_pt_regs, f9));
DEFINE(VMM_PT_REGS_F10_OFFSET,
offsetof(struct kvm_pt_regs, f10));
DEFINE(VMM_PT_REGS_F11_OFFSET,
offsetof(struct kvm_pt_regs, f11));
DEFINE(VMM_PT_REGS_R4_OFFSET,
offsetof(struct kvm_pt_regs, r4));
DEFINE(VMM_PT_REGS_R5_OFFSET,
offsetof(struct kvm_pt_regs, r5));
DEFINE(VMM_PT_REGS_R6_OFFSET,
offsetof(struct kvm_pt_regs, r6));
DEFINE(VMM_PT_REGS_R7_OFFSET,
offsetof(struct kvm_pt_regs, r7));
DEFINE(VMM_PT_REGS_EML_UNAT_OFFSET,
offsetof(struct kvm_pt_regs, eml_unat));
DEFINE(VMM_VCPU_IIPA_OFFSET,
offsetof(struct kvm_vcpu, arch.cr_iipa));
DEFINE(VMM_VCPU_OPCODE_OFFSET,
offsetof(struct kvm_vcpu, arch.opcode));
DEFINE(VMM_VCPU_CAUSE_OFFSET, offsetof(struct kvm_vcpu, arch.cause));
DEFINE(VMM_VCPU_ISR_OFFSET,
offsetof(struct kvm_vcpu, arch.cr_isr));
DEFINE(VMM_PT_REGS_R16_SLOT,
(((offsetof(struct kvm_pt_regs, r16)
- sizeof(struct kvm_pt_regs)) >> 3) & 0x3f));
DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
offsetof(struct kvm_vcpu, arch.mode_flags));
DEFINE(VMM_VCPU_GP_OFFSET, offsetof(struct kvm_vcpu, arch.__gp));
BLANK();
DEFINE(VMM_VPD_BASE_OFFSET, offsetof(struct kvm_vcpu, arch.vpd));
DEFINE(VMM_VPD_VIFS_OFFSET, offsetof(struct vpd, ifs));
DEFINE(VMM_VLSAPIC_INSVC_BASE_OFFSET,
offsetof(struct kvm_vcpu, arch.insvc[0]));
DEFINE(VMM_VPD_VPTA_OFFSET, offsetof(struct vpd, pta));
DEFINE(VMM_VPD_VPSR_OFFSET, offsetof(struct vpd, vpsr));
DEFINE(VMM_CTX_R4_OFFSET, offsetof(union context, gr[4]));
DEFINE(VMM_CTX_R5_OFFSET, offsetof(union context, gr[5]));
DEFINE(VMM_CTX_R12_OFFSET, offsetof(union context, gr[12]));
DEFINE(VMM_CTX_R13_OFFSET, offsetof(union context, gr[13]));
DEFINE(VMM_CTX_KR0_OFFSET, offsetof(union context, ar[0]));
DEFINE(VMM_CTX_KR1_OFFSET, offsetof(union context, ar[1]));
DEFINE(VMM_CTX_B0_OFFSET, offsetof(union context, br[0]));
DEFINE(VMM_CTX_B1_OFFSET, offsetof(union context, br[1]));
DEFINE(VMM_CTX_B2_OFFSET, offsetof(union context, br[2]));
DEFINE(VMM_CTX_RR0_OFFSET, offsetof(union context, rr[0]));
DEFINE(VMM_CTX_RSC_OFFSET, offsetof(union context, ar[16]));
DEFINE(VMM_CTX_BSPSTORE_OFFSET, offsetof(union context, ar[18]));
DEFINE(VMM_CTX_RNAT_OFFSET, offsetof(union context, ar[19]));
DEFINE(VMM_CTX_FCR_OFFSET, offsetof(union context, ar[21]));
DEFINE(VMM_CTX_EFLAG_OFFSET, offsetof(union context, ar[24]));
DEFINE(VMM_CTX_CFLG_OFFSET, offsetof(union context, ar[27]));
DEFINE(VMM_CTX_FSR_OFFSET, offsetof(union context, ar[28]));
DEFINE(VMM_CTX_FIR_OFFSET, offsetof(union context, ar[29]));
DEFINE(VMM_CTX_FDR_OFFSET, offsetof(union context, ar[30]));
DEFINE(VMM_CTX_UNAT_OFFSET, offsetof(union context, ar[36]));
DEFINE(VMM_CTX_FPSR_OFFSET, offsetof(union context, ar[40]));
DEFINE(VMM_CTX_PFS_OFFSET, offsetof(union context, ar[64]));
DEFINE(VMM_CTX_LC_OFFSET, offsetof(union context, ar[65]));
DEFINE(VMM_CTX_DCR_OFFSET, offsetof(union context, cr[0]));
DEFINE(VMM_CTX_IVA_OFFSET, offsetof(union context, cr[2]));
DEFINE(VMM_CTX_PTA_OFFSET, offsetof(union context, cr[8]));
DEFINE(VMM_CTX_IBR0_OFFSET, offsetof(union context, ibr[0]));
DEFINE(VMM_CTX_DBR0_OFFSET, offsetof(union context, dbr[0]));
DEFINE(VMM_CTX_F2_OFFSET, offsetof(union context, fr[2]));
DEFINE(VMM_CTX_F3_OFFSET, offsetof(union context, fr[3]));
DEFINE(VMM_CTX_F32_OFFSET, offsetof(union context, fr[32]));
DEFINE(VMM_CTX_F33_OFFSET, offsetof(union context, fr[33]));
DEFINE(VMM_CTX_PKR0_OFFSET, offsetof(union context, pkr[0]));
DEFINE(VMM_CTX_PSR_OFFSET, offsetof(union context, psr));
BLANK();
}
/*
* irq.h: In-kernel interrupt controller related definitions
* Copyright (c) 2008, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Authors:
* Xiantao Zhang <xiantao.zhang@intel.com>
*
*/
#ifndef __IRQ_H
#define __IRQ_H
#include "lapic.h"
static inline int irqchip_in_kernel(struct kvm *kvm)
{
return 1;
}
#endif
This diff is collapsed.
This diff is collapsed.
/*
* kvm_lib.c: Compile some libraries for kvm-intel module.
*
* Just include kernel's library, and disable symbols export.
* Copyright (C) 2008, Intel Corporation.
* Xiantao Zhang (xiantao.zhang@intel.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#undef CONFIG_MODULES
#include <linux/module.h>
#undef CONFIG_KALLSYMS
#undef EXPORT_SYMBOL
#undef EXPORT_SYMBOL_GPL
#define EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym)
#include "../../../lib/vsprintf.c"
#include "../../../lib/ctype.c"
/*
* kvm_minstate.h: min save macros
* Copyright (c) 2007, Intel Corporation.
*
* Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
* Xiantao Zhang (xiantao.zhang@intel.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
*/
#include <asm/asmmacro.h>
#include <asm/types.h>
#include <asm/kregs.h>
#include <asm/kvm_host.h>
#include "asm-offsets.h"
#define KVM_MINSTATE_START_SAVE_MIN \
mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
;; \
mov.m r28 = ar.rnat; \
addl r22 = VMM_RBS_OFFSET,r1; /* compute base of RBS */ \
;; \
lfetch.fault.excl.nt1 [r22]; \
addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1; \
mov r23 = ar.bspstore; /* save ar.bspstore */ \
;; \
mov ar.bspstore = r22; /* switch to kernel RBS */\
;; \
mov r18 = ar.bsp; \
mov ar.rsc = 0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */
#define KVM_MINSTATE_END_SAVE_MIN \
bsw.1; /* switch back to bank 1 (must be last in insn group) */\
;;
#define PAL_VSA_SYNC_READ \
/* begin to call pal vps sync_read */ \
{.mii; \
add r25 = VMM_VPD_BASE_OFFSET, r21; \
nop 0x0; \
mov r24=ip; \
;; \
} \
{.mmb \
add r24=0x20, r24; \
ld8 r25 = [r25]; /* read vpd base */ \
br.cond.sptk kvm_vps_sync_read; /*call the service*/ \
;; \
}; \
#define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
/*
* KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
* the minimum state necessary that allows us to turn psr.ic back
* on.
*
* Assumed state upon entry:
* psr.ic: off
* r31: contains saved predicates (pr)
*
* Upon exit, the state is as follows:
* psr.ic: off
* r2 = points to &pt_regs.r16
* r8 = contents of ar.ccv
* r9 = contents of ar.csd
* r10 = contents of ar.ssd
* r11 = FPSR_DEFAULT
* r12 = kernel sp (kernel virtual address)
* r13 = points to current task_struct (kernel virtual address)
* p15 = TRUE if psr.i is set in cr.ipsr
* predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
* preserved
*
* Note that psr.ic is NOT turned on by this macro. This is so that
* we can pass interruption state as arguments to a handler.
*/
#define PT(f) (VMM_PT_REGS_##f##_OFFSET)
#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
KVM_MINSTATE_GET_CURRENT(r16); /* M (or M;;I) */ \
mov r27 = ar.rsc; /* M */ \
mov r20 = r1; /* A */ \
mov r25 = ar.unat; /* M */ \
mov r29 = cr.ipsr; /* M */ \
mov r26 = ar.pfs; /* I */ \
mov r18 = cr.isr; \
COVER; /* B;; (or nothing) */ \
;; \
tbit.z p0,p15 = r29,IA64_PSR_I_BIT; \
mov r1 = r16; \
/* mov r21=r16; */ \
/* switch from user to kernel RBS: */ \
;; \
invala; /* M */ \
SAVE_IFS; \
;; \
KVM_MINSTATE_START_SAVE_MIN \
adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */ \
adds r16 = PT(CR_IPSR),r1; \
;; \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
st8 [r16] = r29; /* save cr.ipsr */ \
;; \
lfetch.fault.excl.nt1 [r17]; \
tbit.nz p15,p0 = r29,IA64_PSR_I_BIT; \
mov r29 = b0 \
;; \
adds r16 = PT(R8),r1; /* initialize first base pointer */\
adds r17 = PT(R9),r1; /* initialize second base pointer */\
;; \
.mem.offset 0,0; st8.spill [r16] = r8,16; \
.mem.offset 8,0; st8.spill [r17] = r9,16; \
;; \
.mem.offset 0,0; st8.spill [r16] = r10,24; \
.mem.offset 8,0; st8.spill [r17] = r11,24; \
;; \
mov r9 = cr.iip; /* M */ \
mov r10 = ar.fpsr; /* M */ \
;; \
st8 [r16] = r9,16; /* save cr.iip */ \
st8 [r17] = r30,16; /* save cr.ifs */ \
sub r18 = r18,r22; /* r18=RSE.ndirty*8 */ \
;; \
st8 [r16] = r25,16; /* save ar.unat */ \
st8 [r17] = r26,16; /* save ar.pfs */ \
shl r18 = r18,16; /* compute ar.rsc used for "loadrs" */\
;; \
st8 [r16] = r27,16; /* save ar.rsc */ \
st8 [r17] = r28,16; /* save ar.rnat */ \
;; /* avoid RAW on r16 & r17 */ \
st8 [r16] = r23,16; /* save ar.bspstore */ \
st8 [r17] = r31,16; /* save predicates */ \
;; \
st8 [r16] = r29,16; /* save b0 */ \
st8 [r17] = r18,16; /* save ar.rsc value for "loadrs" */\
;; \
.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \
.mem.offset 8,0; st8.spill [r17] = r12,16; \
adds r12 = -16,r1; /* switch to kernel memory stack */ \
;; \
.mem.offset 0,0; st8.spill [r16] = r13,16; \
.mem.offset 8,0; st8.spill [r17] = r10,16; /* save ar.fpsr */\
mov r13 = r21; /* establish `current' */ \
;; \
.mem.offset 0,0; st8.spill [r16] = r15,16; \
.mem.offset 8,0; st8.spill [r17] = r14,16; \
;; \
.mem.offset 0,0; st8.spill [r16] = r2,16; \
.mem.offset 8,0; st8.spill [r17] = r3,16; \
adds r2 = VMM_PT_REGS_R16_OFFSET,r1; \
;; \
adds r16 = VMM_VCPU_IIPA_OFFSET,r13; \
adds r17 = VMM_VCPU_ISR_OFFSET,r13; \
mov r26 = cr.iipa; \
mov r27 = cr.isr; \
;; \
st8 [r16] = r26; \
st8 [r17] = r27; \
;; \
EXTRA; \
mov r8 = ar.ccv; \
mov r9 = ar.csd; \
mov r10 = ar.ssd; \
movl r11 = FPSR_DEFAULT; /* L-unit */ \
adds r17 = VMM_VCPU_GP_OFFSET,r13; \
;; \
ld8 r1 = [r17];/* establish kernel global pointer */ \
;; \
PAL_VSA_SYNC_READ \
KVM_MINSTATE_END_SAVE_MIN
/*
* SAVE_REST saves the remainder of pt_regs (with psr.ic on).
*
* Assumed state upon entry:
* psr.ic: on
* r2: points to &pt_regs.f6
* r3: points to &pt_regs.f7
* r8: contents of ar.ccv
* r9: contents of ar.csd
* r10: contents of ar.ssd
* r11: FPSR_DEFAULT
*
* Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
*/
#define KVM_SAVE_REST \
.mem.offset 0,0; st8.spill [r2] = r16,16; \
.mem.offset 8,0; st8.spill [r3] = r17,16; \
;; \
.mem.offset 0,0; st8.spill [r2] = r18,16; \
.mem.offset 8,0; st8.spill [r3] = r19,16; \
;; \
.mem.offset 0,0; st8.spill [r2] = r20,16; \
.mem.offset 8,0; st8.spill [r3] = r21,16; \
mov r18=b6; \
;; \
.mem.offset 0,0; st8.spill [r2] = r22,16; \
.mem.offset 8,0; st8.spill [r3] = r23,16; \
mov r19 = b7; \
;; \
.mem.offset 0,0; st8.spill [r2] = r24,16; \
.mem.offset 8,0; st8.spill [r3] = r25,16; \
;; \
.mem.offset 0,0; st8.spill [r2] = r26,16; \
.mem.offset 8,0; st8.spill [r3] = r27,16; \
;; \
.mem.offset 0,0; st8.spill [r2] = r28,16; \
.mem.offset 8,0; st8.spill [r3] = r29,16; \
;; \
.mem.offset 0,0; st8.spill [r2] = r30,16; \
.mem.offset 8,0; st8.spill [r3] = r31,32; \
;; \
mov ar.fpsr = r11; \
st8 [r2] = r8,8; \
adds r24 = PT(B6)-PT(F7),r3; \
adds r25 = PT(B7)-PT(F7),r3; \
;; \
st8 [r24] = r18,16; /* b6 */ \
st8 [r25] = r19,16; /* b7 */ \
adds r2 = PT(R4)-PT(F6),r2; \
adds r3 = PT(R5)-PT(F7),r3; \
;; \
st8 [r24] = r9; /* ar.csd */ \
st8 [r25] = r10; /* ar.ssd */ \
;; \
mov r18 = ar.unat; \
adds r19 = PT(EML_UNAT)-PT(R4),r2; \
;; \
st8 [r19] = r18; /* eml_unat */ \
#define KVM_SAVE_EXTRA \
.mem.offset 0,0; st8.spill [r2] = r4,16; \
.mem.offset 8,0; st8.spill [r3] = r5,16; \
;; \
.mem.offset 0,0; st8.spill [r2] = r6,16; \
.mem.offset 8,0; st8.spill [r3] = r7; \
;; \
mov r26 = ar.unat; \
;; \
st8 [r2] = r26;/* eml_unat */ \
#define KVM_SAVE_MIN_WITH_COVER KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
#define KVM_SAVE_MIN_WITH_COVER_R19 KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
#define KVM_SAVE_MIN KVM_DO_SAVE_MIN( , mov r30 = r0, )
#ifndef __KVM_IA64_LAPIC_H
#define __KVM_IA64_LAPIC_H
#include <linux/kvm_host.h>
/*
* vlsapic
*/
struct kvm_lapic{
struct kvm_vcpu *vcpu;
uint64_t insvc[4];
uint64_t vhpi;
uint8_t xtp;
uint8_t pal_init_pending;
uint8_t pad[2];
};
int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, int dest, int dest_mode);
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
#define kvm_apic_present(x) (true)
#define kvm_lapic_enabled(x) (true)
#endif
#include "../lib/memcpy.S"
#include "../lib/memset.S"
#ifndef __KVM_IA64_MISC_H
#define __KVM_IA64_MISC_H
#include <linux/kvm_host.h>
/*
* misc.h
* Copyright (C) 2007, Intel Corporation.
* Xiantao Zhang (xiantao.zhang@intel.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
*/
/*
* Return the p2m base address on the host side.
*/
static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
{
return (uint64_t *)(kvm->arch.vm_base +
offsetof(struct kvm_vm_data, kvm_p2m));
}
static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
u64 paddr, u64 mem_flags)
{
uint64_t *pmt_base = kvm_host_get_pmt(kvm);
unsigned long pte;
pte = PAGE_ALIGN(paddr) | mem_flags;
pmt_base[gfn] = pte;
}
/*Function for translating host address to guest address*/
static inline void *to_guest(struct kvm *kvm, void *addr)
{
return (void *)((unsigned long)(addr) - kvm->arch.vm_base +
KVM_VM_DATA_BASE);
}
/*Function for translating guest address to host address*/
static inline void *to_host(struct kvm *kvm, void *addr)
{
return (void *)((unsigned long)addr - KVM_VM_DATA_BASE
+ kvm->arch.vm_base);
}
/* Get host context of the vcpu */
static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu)
{
union context *ctx = &vcpu->arch.host;
return to_guest(vcpu->kvm, ctx);
}
/* Get guest context of the vcpu */
static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu)
{
union context *ctx = &vcpu->arch.guest;
return to_guest(vcpu->kvm, ctx);
}
/* Get the exit data recorded by the gvmm */
static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu)
{
return &vcpu->arch.exit_data;
}
/* Get the vcpu's ioreq for the kvm module */
static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p_ctl_data;
if (vcpu) {
p_ctl_data = kvm_get_exit_data(vcpu);
if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION)
return &p_ctl_data->u.ioreq;
}
return NULL;
}
#endif
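Note that to_guest() and to_host() above are inverse offset translations over the same linear window: for a pointer p = vm_base + off inside the per-VM data area, to_guest() yields KVM_VM_DATA_BASE + off and to_host() maps that back to vm_base + off, so to_host(kvm, to_guest(kvm, p)) == p.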
/*
* mmio.c: MMIO emulation components.
* Copyright (c) 2004, Intel Corporation.
* Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
* Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
*
* Copyright (c) 2007 Intel Corporation KVM support.
* Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
* Xiantao Zhang (xiantao.zhang@intel.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
*/
#include <linux/kvm_host.h>
#include "vcpu.h"
static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
{
VLSAPIC_XTP(v) = val;
}
/*
* LSAPIC OFFSET
*/
#define PIB_LOW_HALF(ofst) !(ofst & (1 << 20))
#define PIB_OFST_INTA 0x1E0000
#define PIB_OFST_XTP 0x1E0008
/*
* execute write IPI op.
*/
static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
uint64_t addr, uint64_t data)
{
struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
unsigned long psr;
local_irq_save(psr);
p->exit_reason = EXIT_REASON_IPI;
p->u.ipi_data.addr.val = addr;
p->u.ipi_data.data.val = data;
vmm_transition(current_vcpu);
local_irq_restore(psr);
}
void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
unsigned long length, unsigned long val)
{
addr &= (PIB_SIZE - 1);
switch (addr) {
case PIB_OFST_INTA:
panic_vm(v, "Undefined write on PIB INTA\n");
break;
case PIB_OFST_XTP:
if (length == 1) {
vlsapic_write_xtp(v, val);
} else {
panic_vm(v, "Undefined write on PIB XTP\n");
}
break;
default:
if (PIB_LOW_HALF(addr)) {
/*Lower half */
if (length != 8)
panic_vm(v, "Can't LHF write with size %ld!\n",
length);
else
vlsapic_write_ipi(v, addr, val);
} else { /*Upper half */
panic_vm(v, "IPI-UHF write %lx\n", addr);
}
break;
}
}
unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
unsigned long length)
{
uint64_t result = 0;
addr &= (PIB_SIZE - 1);
switch (addr) {
case PIB_OFST_INTA:
if (length == 1) /* 1 byte load */
; /* There is no i8259, there is no INTA access*/
else
panic_vm(v, "Undefined read on PIB INTA\n");
break;
case PIB_OFST_XTP:
if (length == 1) {
result = VLSAPIC_XTP(v);
} else {
panic_vm(v, "Undefined read on PIB XTP\n");
}
break;
default:
panic_vm(v, "Undefined addr access for lsapic!\n");
break;
}
return result;
}
static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
u16 s, int ma, int dir)
{
unsigned long iot;
struct exit_ctl_data *p = &vcpu->arch.exit_data;
unsigned long psr;
iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);
local_irq_save(psr);
/*Intercept the access for PIB range*/
if (iot == GPFN_PIB) {
if (!dir)
lsapic_write(vcpu, src_pa, s, *dest);
else
*dest = lsapic_read(vcpu, src_pa, s);
goto out;
}
p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
p->u.ioreq.addr = src_pa;
p->u.ioreq.size = s;
p->u.ioreq.dir = dir;
if (dir == IOREQ_WRITE)
p->u.ioreq.data = *dest;
p->u.ioreq.state = STATE_IOREQ_READY;
vmm_transition(vcpu);
if (p->u.ioreq.state == STATE_IORESP_READY) {
if (dir == IOREQ_READ)
/* it's necessary to ensure zero extending */
*dest = p->u.ioreq.data & (~0UL >> (64-(s*8)));
} else
panic_vm(vcpu, "Unhandled mmio access returned!\n");
out:
local_irq_restore(psr);
return ;
}
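The zero-extension mask in the read path above is worth a worked example: for a 2-byte read, s = 2, so ~0UL >> (64 - 16) == 0xffff and only the low 16 bits of the ioreq data reach *dest; for s = 8 the shift is zero and the full 64-bit value passes through.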
/*
 * dir: 1 = read, 0 = write
 * inst_type: 0 = integer, 1 = floating point
 */
#define SL_INTEGER 0 /* store/load integer */
#define SL_FLOATING 1 /* store/load floating point */
void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
{
struct kvm_pt_regs *regs;
IA64_BUNDLE bundle;
int slot, dir = 0;
int inst_type = -1;
u16 size = 0;
u64 data, slot1a, slot1b, temp, update_reg;
s32 imm;
INST64 inst;
regs = vcpu_regs(vcpu);
if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
/* if fetching the code fails, return and try again */
return;
}
slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
if (!slot)
inst.inst = bundle.slot0;
else if (slot == 1) {
slot1a = bundle.slot1a;
slot1b = bundle.slot1b;
inst.inst = slot1a + (slot1b << 18);
} else if (slot == 2)
inst.inst = bundle.slot2;
/* Integer Load/Store */
if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
inst_type = SL_INTEGER;
size = (inst.M1.x6 & 0x3);
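/* The low two bits of x6 encode log2 of the access width
 * (ld1/ld2/ld4/ld8); the 'size = 1 << size' near the end of this
 * function turns it into a byte count. */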
if ((inst.M1.x6 >> 2) > 0xb) {
/*write*/
dir = IOREQ_WRITE;
data = vcpu_get_gr(vcpu, inst.M4.r2);
} else if ((inst.M1.x6 >> 2) < 0xb) {
/*read*/
dir = IOREQ_READ;
}
} else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
/* Integer Load + Reg update */
inst_type = SL_INTEGER;
dir = IOREQ_READ;
size = (inst.M2.x6 & 0x3);
temp = vcpu_get_gr(vcpu, inst.M2.r3);
update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
temp += update_reg;
vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
} else if (inst.M3.major == 5) {
/*Integer Load/Store + Imm update*/
inst_type = SL_INTEGER;
size = (inst.M3.x6&0x3);
if ((inst.M5.x6 >> 2) > 0xb) {
/*write*/
dir = IOREQ_WRITE;
data = vcpu_get_gr(vcpu, inst.M5.r2);
temp = vcpu_get_gr(vcpu, inst.M5.r3);
imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
(inst.M5.imm7 << 23);
temp += imm >> 23;
vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
} else if ((inst.M3.x6 >> 2) < 0xb) {
/*read*/
dir = IOREQ_READ;
temp = vcpu_get_gr(vcpu, inst.M3.r3);
imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
(inst.M3.imm7 << 23);
temp += imm >> 23;
vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
}
} else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
&& inst.M9.m == 0 && inst.M9.x == 0) {
/* Floating-point spill*/
struct ia64_fpreg v;
inst_type = SL_FLOATING;
dir = IOREQ_WRITE;
vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
/* Write high word. FIXME: this is a kludge! */
v.u.bits[1] &= 0x3ffff;
mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1], 8,
ma, IOREQ_WRITE);
data = v.u.bits[0];
size = 3;
} else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
/* Floating-point spill + Imm update */
struct ia64_fpreg v;
inst_type = SL_FLOATING;
dir = IOREQ_WRITE;
vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
temp = vcpu_get_gr(vcpu, inst.M10.r3);
imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
(inst.M10.imm7 << 23);
temp += imm >> 23;
vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
/* Write high word.FIXME: this is a kludge! */
v.u.bits[1] &= 0x3ffff;
mmio_access(vcpu, padr + 8, (u64 *)&v.u.bits[1],
8, ma, IOREQ_WRITE);
data = v.u.bits[0];
size = 3;
} else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
/* Floating-point stf8 + Imm update */
struct ia64_fpreg v;
inst_type = SL_FLOATING;
dir = IOREQ_WRITE;
size = 3;
vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
data = v.u.bits[0]; /* Significand. */
temp = vcpu_get_gr(vcpu, inst.M10.r3);
imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
(inst.M10.imm7 << 23);
temp += imm >> 23;
vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
} else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
&& inst.M15.x6 <= 0x2f) {
temp = vcpu_get_gr(vcpu, inst.M15.r3);
imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
(inst.M15.imm7 << 23);
temp += imm >> 23;
vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);
vcpu_increment_iip(vcpu);
return;
} else if (inst.M12.major == 6 && inst.M12.m == 1
&& inst.M12.x == 1 && inst.M12.x6 == 1) {
/* Floating-point Load Pair + Imm ldfp8 M12*/
struct ia64_fpreg v;
inst_type = SL_FLOATING;
dir = IOREQ_READ;
size = 8; /*ldfd*/
mmio_access(vcpu, padr, &data, size, ma, dir);
v.u.bits[0] = data;
v.u.bits[1] = 0x1003E;
vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
padr += 8;
mmio_access(vcpu, padr, &data, size, ma, dir);
v.u.bits[0] = data;
v.u.bits[1] = 0x1003E;
vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
padr += 8;
vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
vcpu_increment_iip(vcpu);
return;
} else {
inst_type = -1;
panic_vm(vcpu, "Unsupported MMIO access instruction! "
"Bunld[0]=0x%lx, Bundle[1]=0x%lx\n",
bundle.i64[0], bundle.i64[1]);
}
size = 1 << size;
if (dir == IOREQ_WRITE) {
mmio_access(vcpu, padr, &data, size, ma, dir);
} else {
mmio_access(vcpu, padr, &data, size, ma, dir);
if (inst_type == SL_INTEGER)
vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
else
panic_vm(vcpu, "Unsupported instruction type!\n");
}
vcpu_increment_iip(vcpu);
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/*
* vmm.c: vmm module interface with kvm module
*
* Copyright (c) 2007, Intel Corporation.
*
* Xiantao Zhang (xiantao.zhang@intel.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/fpswa.h>
#include "vcpu.h"
MODULE_AUTHOR("Intel");
MODULE_LICENSE("GPL");
extern char kvm_ia64_ivt;
extern char kvm_asm_mov_from_ar;
extern char kvm_asm_mov_from_ar_sn2;
extern fpswa_interface_t *vmm_fpswa_interface;
long vmm_sanity = 1;
struct kvm_vmm_info vmm_info = {
.module = THIS_MODULE,
.vmm_entry = vmm_entry,
.tramp_entry = vmm_trampoline,
.vmm_ivt = (unsigned long)&kvm_ia64_ivt,
.patch_mov_ar = (unsigned long)&kvm_asm_mov_from_ar,
.patch_mov_ar_sn2 = (unsigned long)&kvm_asm_mov_from_ar_sn2,
};
static int __init kvm_vmm_init(void)
{
vmm_fpswa_interface = fpswa_interface;
/* Register vmm data with the kvm side */
return kvm_init(&vmm_info, 1024, 0, THIS_MODULE);
}
static void __exit kvm_vmm_exit(void)
{
kvm_exit();
return ;
}
void vmm_spin_lock(vmm_spinlock_t *lock)
{
_vmm_raw_spin_lock(lock);
}
void vmm_spin_unlock(vmm_spinlock_t *lock)
{
_vmm_raw_spin_unlock(lock);
}
static void vcpu_debug_exit(struct kvm_vcpu *vcpu)
{
struct exit_ctl_data *p = &vcpu->arch.exit_data;
long psr;
local_irq_save(psr);
p->exit_reason = EXIT_REASON_DEBUG;
vmm_transition(vcpu);
local_irq_restore(psr);
}
asmlinkage int printk(const char *fmt, ...)
{
struct kvm_vcpu *vcpu = current_vcpu;
va_list args;
int r;
memset(vcpu->arch.log_buf, 0, VMM_LOG_LEN);
va_start(args, fmt);
r = vsnprintf(vcpu->arch.log_buf, VMM_LOG_LEN, fmt, args);
va_end(args);
vcpu_debug_exit(vcpu);
return r;
}
module_init(kvm_vmm_init)
module_exit(kvm_vmm_exit)
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
@@ -586,11 +586,6 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
case IOAPIC_REG_WINDOW:
ioapic_write_indirect(ioapic, data);
break;
#ifdef CONFIG_IA64
case IOAPIC_REG_EOI:
__kvm_ioapic_update_eoi(NULL, ioapic, data, IOAPIC_LEVEL_TRIG);
break;
#endif
default:
break;
......
@@ -19,7 +19,6 @@ struct kvm_vcpu;
/* Direct registers. */
#define IOAPIC_REG_SELECT 0x00
#define IOAPIC_REG_WINDOW 0x10
#define IOAPIC_REG_EOI 0x40 /* IA64 IOSAPIC only */
/* Indirect registers. */
#define IOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */
......
@@ -26,9 +26,6 @@
#include <trace/events/kvm.h>
#include <asm/msidef.h>
#ifdef CONFIG_IA64
#include <asm/iosapic.h>
#endif
#include "irq.h"
@@ -57,12 +54,7 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
{
#ifdef CONFIG_IA64
return irq->delivery_mode ==
(IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
#else
return irq->delivery_mode == APIC_DM_LOWEST;
#endif
}
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
@@ -346,20 +338,6 @@ static const struct kvm_irq_routing_entry default_routing[] = {
ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};
int kvm_setup_default_irq_routing(struct kvm *kvm)
......