Commit cfce216e authored by Linus Torvalds

Merge tag 'hyperv-next-signed-20240320' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull hyperv updates from Wei Liu:

 - Use Hyper-V entropy to seed guest random number generator (Michael
   Kelley)

 - Convert to platform remove callback returning void for vmbus (Uwe
   Kleine-König)

 - Introduce hv_get_hypervisor_version function (Nuno Das Neves)

 - Rename some HV_REGISTER_* defines for consistency (Nuno Das Neves)

 - Change prefix of generic HV_REGISTER_* MSRs to HV_MSR_* (Nuno Das
   Neves)

 - Cosmetic changes for hv_spinlock.c (Purna Pavan Chandra Aekkaladevi)

 - Use per cpu initial stack for vtl context (Saurabh Sengar)

* tag 'hyperv-next-signed-20240320' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  x86/hyperv: Use Hyper-V entropy to seed guest random number generator
  x86/hyperv: Cosmetic changes for hv_spinlock.c
  hyperv-tlfs: Rename some HV_REGISTER_* defines for consistency
  hv: vmbus: Convert to platform remove callback returning void
  mshyperv: Introduce hv_get_hypervisor_version function
  x86/hyperv: Use per cpu initial stack for vtl context
  hyperv-tlfs: Change prefix of generic HV_REGISTER_* MSRs to HV_MSR_*
parents 7b65c810 f2580a90
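
The HV_REGISTER_* to HV_MSR_* rename and the hv_get_msr()/hv_set_msr() accessors in the diffs below let arch-generic Hyper-V code touch synthetic MSRs without knowing whether the backing store is an x86 MSR or an ARM64 VP register. A minimal caller sketch after this series, mirroring __hv_read_ref_counter() in drivers/hv/hv_common.c further down (the wrapper name here is made up for illustration):

	/* Sketch only: hv_get_msr() resolves to rdmsrl() on x86 and to a
	 * HVCALL_GET_VP_REGISTERS hypercall on ARM64. */
	static u64 read_partition_time(void)
	{
		return hv_get_msr(HV_MSR_TIME_REF_COUNT);	/* 100 ns units */
	}
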
......@@ -160,22 +160,22 @@ void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
return;
panic_reported = true;
guest_id = hv_get_vpreg(HV_REGISTER_GUEST_OSID);
guest_id = hv_get_vpreg(HV_REGISTER_GUEST_OS_ID);
/*
* Hyper-V provides the ability to store only 5 values.
* Pick the passed in error value, the guest_id, the PC,
* and the SP.
*/
hv_set_vpreg(HV_REGISTER_CRASH_P0, err);
hv_set_vpreg(HV_REGISTER_CRASH_P1, guest_id);
hv_set_vpreg(HV_REGISTER_CRASH_P2, regs->pc);
hv_set_vpreg(HV_REGISTER_CRASH_P3, regs->sp);
hv_set_vpreg(HV_REGISTER_CRASH_P4, 0);
hv_set_vpreg(HV_REGISTER_GUEST_CRASH_P0, err);
hv_set_vpreg(HV_REGISTER_GUEST_CRASH_P1, guest_id);
hv_set_vpreg(HV_REGISTER_GUEST_CRASH_P2, regs->pc);
hv_set_vpreg(HV_REGISTER_GUEST_CRASH_P3, regs->sp);
hv_set_vpreg(HV_REGISTER_GUEST_CRASH_P4, 0);
/*
* Let Hyper-V know there is crash data available
*/
hv_set_vpreg(HV_REGISTER_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
hv_set_vpreg(HV_REGISTER_GUEST_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);
......@@ -19,10 +19,17 @@
static bool hyperv_initialized;
int hv_get_hypervisor_version(union hv_hypervisor_version_info *info)
{
hv_get_vpreg_128(HV_REGISTER_HYPERVISOR_VERSION,
(struct hv_get_vp_registers_output *)info);
return 0;
}
static int __init hyperv_init(void)
{
struct hv_get_vp_registers_output result;
u32 a, b, c, d;
u64 guest_id;
int ret;
......@@ -39,7 +46,7 @@ static int __init hyperv_init(void)
/* Setup the guest ID */
guest_id = hv_generate_guest_id(LINUX_VERSION_CODE);
hv_set_vpreg(HV_REGISTER_GUEST_OSID, guest_id);
hv_set_vpreg(HV_REGISTER_GUEST_OS_ID, guest_id);
/* Get the features and hints from Hyper-V */
hv_get_vpreg_128(HV_REGISTER_FEATURES, &result);
......@@ -54,15 +61,6 @@ static int __init hyperv_init(void)
ms_hyperv.features, ms_hyperv.priv_high, ms_hyperv.hints,
ms_hyperv.misc_features);
/* Get information about the Hyper-V host version */
hv_get_vpreg_128(HV_REGISTER_HYPERVISOR_VERSION, &result);
a = result.as32.a;
b = result.as32.b;
c = result.as32.c;
d = result.as32.d;
pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n",
b >> 16, b & 0xFFFF, a, d & 0xFFFFFF, c, d >> 24);
ret = hv_common_init();
if (ret)
return ret;
......@@ -74,6 +72,8 @@ static int __init hyperv_init(void)
return ret;
}
ms_hyperv_late_init();
hyperv_initialized = true;
return 0;
}
......
......@@ -21,14 +21,6 @@
* byte ordering of Linux running on ARM64, so no special handling is required.
*/
/*
* These Hyper-V registers provide information equivalent to the CPUID
* instruction on x86/x64.
*/
#define HV_REGISTER_HYPERVISOR_VERSION 0x00000100 /*CPUID 0x40000002 */
#define HV_REGISTER_FEATURES 0x00000200 /*CPUID 0x40000003 */
#define HV_REGISTER_ENLIGHTENMENTS 0x00000201 /*CPUID 0x40000004 */
/*
* Group C Features. See the asm-generic version of hyperv-tlfs.h
* for a description of Feature Groups.
......@@ -41,28 +33,29 @@
#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(13)
/*
* Synthetic register definitions equivalent to MSRs on x86/x64
* To support arch-generic code calling hv_set/get_register:
* - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl
* - On ARM, HV_MSR_ indicates a VP register accessed via hypercall
*/
#define HV_REGISTER_CRASH_P0 0x00000210
#define HV_REGISTER_CRASH_P1 0x00000211
#define HV_REGISTER_CRASH_P2 0x00000212
#define HV_REGISTER_CRASH_P3 0x00000213
#define HV_REGISTER_CRASH_P4 0x00000214
#define HV_REGISTER_CRASH_CTL 0x00000215
#define HV_MSR_CRASH_P0 (HV_REGISTER_GUEST_CRASH_P0)
#define HV_MSR_CRASH_P1 (HV_REGISTER_GUEST_CRASH_P1)
#define HV_MSR_CRASH_P2 (HV_REGISTER_GUEST_CRASH_P2)
#define HV_MSR_CRASH_P3 (HV_REGISTER_GUEST_CRASH_P3)
#define HV_MSR_CRASH_P4 (HV_REGISTER_GUEST_CRASH_P4)
#define HV_MSR_CRASH_CTL (HV_REGISTER_GUEST_CRASH_CTL)
#define HV_REGISTER_GUEST_OSID 0x00090002
#define HV_REGISTER_VP_INDEX 0x00090003
#define HV_REGISTER_TIME_REF_COUNT 0x00090004
#define HV_REGISTER_REFERENCE_TSC 0x00090017
#define HV_MSR_VP_INDEX (HV_REGISTER_VP_INDEX)
#define HV_MSR_TIME_REF_COUNT (HV_REGISTER_TIME_REF_COUNT)
#define HV_MSR_REFERENCE_TSC (HV_REGISTER_REFERENCE_TSC)
#define HV_REGISTER_SINT0 0x000A0000
#define HV_REGISTER_SCONTROL 0x000A0010
#define HV_REGISTER_SIEFP 0x000A0012
#define HV_REGISTER_SIMP 0x000A0013
#define HV_REGISTER_EOM 0x000A0014
#define HV_MSR_SINT0 (HV_REGISTER_SINT0)
#define HV_MSR_SCONTROL (HV_REGISTER_SCONTROL)
#define HV_MSR_SIEFP (HV_REGISTER_SIEFP)
#define HV_MSR_SIMP (HV_REGISTER_SIMP)
#define HV_MSR_EOM (HV_REGISTER_EOM)
#define HV_REGISTER_STIMER0_CONFIG 0x000B0000
#define HV_REGISTER_STIMER0_COUNT 0x000B0001
#define HV_MSR_STIMER0_CONFIG (HV_REGISTER_STIMER0_CONFIG)
#define HV_MSR_STIMER0_COUNT (HV_REGISTER_STIMER0_COUNT)
union hv_msi_entry {
u64 as_uint64[2];
......
......@@ -31,12 +31,12 @@ void hv_set_vpreg(u32 reg, u64 value);
u64 hv_get_vpreg(u32 reg);
void hv_get_vpreg_128(u32 reg, struct hv_get_vp_registers_output *result);
static inline void hv_set_register(unsigned int reg, u64 value)
static inline void hv_set_msr(unsigned int reg, u64 value)
{
hv_set_vpreg(reg, value);
}
static inline u64 hv_get_register(unsigned int reg)
static inline u64 hv_get_msr(unsigned int reg)
{
return hv_get_vpreg(reg);
}
......
......@@ -667,14 +667,14 @@ void hyperv_cleanup(void)
hv_hypercall_pg = NULL;
/* Reset the hypercall page */
hypercall_msr.as_uint64 = hv_get_register(HV_X64_MSR_HYPERCALL);
hypercall_msr.as_uint64 = hv_get_msr(HV_X64_MSR_HYPERCALL);
hypercall_msr.enable = 0;
hv_set_register(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
hv_set_msr(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
/* Reset the TSC page */
tsc_msr.as_uint64 = hv_get_register(HV_X64_MSR_REFERENCE_TSC);
tsc_msr.as_uint64 = hv_get_msr(HV_X64_MSR_REFERENCE_TSC);
tsc_msr.enable = 0;
hv_set_register(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
hv_set_msr(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
......
......@@ -16,7 +16,7 @@
#include <asm/paravirt.h>
#include <asm/apic.h>
static bool __initdata hv_pvspin = true;
static bool hv_pvspin __initdata = true;
static void hv_qlock_kick(int cpu)
{
......@@ -64,6 +64,7 @@ __visible bool hv_vcpu_is_preempted(int vcpu)
{
return false;
}
PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);
void __init hv_init_spinlocks(void)
......
......@@ -12,6 +12,7 @@
#include <asm/i8259.h>
#include <asm/mshyperv.h>
#include <asm/realmode.h>
#include <../kernel/smpboot.h>
extern struct boot_params boot_params;
static struct real_mode_header hv_vtl_real_mode_header;
......@@ -65,7 +66,7 @@ static void hv_vtl_ap_entry(void)
((secondary_startup_64_fn)secondary_startup_64)(&boot_params, &boot_params);
}
static int hv_vtl_bringup_vcpu(u32 target_vp_index, u64 eip_ignored)
static int hv_vtl_bringup_vcpu(u32 target_vp_index, int cpu, u64 eip_ignored)
{
u64 status;
int ret = 0;
......@@ -79,7 +80,9 @@ static int hv_vtl_bringup_vcpu(u32 target_vp_index, u64 eip_ignored)
struct ldttss_desc *ldt;
struct desc_struct *gdt;
u64 rsp = current->thread.sp;
struct task_struct *idle = idle_thread_get(cpu);
u64 rsp = (unsigned long)idle->thread.sp;
u64 rip = (u64)&hv_vtl_ap_entry;
native_store_gdt(&gdt_ptr);
......@@ -206,7 +209,15 @@ static int hv_vtl_apicid_to_vp_id(u32 apic_id)
static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip)
{
int vp_id;
int vp_id, cpu;
/* Find the logical CPU for the APIC ID */
for_each_present_cpu(cpu) {
if (arch_match_cpu_phys_id(cpu, apicid))
break;
}
if (cpu >= nr_cpu_ids)
return -EINVAL;
pr_debug("Bringing up CPU with APIC ID %d in VTL2...\n", apicid);
vp_id = hv_vtl_apicid_to_vp_id(apicid);
......@@ -220,7 +231,7 @@ static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip)
return -EINVAL;
}
return hv_vtl_bringup_vcpu(vp_id, start_eip);
return hv_vtl_bringup_vcpu(vp_id, cpu, start_eip);
}
int __init hv_vtl_early_init(void)
......
......@@ -182,7 +182,7 @@ enum hv_isolation_type {
#define HV_X64_MSR_HYPERCALL 0x40000001
/* MSR used to provide vcpu index */
#define HV_REGISTER_VP_INDEX 0x40000002
#define HV_X64_MSR_VP_INDEX 0x40000002
/* MSR used to reset the guest OS. */
#define HV_X64_MSR_RESET 0x40000003
......@@ -191,10 +191,10 @@ enum hv_isolation_type {
#define HV_X64_MSR_VP_RUNTIME 0x40000010
/* MSR used to read the per-partition time reference counter */
#define HV_REGISTER_TIME_REF_COUNT 0x40000020
#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
/* A partition's reference time stamp counter (TSC) page */
#define HV_REGISTER_REFERENCE_TSC 0x40000021
#define HV_X64_MSR_REFERENCE_TSC 0x40000021
/* MSR used to retrieve the TSC frequency */
#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
......@@ -209,61 +209,61 @@ enum hv_isolation_type {
#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073
/* Define synthetic interrupt controller model specific registers. */
#define HV_REGISTER_SCONTROL 0x40000080
#define HV_REGISTER_SVERSION 0x40000081
#define HV_REGISTER_SIEFP 0x40000082
#define HV_REGISTER_SIMP 0x40000083
#define HV_REGISTER_EOM 0x40000084
#define HV_REGISTER_SINT0 0x40000090
#define HV_REGISTER_SINT1 0x40000091
#define HV_REGISTER_SINT2 0x40000092
#define HV_REGISTER_SINT3 0x40000093
#define HV_REGISTER_SINT4 0x40000094
#define HV_REGISTER_SINT5 0x40000095
#define HV_REGISTER_SINT6 0x40000096
#define HV_REGISTER_SINT7 0x40000097
#define HV_REGISTER_SINT8 0x40000098
#define HV_REGISTER_SINT9 0x40000099
#define HV_REGISTER_SINT10 0x4000009A
#define HV_REGISTER_SINT11 0x4000009B
#define HV_REGISTER_SINT12 0x4000009C
#define HV_REGISTER_SINT13 0x4000009D
#define HV_REGISTER_SINT14 0x4000009E
#define HV_REGISTER_SINT15 0x4000009F
#define HV_X64_MSR_SCONTROL 0x40000080
#define HV_X64_MSR_SVERSION 0x40000081
#define HV_X64_MSR_SIEFP 0x40000082
#define HV_X64_MSR_SIMP 0x40000083
#define HV_X64_MSR_EOM 0x40000084
#define HV_X64_MSR_SINT0 0x40000090
#define HV_X64_MSR_SINT1 0x40000091
#define HV_X64_MSR_SINT2 0x40000092
#define HV_X64_MSR_SINT3 0x40000093
#define HV_X64_MSR_SINT4 0x40000094
#define HV_X64_MSR_SINT5 0x40000095
#define HV_X64_MSR_SINT6 0x40000096
#define HV_X64_MSR_SINT7 0x40000097
#define HV_X64_MSR_SINT8 0x40000098
#define HV_X64_MSR_SINT9 0x40000099
#define HV_X64_MSR_SINT10 0x4000009A
#define HV_X64_MSR_SINT11 0x4000009B
#define HV_X64_MSR_SINT12 0x4000009C
#define HV_X64_MSR_SINT13 0x4000009D
#define HV_X64_MSR_SINT14 0x4000009E
#define HV_X64_MSR_SINT15 0x4000009F
/*
* Define synthetic interrupt controller model specific registers for
* nested hypervisor.
*/
#define HV_REGISTER_NESTED_SCONTROL 0x40001080
#define HV_REGISTER_NESTED_SVERSION 0x40001081
#define HV_REGISTER_NESTED_SIEFP 0x40001082
#define HV_REGISTER_NESTED_SIMP 0x40001083
#define HV_REGISTER_NESTED_EOM 0x40001084
#define HV_REGISTER_NESTED_SINT0 0x40001090
#define HV_X64_MSR_NESTED_SCONTROL 0x40001080
#define HV_X64_MSR_NESTED_SVERSION 0x40001081
#define HV_X64_MSR_NESTED_SIEFP 0x40001082
#define HV_X64_MSR_NESTED_SIMP 0x40001083
#define HV_X64_MSR_NESTED_EOM 0x40001084
#define HV_X64_MSR_NESTED_SINT0 0x40001090
/*
* Synthetic Timer MSRs. Four timers per vcpu.
*/
#define HV_REGISTER_STIMER0_CONFIG 0x400000B0
#define HV_REGISTER_STIMER0_COUNT 0x400000B1
#define HV_REGISTER_STIMER1_CONFIG 0x400000B2
#define HV_REGISTER_STIMER1_COUNT 0x400000B3
#define HV_REGISTER_STIMER2_CONFIG 0x400000B4
#define HV_REGISTER_STIMER2_COUNT 0x400000B5
#define HV_REGISTER_STIMER3_CONFIG 0x400000B6
#define HV_REGISTER_STIMER3_COUNT 0x400000B7
#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0
#define HV_X64_MSR_STIMER0_COUNT 0x400000B1
#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2
#define HV_X64_MSR_STIMER1_COUNT 0x400000B3
#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4
#define HV_X64_MSR_STIMER2_COUNT 0x400000B5
#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
/* Hyper-V guest idle MSR */
#define HV_X64_MSR_GUEST_IDLE 0x400000F0
/* Hyper-V guest crash notification MSR's */
#define HV_REGISTER_CRASH_P0 0x40000100
#define HV_REGISTER_CRASH_P1 0x40000101
#define HV_REGISTER_CRASH_P2 0x40000102
#define HV_REGISTER_CRASH_P3 0x40000103
#define HV_REGISTER_CRASH_P4 0x40000104
#define HV_REGISTER_CRASH_CTL 0x40000105
#define HV_X64_MSR_CRASH_P0 0x40000100
#define HV_X64_MSR_CRASH_P1 0x40000101
#define HV_X64_MSR_CRASH_P2 0x40000102
#define HV_X64_MSR_CRASH_P3 0x40000103
#define HV_X64_MSR_CRASH_P4 0x40000104
#define HV_X64_MSR_CRASH_CTL 0x40000105
/* TSC emulation after migration */
#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
......@@ -276,31 +276,38 @@ enum hv_isolation_type {
/* HV_X64_MSR_TSC_INVARIANT_CONTROL bits */
#define HV_EXPOSE_INVARIANT_TSC BIT_ULL(0)
/* Register name aliases for temporary compatibility */
#define HV_X64_MSR_STIMER0_COUNT HV_REGISTER_STIMER0_COUNT
#define HV_X64_MSR_STIMER0_CONFIG HV_REGISTER_STIMER0_CONFIG
#define HV_X64_MSR_STIMER1_COUNT HV_REGISTER_STIMER1_COUNT
#define HV_X64_MSR_STIMER1_CONFIG HV_REGISTER_STIMER1_CONFIG
#define HV_X64_MSR_STIMER2_COUNT HV_REGISTER_STIMER2_COUNT
#define HV_X64_MSR_STIMER2_CONFIG HV_REGISTER_STIMER2_CONFIG
#define HV_X64_MSR_STIMER3_COUNT HV_REGISTER_STIMER3_COUNT
#define HV_X64_MSR_STIMER3_CONFIG HV_REGISTER_STIMER3_CONFIG
#define HV_X64_MSR_SCONTROL HV_REGISTER_SCONTROL
#define HV_X64_MSR_SVERSION HV_REGISTER_SVERSION
#define HV_X64_MSR_SIMP HV_REGISTER_SIMP
#define HV_X64_MSR_SIEFP HV_REGISTER_SIEFP
#define HV_X64_MSR_VP_INDEX HV_REGISTER_VP_INDEX
#define HV_X64_MSR_EOM HV_REGISTER_EOM
#define HV_X64_MSR_SINT0 HV_REGISTER_SINT0
#define HV_X64_MSR_SINT15 HV_REGISTER_SINT15
#define HV_X64_MSR_CRASH_P0 HV_REGISTER_CRASH_P0
#define HV_X64_MSR_CRASH_P1 HV_REGISTER_CRASH_P1
#define HV_X64_MSR_CRASH_P2 HV_REGISTER_CRASH_P2
#define HV_X64_MSR_CRASH_P3 HV_REGISTER_CRASH_P3
#define HV_X64_MSR_CRASH_P4 HV_REGISTER_CRASH_P4
#define HV_X64_MSR_CRASH_CTL HV_REGISTER_CRASH_CTL
#define HV_X64_MSR_TIME_REF_COUNT HV_REGISTER_TIME_REF_COUNT
#define HV_X64_MSR_REFERENCE_TSC HV_REGISTER_REFERENCE_TSC
/*
* To support arch-generic code calling hv_set/get_register:
* - On x86, HV_MSR_ indicates an MSR accessed via rdmsrl/wrmsrl
* - On ARM, HV_MSR_ indicates a VP register accessed via hypercall
*/
#define HV_MSR_CRASH_P0 (HV_X64_MSR_CRASH_P0)
#define HV_MSR_CRASH_P1 (HV_X64_MSR_CRASH_P1)
#define HV_MSR_CRASH_P2 (HV_X64_MSR_CRASH_P2)
#define HV_MSR_CRASH_P3 (HV_X64_MSR_CRASH_P3)
#define HV_MSR_CRASH_P4 (HV_X64_MSR_CRASH_P4)
#define HV_MSR_CRASH_CTL (HV_X64_MSR_CRASH_CTL)
#define HV_MSR_VP_INDEX (HV_X64_MSR_VP_INDEX)
#define HV_MSR_TIME_REF_COUNT (HV_X64_MSR_TIME_REF_COUNT)
#define HV_MSR_REFERENCE_TSC (HV_X64_MSR_REFERENCE_TSC)
#define HV_MSR_SINT0 (HV_X64_MSR_SINT0)
#define HV_MSR_SVERSION (HV_X64_MSR_SVERSION)
#define HV_MSR_SCONTROL (HV_X64_MSR_SCONTROL)
#define HV_MSR_SIEFP (HV_X64_MSR_SIEFP)
#define HV_MSR_SIMP (HV_X64_MSR_SIMP)
#define HV_MSR_EOM (HV_X64_MSR_EOM)
#define HV_MSR_NESTED_SCONTROL (HV_X64_MSR_NESTED_SCONTROL)
#define HV_MSR_NESTED_SVERSION (HV_X64_MSR_NESTED_SVERSION)
#define HV_MSR_NESTED_SIEFP (HV_X64_MSR_NESTED_SIEFP)
#define HV_MSR_NESTED_SIMP (HV_X64_MSR_NESTED_SIMP)
#define HV_MSR_NESTED_EOM (HV_X64_MSR_NESTED_EOM)
#define HV_MSR_NESTED_SINT0 (HV_X64_MSR_NESTED_SINT0)
#define HV_MSR_STIMER0_CONFIG (HV_X64_MSR_STIMER0_CONFIG)
#define HV_MSR_STIMER0_COUNT (HV_X64_MSR_STIMER0_COUNT)
/*
* Registers are only accessible via HVCALL_GET_VP_REGISTERS hvcall and
......
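
Both spellings survive in the x86 header above on purpose: HV_X64_MSR_* is the raw MSR number for x86-only paths that call rdmsrl()/wrmsrl() directly, while the HV_MSR_* aliases are what arch-generic code passes to hv_get_msr()/hv_set_msr(). An illustrative sketch, assuming a Hyper-V guest context with <asm/msr.h> and <asm/mshyperv.h> available (the function name is hypothetical):

	/* Sketch: the same register read via both spellings. */
	static u64 read_crash_ctl_both_ways(void)
	{
		u64 ctl;

		rdmsrl(HV_X64_MSR_CRASH_CTL, ctl);	/* x86-only path, raw MSR access */
		ctl = hv_get_msr(HV_MSR_CRASH_CTL);	/* arch-generic path via the accessor */
		return ctl;
	}

On x86 the accessor also remaps SynIC registers to their HV_X64_MSR_NESTED_* variants when running nested, as the mshyperv.c hunk below shows, so callers need not handle that themselves.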
......@@ -293,24 +293,24 @@ static inline void hv_ivm_msr_write(u64 msr, u64 value) {}
static inline void hv_ivm_msr_read(u64 msr, u64 *value) {}
#endif
static inline bool hv_is_synic_reg(unsigned int reg)
static inline bool hv_is_synic_msr(unsigned int reg)
{
return (reg >= HV_REGISTER_SCONTROL) &&
(reg <= HV_REGISTER_SINT15);
return (reg >= HV_X64_MSR_SCONTROL) &&
(reg <= HV_X64_MSR_SINT15);
}
static inline bool hv_is_sint_reg(unsigned int reg)
static inline bool hv_is_sint_msr(unsigned int reg)
{
return (reg >= HV_REGISTER_SINT0) &&
(reg <= HV_REGISTER_SINT15);
return (reg >= HV_X64_MSR_SINT0) &&
(reg <= HV_X64_MSR_SINT15);
}
u64 hv_get_register(unsigned int reg);
void hv_set_register(unsigned int reg, u64 value);
u64 hv_get_non_nested_register(unsigned int reg);
void hv_set_non_nested_register(unsigned int reg, u64 value);
u64 hv_get_msr(unsigned int reg);
void hv_set_msr(unsigned int reg, u64 value);
u64 hv_get_non_nested_msr(unsigned int reg);
void hv_set_non_nested_msr(unsigned int reg, u64 value);
static __always_inline u64 hv_raw_get_register(unsigned int reg)
static __always_inline u64 hv_raw_get_msr(unsigned int reg)
{
return __rdmsr(reg);
}
......@@ -331,10 +331,10 @@ static inline int hyperv_flush_guest_mapping_range(u64 as,
{
return -1;
}
static inline void hv_set_register(unsigned int reg, u64 value) { }
static inline u64 hv_get_register(unsigned int reg) { return 0; }
static inline void hv_set_non_nested_register(unsigned int reg, u64 value) { }
static inline u64 hv_get_non_nested_register(unsigned int reg) { return 0; }
static inline void hv_set_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_msr(unsigned int reg) { return 0; }
static inline void hv_set_non_nested_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_non_nested_msr(unsigned int reg) { return 0; }
#endif /* CONFIG_HYPERV */
......
......@@ -45,70 +45,70 @@ bool hyperv_paravisor_present __ro_after_init;
EXPORT_SYMBOL_GPL(hyperv_paravisor_present);
#if IS_ENABLED(CONFIG_HYPERV)
static inline unsigned int hv_get_nested_reg(unsigned int reg)
static inline unsigned int hv_get_nested_msr(unsigned int reg)
{
if (hv_is_sint_reg(reg))
return reg - HV_REGISTER_SINT0 + HV_REGISTER_NESTED_SINT0;
if (hv_is_sint_msr(reg))
return reg - HV_X64_MSR_SINT0 + HV_X64_MSR_NESTED_SINT0;
switch (reg) {
case HV_REGISTER_SIMP:
return HV_REGISTER_NESTED_SIMP;
case HV_REGISTER_SIEFP:
return HV_REGISTER_NESTED_SIEFP;
case HV_REGISTER_SVERSION:
return HV_REGISTER_NESTED_SVERSION;
case HV_REGISTER_SCONTROL:
return HV_REGISTER_NESTED_SCONTROL;
case HV_REGISTER_EOM:
return HV_REGISTER_NESTED_EOM;
case HV_X64_MSR_SIMP:
return HV_X64_MSR_NESTED_SIMP;
case HV_X64_MSR_SIEFP:
return HV_X64_MSR_NESTED_SIEFP;
case HV_X64_MSR_SVERSION:
return HV_X64_MSR_NESTED_SVERSION;
case HV_X64_MSR_SCONTROL:
return HV_X64_MSR_NESTED_SCONTROL;
case HV_X64_MSR_EOM:
return HV_X64_MSR_NESTED_EOM;
default:
return reg;
}
}
u64 hv_get_non_nested_register(unsigned int reg)
u64 hv_get_non_nested_msr(unsigned int reg)
{
u64 value;
if (hv_is_synic_reg(reg) && ms_hyperv.paravisor_present)
if (hv_is_synic_msr(reg) && ms_hyperv.paravisor_present)
hv_ivm_msr_read(reg, &value);
else
rdmsrl(reg, value);
return value;
}
EXPORT_SYMBOL_GPL(hv_get_non_nested_register);
EXPORT_SYMBOL_GPL(hv_get_non_nested_msr);
void hv_set_non_nested_register(unsigned int reg, u64 value)
void hv_set_non_nested_msr(unsigned int reg, u64 value)
{
if (hv_is_synic_reg(reg) && ms_hyperv.paravisor_present) {
if (hv_is_synic_msr(reg) && ms_hyperv.paravisor_present) {
hv_ivm_msr_write(reg, value);
/* Write proxy bit via wrmsrl instruction */
if (hv_is_sint_reg(reg))
if (hv_is_sint_msr(reg))
wrmsrl(reg, value | 1 << 20);
} else {
wrmsrl(reg, value);
}
}
EXPORT_SYMBOL_GPL(hv_set_non_nested_register);
EXPORT_SYMBOL_GPL(hv_set_non_nested_msr);
u64 hv_get_register(unsigned int reg)
u64 hv_get_msr(unsigned int reg)
{
if (hv_nested)
reg = hv_get_nested_reg(reg);
reg = hv_get_nested_msr(reg);
return hv_get_non_nested_register(reg);
return hv_get_non_nested_msr(reg);
}
EXPORT_SYMBOL_GPL(hv_get_register);
EXPORT_SYMBOL_GPL(hv_get_msr);
void hv_set_register(unsigned int reg, u64 value)
void hv_set_msr(unsigned int reg, u64 value)
{
if (hv_nested)
reg = hv_get_nested_reg(reg);
reg = hv_get_nested_msr(reg);
hv_set_non_nested_register(reg, value);
hv_set_non_nested_msr(reg, value);
}
EXPORT_SYMBOL_GPL(hv_set_register);
EXPORT_SYMBOL_GPL(hv_set_msr);
static void (*vmbus_handler)(void);
static void (*hv_stimer0_handler)(void);
......@@ -352,13 +352,24 @@ static void __init reduced_hw_init(void)
x86_init.irqs.pre_vector_init = x86_init_noop;
}
int hv_get_hypervisor_version(union hv_hypervisor_version_info *info)
{
unsigned int hv_max_functions;
hv_max_functions = cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS);
if (hv_max_functions < HYPERV_CPUID_VERSION) {
pr_err("%s: Could not detect Hyper-V version\n", __func__);
return -ENODEV;
}
cpuid(HYPERV_CPUID_VERSION, &info->eax, &info->ebx, &info->ecx, &info->edx);
return 0;
}
static void __init ms_hyperv_init_platform(void)
{
int hv_max_functions_eax;
int hv_host_info_eax;
int hv_host_info_ebx;
int hv_host_info_ecx;
int hv_host_info_edx;
#ifdef CONFIG_PARAVIRT
pv_info.name = "Hyper-V";
......@@ -409,21 +420,6 @@ static void __init ms_hyperv_init_platform(void)
pr_info("Hyper-V: running on a nested hypervisor\n");
}
/*
* Extract host information.
*/
if (hv_max_functions_eax >= HYPERV_CPUID_VERSION) {
hv_host_info_eax = cpuid_eax(HYPERV_CPUID_VERSION);
hv_host_info_ebx = cpuid_ebx(HYPERV_CPUID_VERSION);
hv_host_info_ecx = cpuid_ecx(HYPERV_CPUID_VERSION);
hv_host_info_edx = cpuid_edx(HYPERV_CPUID_VERSION);
pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n",
hv_host_info_ebx >> 16, hv_host_info_ebx & 0xFFFF,
hv_host_info_eax, hv_host_info_edx & 0xFFFFFF,
hv_host_info_ecx, hv_host_info_edx >> 24);
}
if (ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE) {
x86_platform.calibrate_tsc = hv_get_tsc_khz;
......@@ -456,7 +452,7 @@ static void __init ms_hyperv_init_platform(void)
/* To be supported: more work is required. */
ms_hyperv.features &= ~HV_MSR_REFERENCE_TSC_AVAILABLE;
/* HV_REGISTER_CRASH_CTL is unsupported. */
/* HV_MSR_CRASH_CTL is unsupported. */
ms_hyperv.misc_features &= ~HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
/* Don't trust Hyper-V's TLB-flushing hypercalls. */
......@@ -648,6 +644,7 @@ const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
.init.x2apic_available = ms_hyperv_x2apic_available,
.init.msi_ext_dest_id = ms_hyperv_msi_ext_dest_id,
.init.init_platform = ms_hyperv_init_platform,
.init.guest_late_init = ms_hyperv_late_init,
#ifdef CONFIG_AMD_MEM_ENCRYPT
.runtime.sev_es_hcall_prepare = hv_sev_es_hcall_prepare,
.runtime.sev_es_hcall_finish = hv_sev_es_hcall_finish,
......
......@@ -81,14 +81,14 @@ static int hv_ce_set_next_event(unsigned long delta,
current_tick = hv_read_reference_counter();
current_tick += delta;
hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick);
hv_set_msr(HV_MSR_STIMER0_COUNT, current_tick);
return 0;
}
static int hv_ce_shutdown(struct clock_event_device *evt)
{
hv_set_register(HV_REGISTER_STIMER0_COUNT, 0);
hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0);
hv_set_msr(HV_MSR_STIMER0_COUNT, 0);
hv_set_msr(HV_MSR_STIMER0_CONFIG, 0);
if (direct_mode_enabled && stimer0_irq >= 0)
disable_percpu_irq(stimer0_irq);
......@@ -119,7 +119,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
timer_cfg.direct_mode = 0;
timer_cfg.sintx = stimer0_message_sint;
}
hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64);
hv_set_msr(HV_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
return 0;
}
......@@ -372,11 +372,11 @@ static __always_inline u64 read_hv_clock_msr(void)
* is set to 0 when the partition is created and is incremented in 100
* nanosecond units.
*
* Use hv_raw_get_register() because this function is used from
noinstr. Notably, while HV_REGISTER_TIME_REF_COUNT is a synthetic
* Use hv_raw_get_msr() because this function is used from
noinstr. Notably, while HV_MSR_TIME_REF_COUNT is a synthetic
* register it doesn't need the GHCB path.
*/
return hv_raw_get_register(HV_REGISTER_TIME_REF_COUNT);
return hv_raw_get_msr(HV_MSR_TIME_REF_COUNT);
}
/*
......@@ -439,9 +439,9 @@ static void suspend_hv_clock_tsc(struct clocksource *arg)
union hv_reference_tsc_msr tsc_msr;
/* Disable the TSC page */
tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
tsc_msr.enable = 0;
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}
......@@ -450,10 +450,10 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
union hv_reference_tsc_msr tsc_msr;
/* Re-enable the TSC page */
tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
tsc_msr.enable = 1;
tsc_msr.pfn = tsc_pfn;
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
......@@ -555,14 +555,14 @@ static void __init hv_init_tsc_clocksource(void)
* thus TSC clocksource will work even without the real TSC page
* mapped.
*/
tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
if (hv_root_partition)
tsc_pfn = tsc_msr.pfn;
else
tsc_pfn = HVPFN_DOWN(virt_to_phys(tsc_page));
tsc_msr.enable = 1;
tsc_msr.pfn = tsc_pfn;
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
......
......@@ -16,6 +16,7 @@ config HYPERV
config HYPERV_VTL_MODE
bool "Enable Linux to boot in VTL context"
depends on X86_64 && HYPERV
depends on SMP
default n
help
Virtual Secure Mode (VSM) is a set of hypervisor capabilities and
......
......@@ -270,7 +270,7 @@ void hv_synic_enable_regs(unsigned int cpu)
union hv_synic_scontrol sctrl;
/* Setup the Synic's message page */
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
simp.simp_enabled = 1;
if (ms_hyperv.paravisor_present || hv_root_partition) {
......@@ -286,10 +286,10 @@ void hv_synic_enable_regs(unsigned int cpu)
>> HV_HYP_PAGE_SHIFT;
}
hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
hv_set_msr(HV_MSR_SIMP, simp.as_uint64);
/* Setup the Synic's event page */
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 1;
if (ms_hyperv.paravisor_present || hv_root_partition) {
......@@ -305,13 +305,12 @@ void hv_synic_enable_regs(unsigned int cpu)
>> HV_HYP_PAGE_SHIFT;
}
hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
/* Setup the shared SINT. */
if (vmbus_irq != -1)
enable_percpu_irq(vmbus_irq, 0);
shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
VMBUS_MESSAGE_SINT);
shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);
shared_sint.vector = vmbus_interrupt;
shared_sint.masked = false;
......@@ -326,14 +325,13 @@ void hv_synic_enable_regs(unsigned int cpu)
#else
shared_sint.auto_eoi = 0;
#endif
hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
shared_sint.as_uint64);
hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
/* Enable the global synic bit */
sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
sctrl.enable = 1;
hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
}
int hv_synic_init(unsigned int cpu)
......@@ -357,17 +355,15 @@ void hv_synic_disable_regs(unsigned int cpu)
union hv_synic_siefp siefp;
union hv_synic_scontrol sctrl;
shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
VMBUS_MESSAGE_SINT);
shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);
shared_sint.masked = 1;
/* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
shared_sint.as_uint64);
hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
/*
* In Isolation VM, sim and sief pages are allocated by
* paravisor. These pages also will be used by kdump
......@@ -382,9 +378,9 @@ void hv_synic_disable_regs(unsigned int cpu)
simp.base_simp_gpa = 0;
}
hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
hv_set_msr(HV_MSR_SIMP, simp.as_uint64);
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 0;
if (ms_hyperv.paravisor_present || hv_root_partition) {
......@@ -394,12 +390,12 @@ void hv_synic_disable_regs(unsigned int cpu)
siefp.base_siefp_gpa = 0;
}
hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
/* Disable the global synic bit */
sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
sctrl.enable = 0;
hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
if (vmbus_irq != -1)
disable_percpu_irq(vmbus_irq);
......
......@@ -20,8 +20,11 @@
#include <linux/sched/task_stack.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/efi.h>
#include <linux/kdebug.h>
#include <linux/kmsg_dump.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/set_memory.h>
......@@ -227,19 +230,19 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
* contain the size of the panic data in that page. Rest of the
* registers are no-op when the NOTIFY_MSG flag is set.
*/
hv_set_register(HV_REGISTER_CRASH_P0, 0);
hv_set_register(HV_REGISTER_CRASH_P1, 0);
hv_set_register(HV_REGISTER_CRASH_P2, 0);
hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);
hv_set_msr(HV_MSR_CRASH_P0, 0);
hv_set_msr(HV_MSR_CRASH_P1, 0);
hv_set_msr(HV_MSR_CRASH_P2, 0);
hv_set_msr(HV_MSR_CRASH_P3, virt_to_phys(hv_panic_page));
hv_set_msr(HV_MSR_CRASH_P4, bytes_written);
/*
* Let Hyper-V know there is crash data available along with
* the panic message.
*/
hv_set_register(HV_REGISTER_CRASH_CTL,
(HV_CRASH_CTL_CRASH_NOTIFY |
HV_CRASH_CTL_CRASH_NOTIFY_MSG));
hv_set_msr(HV_MSR_CRASH_CTL,
(HV_CRASH_CTL_CRASH_NOTIFY |
HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}
static struct kmsg_dumper hv_kmsg_dumper = {
......@@ -278,6 +281,14 @@ static void hv_kmsg_dump_register(void)
int __init hv_common_init(void)
{
int i;
union hv_hypervisor_version_info version;
/* Get information about the Hyper-V host version */
if (!hv_get_hypervisor_version(&version))
pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n",
version.major_version, version.minor_version,
version.build_number, version.service_number,
version.service_pack, version.service_branch);
if (hv_is_isolation_supported())
sysctl_record_panic_msg = 0;
......@@ -310,7 +321,7 @@ int __init hv_common_init(void)
* Register for panic kmsg callback only if the right
* capability is supported by the hypervisor.
*/
hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
hyperv_crash_ctl = hv_get_msr(HV_MSR_CRASH_CTL);
if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
hv_kmsg_dump_register();
......@@ -347,6 +358,72 @@ int __init hv_common_init(void)
return 0;
}
void __init ms_hyperv_late_init(void)
{
struct acpi_table_header *header;
acpi_status status;
u8 *randomdata;
u32 length, i;
/*
* Seed the Linux random number generator with entropy provided by
* the Hyper-V host in ACPI table OEM0.
*/
if (!IS_ENABLED(CONFIG_ACPI))
return;
status = acpi_get_table("OEM0", 0, &header);
if (ACPI_FAILURE(status) || !header)
return;
/*
* Since the "OEM0" table name is for OEM specific usage, verify
* that what we're seeing purports to be from Microsoft.
*/
if (strncmp(header->oem_table_id, "MICROSFT", 8))
goto error;
/*
* Ensure the length is reasonable. Requiring at least 8 bytes and
* no more than 4K bytes is somewhat arbitrary and just protects
* against a malformed table. Hyper-V currently provides 64 bytes,
* but allow for a change in a later version.
*/
if (header->length < sizeof(*header) + 8 ||
header->length > sizeof(*header) + SZ_4K)
goto error;
length = header->length - sizeof(*header);
randomdata = (u8 *)(header + 1);
pr_debug("Hyper-V: Seeding rng with %d random bytes from ACPI table OEM0\n",
length);
add_bootloader_randomness(randomdata, length);
/*
* To prevent the seed data from being visible in /sys/firmware/acpi,
* zero out the random data in the ACPI table and fixup the checksum.
* The zero'ing is done out of an abundance of caution in avoiding
* potential security risks to the rng. Similarly, reset the table
* length to just the header size so that a subsequent kexec doesn't
* try to use the zero'ed out random data.
*/
for (i = 0; i < length; i++) {
header->checksum += randomdata[i];
randomdata[i] = 0;
}
for (i = 0; i < sizeof(header->length); i++)
header->checksum += ((u8 *)&header->length)[i];
header->length = sizeof(*header);
for (i = 0; i < sizeof(header->length); i++)
header->checksum -= ((u8 *)&header->length)[i];
error:
acpi_put_table(header);
}
/*
* Hyper-V specific initialization and die code for
* individual CPUs that is common across all architectures.
......@@ -409,7 +486,7 @@ int hv_common_cpu_init(unsigned int cpu)
*inputarg = mem;
}
msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);
msr_vp_index = hv_get_msr(HV_MSR_VP_INDEX);
hv_vp_index[cpu] = msr_vp_index;
......@@ -506,7 +583,7 @@ EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
*/
static u64 __hv_read_ref_counter(void)
{
return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
return hv_get_msr(HV_MSR_TIME_REF_COUNT);
}
u64 (*hv_read_reference_counter)(void) = __hv_read_ref_counter;
......
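
The OEM0 checksum fixup in ms_hyperv_late_init() above relies on ACPI tables summing to zero modulo 256: clearing randomdata[i] removes that byte from the sum, so adding the same value back into header->checksum restores the invariant, and the later length rewrite is compensated byte-for-byte in the same way. A standalone sketch of the invariant (plain C, not kernel code):

	#include <stdint.h>
	#include <stddef.h>

	/* Every byte of a valid ACPI table, checksum included, sums to 0 mod 256. */
	static uint8_t acpi_table_sum(const uint8_t *table, size_t len)
	{
		uint8_t sum = 0;

		while (len--)
			sum += *table++;
		return sum;
	}

	/* Clearing a data byte while folding its old value into the checksum byte
	 * leaves acpi_table_sum() unchanged, i.e. still 0 for a valid table. */
	static void clear_byte_keep_checksum(uint8_t *table, size_t idx, size_t csum_idx)
	{
		table[csum_idx] += table[idx];
		table[idx] = 0;
	}
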
......@@ -2359,10 +2359,9 @@ static int vmbus_platform_driver_probe(struct platform_device *pdev)
return vmbus_acpi_add(pdev);
}
static int vmbus_platform_driver_remove(struct platform_device *pdev)
static void vmbus_platform_driver_remove(struct platform_device *pdev)
{
vmbus_mmio_remove();
return 0;
}
#ifdef CONFIG_PM_SLEEP
......@@ -2542,7 +2541,7 @@ static const struct dev_pm_ops vmbus_bus_pm = {
static struct platform_driver vmbus_platform_driver = {
.probe = vmbus_platform_driver_probe,
.remove = vmbus_platform_driver_remove,
.remove_new = vmbus_platform_driver_remove,
.driver = {
.name = "vmbus",
.acpi_match_table = ACPI_PTR(vmbus_acpi_device_ids),
......
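
For context, the vmbus change above is part of the tree-wide conversion to the void-returning remove callback: during the transition struct platform_driver carries both .remove (returning int) and .remove_new (returning void), and drivers whose teardown cannot usefully fail move to the latter. A generic sketch with a hypothetical driver name:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		return 0;		/* acquire resources here */
	}

	static void example_remove(struct platform_device *pdev)
	{
		/* Release resources; an error return would be ignored at this
		 * point anyway, which is why the callback type is now void. */
	}

	static struct platform_driver example_driver = {
		.probe		= example_probe,
		.remove_new	= example_remove,
		.driver		= {
			.name	= "example",
		},
	};
	module_platform_driver(example_driver);
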
......@@ -625,6 +625,37 @@ struct hv_retarget_device_interrupt {
struct hv_device_interrupt_target int_target;
} __packed __aligned(8);
/*
* These Hyper-V registers provide information equivalent to the CPUID
* instruction on x86/x64.
*/
#define HV_REGISTER_HYPERVISOR_VERSION 0x00000100 /*CPUID 0x40000002 */
#define HV_REGISTER_FEATURES 0x00000200 /*CPUID 0x40000003 */
#define HV_REGISTER_ENLIGHTENMENTS 0x00000201 /*CPUID 0x40000004 */
/*
* Synthetic register definitions equivalent to MSRs on x86/x64
*/
#define HV_REGISTER_GUEST_CRASH_P0 0x00000210
#define HV_REGISTER_GUEST_CRASH_P1 0x00000211
#define HV_REGISTER_GUEST_CRASH_P2 0x00000212
#define HV_REGISTER_GUEST_CRASH_P3 0x00000213
#define HV_REGISTER_GUEST_CRASH_P4 0x00000214
#define HV_REGISTER_GUEST_CRASH_CTL 0x00000215
#define HV_REGISTER_GUEST_OS_ID 0x00090002
#define HV_REGISTER_VP_INDEX 0x00090003
#define HV_REGISTER_TIME_REF_COUNT 0x00090004
#define HV_REGISTER_REFERENCE_TSC 0x00090017
#define HV_REGISTER_SINT0 0x000A0000
#define HV_REGISTER_SCONTROL 0x000A0010
#define HV_REGISTER_SIEFP 0x000A0012
#define HV_REGISTER_SIMP 0x000A0013
#define HV_REGISTER_EOM 0x000A0014
#define HV_REGISTER_STIMER0_CONFIG 0x000B0000
#define HV_REGISTER_STIMER0_COUNT 0x000B0001
/* HvGetVpRegisters hypercall input with variable size reg name list*/
struct hv_get_vp_registers_input {
......@@ -640,7 +671,6 @@ struct hv_get_vp_registers_input {
} element[];
} __packed;
/* HvGetVpRegisters returns an array of these output elements */
struct hv_get_vp_registers_output {
union {
......@@ -787,6 +817,29 @@ struct hv_input_unmap_device_interrupt {
#define HV_SOURCE_SHADOW_NONE 0x0
#define HV_SOURCE_SHADOW_BRIDGE_BUS_RANGE 0x1
/*
* Version info reported by hypervisor
*/
union hv_hypervisor_version_info {
struct {
u32 build_number;
u32 minor_version : 16;
u32 major_version : 16;
u32 service_pack;
u32 service_number : 24;
u32 service_branch : 8;
};
struct {
u32 eax;
u32 ebx;
u32 ecx;
u32 edx;
};
};
/*
* The whole argument should fit in a page to be able to pass to the hypervisor
* in one hypercall.
......
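
A minimal usage sketch for the new union, mirroring the host-version print that moves into hv_common_init() above; on x86 the four 32-bit words come from CPUID leaf 0x40000002, on ARM64 from the HV_REGISTER_HYPERVISOR_VERSION VP register (the function name here is illustrative only):

	static void report_host_version(void)
	{
		union hv_hypervisor_version_info ver;

		if (!hv_get_hypervisor_version(&ver))
			pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n",
				ver.major_version, ver.minor_version, ver.build_number,
				ver.service_number, ver.service_pack, ver.service_branch);
	}
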
......@@ -157,10 +157,12 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
* possibly deliver another msg from the
* hypervisor
*/
hv_set_register(HV_REGISTER_EOM, 0);
hv_set_msr(HV_MSR_EOM, 0);
}
}
int hv_get_hypervisor_version(union hv_hypervisor_version_info *info);
void hv_setup_vmbus_handler(void (*handler)(void));
void hv_remove_vmbus_handler(void);
void hv_setup_stimer0_handler(void (*handler)(void));
......@@ -193,6 +195,7 @@ extern u64 (*hv_read_reference_counter)(void);
int __init hv_common_init(void);
void __init hv_common_free(void);
void __init ms_hyperv_late_init(void);
int hv_common_cpu_init(unsigned int cpu);
int hv_common_cpu_die(unsigned int cpu);
......@@ -290,6 +293,7 @@ void hv_setup_dma_ops(struct device *dev, bool coherent);
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline void ms_hyperv_late_init(void) {}
static inline bool hv_is_isolation_supported(void) { return false; }
static inline enum hv_isolation_type hv_get_isolation_type(void)
{
......