Commit 8ef81a9a authored by Arnd Bergmann, committed by Paolo Bonzini

KVM: x86: hide KVM_HC_CLOCK_PAIRING on 32 bit

The newly added hypercall doesn't work on x86-32:

arch/x86/kvm/x86.c: In function 'kvm_pv_clock_pairing':
arch/x86/kvm/x86.c:6163:6: error: implicit declaration of function 'kvm_get_walltime_and_clockread'; did you mean 'kvm_get_time_scale'? [-Werror=implicit-function-declaration]
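
The root cause is a config-gated helper: kvm_get_walltime_and_clockread() is only defined inside an #ifdef CONFIG_X86_64 block, so on x86-32 the new hypercall handler ends up calling a function that was never declared. A minimal standalone sketch of the pattern (GUARD, helper() and caller() are hypothetical names, not kernel code):

/* guard_demo.c - build with "gcc -c guard_demo.c" and "gcc -c -DGUARD guard_demo.c" */
#ifdef GUARD
static int helper(void)		/* only exists when GUARD is defined */
{
	return 42;
}
#endif

int caller(void)
{
#ifdef GUARD			/* gate the call site the same way as the definition */
	return helper();
#else
	return -1;		/* fallback when the helper is compiled out */
#endif
}

Removing the #ifdef inside caller() and building without -DGUARD reproduces the same -Werror=implicit-function-declaration failure; guarding the call site as well makes both configurations compile.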

This adds an #ifdef CONFIG_X86_64 around the handler and its call site, matching the
guard around the related functions that are likewise only implemented on 64-bit systems.
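
For context, a 64-bit guest invokes the hypercall roughly as follows. This is an illustrative sketch modeled on the in-tree consumer (the ptp_kvm clock driver), not code from this commit; the header list is abbreviated and error handling is minimal:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/kvm_para.h>	/* kvm_hypercall2(), KVM_HC_CLOCK_PAIRING */
#include <uapi/asm/kvm_para.h>	/* struct kvm_clock_pairing, KVM_CLOCK_PAIRING_WALLCLOCK */

/* Buffer the host fills in; its guest-physical address is passed below. */
static struct kvm_clock_pairing clock_pair;

static int read_clock_pairing(void)
{
	long ret;

	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING,
			     slow_virt_to_phys(&clock_pair),	/* guest-physical address */
			     KVM_CLOCK_PAIRING_WALLCLOCK);
	if (ret != 0)
		return -EOPNOTSUPP;	/* e.g. -KVM_ENOSYS from a 32-bit host after this patch */

	/* clock_pair.sec/.nsec is the host wallclock paired with clock_pair.tsc */
	pr_info("paired: %lld.%09lld @ tsc %llu\n",
		clock_pair.sec, clock_pair.nsec, clock_pair.tsc);
	return 0;
}

Because the handler is now compiled out on 32-bit hosts, a guest must treat a negative return as "not supported" rather than as a hard error.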

Fixes: 55dd00a7 ("KVM: x86: add KVM_HC_CLOCK_PAIRING hypercall")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2e751dfb
arch/x86/kvm/x86.c
@@ -6148,6 +6148,7 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
+#ifdef CONFIG_X86_64
 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
 		unsigned long clock_type)
 {
@@ -6174,6 +6175,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
 
 	return ret;
 }
+#endif
 
 /*
  * kvm_pv_kick_cpu_op:  Kick a vcpu.
@@ -6239,9 +6241,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 		kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
 		ret = 0;
 		break;
+#ifdef CONFIG_X86_64
 	case KVM_HC_CLOCK_PAIRING:
 		ret = kvm_pv_clock_pairing(vcpu, a0, a1);
 		break;
+#endif
 	default:
 		ret = -KVM_ENOSYS;
 		break;