Commit 7e96bf47 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "ARM:

   - Fix MTE shared page detection

   - Enable selftest's use of PMU registers when asked to

  s390:

   - restore 5.13 debugfs names

  x86:

   - fix sizes for vcpu-id indexed arrays

   - fixes for AMD virtualized LAPIC (AVIC)

   - other small bugfixes

  Generic:

   - access tracking performance test

   - dirty_log_perf_test command line parsing fix

   - Fix selftest use of obsolete pthread_yield() in favour of
     sched_yield()

   - use cpu_relax when halt polling

   - fixed missing KVM_CLEAR_DIRTY_LOG compat ioctl"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: add missing compat KVM_CLEAR_DIRTY_LOG
  KVM: use cpu_relax when halt polling
  KVM: SVM: use vmcb01 in svm_refresh_apicv_exec_ctrl
  KVM: SVM: tweak warning about enabled AVIC on nested entry
  KVM: SVM: svm_set_vintr don't warn if AVIC is active but is about to be deactivated
  KVM: s390: restore old debugfs names
  KVM: SVM: delay svm_vcpu_init_msrpm after svm->vmcb is initialized
  KVM: selftests: Introduce access_tracking_perf_test
  KVM: selftests: Fix missing break in dirty_log_perf_test arg parsing
  x86/kvm: fix vcpu-id indexed array sizes
  KVM: x86: Check the right feature bit for MSR_KVM_ASYNC_PF_ACK access
  docs: virt: kvm: api.rst: replace some characters
  KVM: Documentation: Fix KVM_CAP_ENFORCE_PV_FEATURE_CPUID name
  KVM: nSVM: Swap the parameter order for svm_copy_vmrun_state()/svm_copy_vmloadsave_state()
  KVM: nSVM: Rename nested_svm_vmloadsave() to svm_copy_vmloadsave_state()
  KVM: arm64: selftests: get-reg-list: actually enable pmu regs in pmu sublist
  KVM: selftests: change pthread_yield to sched_yield
  KVM: arm64: Fix detection of shared VMAs on guest fault
parents 2b99c470 8750f9bb
@@ -855,7 +855,7 @@ in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to
 use PPIs designated for specific cpus. The irq field is interpreted
 like this::

- bits: | 31 ... 28 | 27 ... 24 | 23 ... 16 | 15 ... 0 |
+bits: | 31 ... 28 | 27 ... 24 | 23 ... 16 | 15 ... 0 |
 field: | vcpu2_index | irq_type | vcpu_index | irq_id |

 The irq_type field has the following values:
@@ -2149,10 +2149,10 @@ prior to calling the KVM_RUN ioctl.
 Errors:

  ====== ============================================================
- ENOENT   no such register
- EINVAL   invalid register ID, or no such register or used with VMs in
+ENOENT no such register
+EINVAL invalid register ID, or no such register or used with VMs in
         protected virtualization mode on s390
- EPERM    (arm64) register access not allowed before vcpu finalization
+EPERM (arm64) register access not allowed before vcpu finalization
  ====== ============================================================

 (These error codes are indicative only: do not rely on a specific error
@@ -2590,10 +2590,10 @@ following id bit patterns::
 Errors include:

  ======== ============================================================
- ENOENT   no such register
- EINVAL   invalid register ID, or no such register or used with VMs in
+ENOENT no such register
+EINVAL invalid register ID, or no such register or used with VMs in
         protected virtualization mode on s390
- EPERM    (arm64) register access not allowed before vcpu finalization
+EPERM (arm64) register access not allowed before vcpu finalization
  ======== ============================================================

 (These error codes are indicative only: do not rely on a specific error
@@ -3112,13 +3112,13 @@ current state. "addr" is ignored.
 Errors:

  ====== =================================================================
- EINVAL    the target is unknown, or the combination of features is invalid.
- ENOENT    a features bit specified is unknown.
+EINVAL the target is unknown, or the combination of features is invalid.
+ENOENT a features bit specified is unknown.
  ====== =================================================================

 This tells KVM what type of CPU to present to the guest, and what
-optional features it should have.  This will cause a reset of the cpu
-registers to their initial values.  If this is not called, KVM_RUN will
+optional features it should have. This will cause a reset of the cpu
+registers to their initial values. If this is not called, KVM_RUN will
 return ENOEXEC for that vcpu.

 The initial values are defined as:
@@ -3239,8 +3239,8 @@ VCPU matching underlying host.
 Errors:

  ===== ==============================================================
- E2BIG     the reg index list is too big to fit in the array specified by
-            the user (the number required will be written into n).
+E2BIG the reg index list is too big to fit in the array specified by
+the user (the number required will be written into n).
  ===== ==============================================================

 ::
@@ -3288,7 +3288,7 @@ specific device.
 ARM/arm64 divides the id field into two parts, a device id and an
 address type id specific to the individual device::

- bits: | 63 ... 32 | 31 ... 16 | 15 ... 0 |
+bits: | 63 ... 32 | 31 ... 16 | 15 ... 0 |
 field: | 0x00000000 | device id | addr type id |

 ARM/arm64 currently only require this when using the in-kernel GIC
@@ -7049,7 +7049,7 @@ In combination with KVM_CAP_X86_USER_SPACE_MSR, this allows user space to
 trap and emulate MSRs that are outside of the scope of KVM as well as
 limit the attack surface on KVM's MSR emulation code.

-8.28 KVM_CAP_ENFORCE_PV_CPUID
+8.28 KVM_CAP_ENFORCE_PV_FEATURE_CPUID
 -----------------------------

 Architectures: x86
......
@@ -947,7 +947,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 vma_shift = get_vma_page_shift(vma, hva);
         }

-        shared = (vma->vm_flags & VM_PFNMAP);
+        shared = (vma->vm_flags & VM_SHARED);

         switch (vma_shift) {
 #ifndef __PAGETABLE_PMD_FOLDED
......
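For context: VM_SHARED is what tracks a MAP_SHARED mapping, which is the property the MTE tag logic actually needs to know about; VM_PFNMAP marks raw PFN remappings (e.g. MMIO) and is a different thing entirely. A user-space sketch of where these flags originate (illustrative only, not kernel code):

#include <sys/mman.h>
#include <stddef.h>

int main(void)
{
        /* This mapping gets VM_SHARED set in its kernel-side VMA ... */
        void *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        /* ... while a private one does not. Neither implies VM_PFNMAP,
         * which the kernel sets itself via remap_pfn_range(). */
        void *priv = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        munmap(shared, 4096);
        munmap(priv, 4096);
        return 0;
}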
@@ -445,15 +445,15 @@ struct kvm_vcpu_stat {
         u64 instruction_sigp_init_cpu_reset;
         u64 instruction_sigp_cpu_reset;
         u64 instruction_sigp_unknown;
-        u64 diagnose_10;
-        u64 diagnose_44;
-        u64 diagnose_9c;
-        u64 diagnose_9c_ignored;
-        u64 diagnose_9c_forward;
-        u64 diagnose_258;
-        u64 diagnose_308;
-        u64 diagnose_500;
-        u64 diagnose_other;
+        u64 instruction_diagnose_10;
+        u64 instruction_diagnose_44;
+        u64 instruction_diagnose_9c;
+        u64 diag_9c_ignored;
+        u64 diag_9c_forward;
+        u64 instruction_diagnose_258;
+        u64 instruction_diagnose_308;
+        u64 instruction_diagnose_500;
+        u64 instruction_diagnose_other;
         u64 pfault_sync;
 };
......
@@ -24,7 +24,7 @@ static int diag_release_pages(struct kvm_vcpu *vcpu)
         start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
         end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE;
-        vcpu->stat.diagnose_10++;
+        vcpu->stat.instruction_diagnose_10++;

         if (start & ~PAGE_MASK || end & ~PAGE_MASK || start >= end
             || start < 2 * PAGE_SIZE)
@@ -74,7 +74,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
         VCPU_EVENT(vcpu, 3, "diag page reference parameter block at 0x%llx",
                    vcpu->run->s.regs.gprs[rx]);
-        vcpu->stat.diagnose_258++;
+        vcpu->stat.instruction_diagnose_258++;
         if (vcpu->run->s.regs.gprs[rx] & 7)
                 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
         rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm));
@@ -145,7 +145,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
 static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
 {
         VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
-        vcpu->stat.diagnose_44++;
+        vcpu->stat.instruction_diagnose_44++;
         kvm_vcpu_on_spin(vcpu, true);
         return 0;
 }
@@ -169,7 +169,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
         int tid;

         tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
-        vcpu->stat.diagnose_9c++;
+        vcpu->stat.instruction_diagnose_9c++;

         /* yield to self */
         if (tid == vcpu->vcpu_id)
@@ -192,7 +192,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
                 VCPU_EVENT(vcpu, 5,
                            "diag time slice end directed to %d: yield forwarded",
                            tid);
-                vcpu->stat.diagnose_9c_forward++;
+                vcpu->stat.diag_9c_forward++;
                 return 0;
         }
@@ -203,7 +203,7 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
         return 0;
 no_yield:
         VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d: ignored", tid);
-        vcpu->stat.diagnose_9c_ignored++;
+        vcpu->stat.diag_9c_ignored++;
         return 0;
 }
@@ -213,7 +213,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
         unsigned long subcode = vcpu->run->s.regs.gprs[reg] & 0xffff;

         VCPU_EVENT(vcpu, 3, "diag ipl functions, subcode %lx", subcode);
-        vcpu->stat.diagnose_308++;
+        vcpu->stat.instruction_diagnose_308++;
         switch (subcode) {
         case 3:
                 vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
@@ -245,7 +245,7 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
 {
         int ret;

-        vcpu->stat.diagnose_500++;
+        vcpu->stat.instruction_diagnose_500++;
         /* No virtio-ccw notification? Get out quickly. */
         if (!vcpu->kvm->arch.css_support ||
             (vcpu->run->s.regs.gprs[1] != KVM_S390_VIRTIO_CCW_NOTIFY))
@@ -299,7 +299,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
         case 0x500:
                 return __diag_virtio_hypercall(vcpu);
         default:
-                vcpu->stat.diagnose_other++;
+                vcpu->stat.instruction_diagnose_other++;
                 return -EOPNOTSUPP;
         }
 }
@@ -163,15 +163,15 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
         STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
         STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
         STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
-        STATS_DESC_COUNTER(VCPU, diagnose_10),
-        STATS_DESC_COUNTER(VCPU, diagnose_44),
-        STATS_DESC_COUNTER(VCPU, diagnose_9c),
-        STATS_DESC_COUNTER(VCPU, diagnose_9c_ignored),
-        STATS_DESC_COUNTER(VCPU, diagnose_9c_forward),
-        STATS_DESC_COUNTER(VCPU, diagnose_258),
-        STATS_DESC_COUNTER(VCPU, diagnose_308),
-        STATS_DESC_COUNTER(VCPU, diagnose_500),
-        STATS_DESC_COUNTER(VCPU, diagnose_other),
+        STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
+        STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
+        STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
+        STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
+        STATS_DESC_COUNTER(VCPU, diag_9c_forward),
+        STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
+        STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
+        STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
+        STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
         STATS_DESC_COUNTER(VCPU, pfault_sync)
 };
 static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
......
@@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
 {
         ioapic->rtc_status.pending_eoi = 0;
-        bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
+        bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1);
 }

 static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
......
@@ -43,13 +43,13 @@ struct kvm_vcpu;

 struct dest_map {
         /* vcpu bitmap where IRQ has been sent */
-        DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
+        DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1);

         /*
          * Vector sent to a given vcpu, only valid when
          * the vcpu's bit in map is set
          */
-        u8 vectors[KVM_MAX_VCPU_ID];
+        u8 vectors[KVM_MAX_VCPU_ID + 1];
 };
......
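The underlying off-by-one: KVM_MAX_VCPU_ID is itself a valid vcpu id, so an array or bitmap indexed by vcpu id needs KVM_MAX_VCPU_ID + 1 entries. A minimal standalone sketch of the inclusive-bound rule (MAX_ID is a hypothetical stand-in):

#include <assert.h>

#define MAX_ID 7        /* hypothetical upper bound, inclusive */

static unsigned char vectors[MAX_ID + 1];

int main(void)
{
        int id = MAX_ID;        /* the largest valid id */

        assert(id < (int)(sizeof(vectors) / sizeof(vectors[0])));
        vectors[id] = 0x42;     /* in bounds only because of the + 1 */
        return 0;
}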
@@ -646,7 +646,7 @@ static int svm_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
 void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
         struct vcpu_svm *svm = to_svm(vcpu);
-        struct vmcb *vmcb = svm->vmcb;
+        struct vmcb *vmcb = svm->vmcb01.ptr;
         bool activated = kvm_vcpu_apicv_active(vcpu);

         if (!enable_apicv)
......
@@ -515,7 +515,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
          * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
          * avic_physical_id.
          */
-        WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);
+        WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));

         /* Copied from vmcb01. msrpm_base can be overwritten later. */
         svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
@@ -702,8 +702,8 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 }

 /* Copy state save area fields which are handled by VMRUN */
-void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
-                          struct vmcb_save_area *to_save)
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+                          struct vmcb_save_area *from_save)
 {
         to_save->es = from_save->es;
         to_save->cs = from_save->cs;
@@ -722,7 +722,7 @@ void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
         to_save->cpl = 0;
 }

-void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
 {
         to_vmcb->save.fs = from_vmcb->save.fs;
         to_vmcb->save.gs = from_vmcb->save.gs;
@@ -1385,7 +1385,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
         svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;

-        svm_copy_vmrun_state(save, &svm->vmcb01.ptr->save);
+        svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
         nested_load_control_from_vmcb12(svm, ctl);

         svm_switch_vmcb(svm, &svm->nested.vmcb02);
......
@@ -1406,8 +1406,6 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
                 goto error_free_vmsa_page;
         }

-        svm_vcpu_init_msrpm(vcpu, svm->msrpm);
-
         svm->vmcb01.ptr = page_address(vmcb01_page);
         svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
@@ -1419,6 +1417,8 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
         svm_switch_vmcb(svm, &svm->vmcb01);
         init_vmcb(vcpu);

+        svm_vcpu_init_msrpm(vcpu, svm->msrpm);
+
         svm_init_osvw(vcpu);
         vcpu->arch.microcode_version = 0x01000065;
@@ -1568,8 +1568,11 @@ static void svm_set_vintr(struct vcpu_svm *svm)
 {
         struct vmcb_control_area *control;

-        /* The following fields are ignored when AVIC is enabled */
-        WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu));
+        /*
+         * The following fields are ignored when AVIC is enabled
+         */
+        WARN_ON(kvm_apicv_activated(svm->vcpu.kvm));

         svm_set_intercept(svm, INTERCEPT_VINTR);

         /*
@@ -2147,11 +2150,12 @@ static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
         ret = kvm_skip_emulated_instruction(vcpu);

         if (vmload) {
-                nested_svm_vmloadsave(vmcb12, svm->vmcb);
+                svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
                 svm->sysenter_eip_hi = 0;
                 svm->sysenter_esp_hi = 0;
-        } else
-                nested_svm_vmloadsave(svm->vmcb, vmcb12);
+        } else {
+                svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
+        }

         kvm_vcpu_unmap(vcpu, &map, true);
@@ -4344,8 +4348,8 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
                 BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);

-                svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-                                     map_save.hva + 0x400);
+                svm_copy_vmrun_state(map_save.hva + 0x400,
+                                     &svm->vmcb01.ptr->save);

                 kvm_vcpu_unmap(vcpu, &map_save, true);
         }
@@ -4393,8 +4397,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
                                  &map_save) == -EINVAL)
                         return 1;

-                svm_copy_vmrun_state(map_save.hva + 0x400,
-                                     &svm->vmcb01.ptr->save);
+                svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
+                                     map_save.hva + 0x400);

                 kvm_vcpu_unmap(vcpu, &map_save, true);
         }
......
@@ -464,9 +464,9 @@ void svm_leave_nested(struct vcpu_svm *svm);
 void svm_free_nested(struct vcpu_svm *svm);
 int svm_allocate_nested(struct vcpu_svm *svm);
 int nested_svm_vmrun(struct kvm_vcpu *vcpu);
-void svm_copy_vmrun_state(struct vmcb_save_area *from_save,
-                          struct vmcb_save_area *to_save);
-void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
+void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
+                          struct vmcb_save_area *from_save);
+void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
 int nested_svm_vmexit(struct vcpu_svm *svm);

 static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
......
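After the swap, both helpers follow the memcpy()-style convention of destination first, source second, matching their to_/from_ parameter names. A hypothetical sketch (names invented for illustration) of why a consistent order pays off at call sites such as the enter/leave-SMM pair above:

#include <string.h>

struct save_area { char data[64]; };

/* memcpy()-style signature: every call site reads as copy(dst, src) */
static void copy_save_area(struct save_area *to, const struct save_area *from)
{
        memcpy(to, from, sizeof(*to));
}

int main(void)
{
        struct save_area host = {{0}}, smram = {{0}};

        copy_save_area(&smram, &host);  /* enter SMM: SMRAM <- host state */
        copy_save_area(&host, &smram);  /* leave SMM: host state <- SMRAM */
        return 0;
}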
@@ -89,7 +89,7 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments(
          * as we mark it dirty unconditionally towards end of vcpu
          * init phase.
          */
-        if (vmcb && vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
+        if (vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
             hve->hv_enlightenments_control.msr_bitmap)
                 vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
 }
......
@@ -3407,7 +3407,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                         return 1;
                 break;
         case MSR_KVM_ASYNC_PF_ACK:
-                if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+                if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
                         return 1;
                 if (data & 0x1) {
                         vcpu->arch.apf.pageready_pending = false;
@@ -3746,7 +3746,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 msr_info->data = vcpu->arch.apf.msr_int_val;
                 break;
         case MSR_KVM_ASYNC_PF_ACK:
-                if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
+                if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
                         return 1;
                 msr_info->data = 0;
......
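The rationale: MSR_KVM_ASYNC_PF_ACK belongs to the interrupt-based async page fault protocol, so access to it is advertised by KVM_FEATURE_ASYNC_PF_INT, not the legacy KVM_FEATURE_ASYNC_PF bit. A guest-side sketch of how that bit is discovered, assuming the usual uapi kvm_para.h values (leaf 0x40000001, bit 14):

#include <stdint.h>
#include <cpuid.h>

#define KVM_CPUID_FEATURES       0x40000001
#define KVM_FEATURE_ASYNC_PF_INT 14

static int async_pf_int_supported(void)
{
        uint32_t eax, ebx, ecx, edx;

        /* paravirt features live in eax of the KVM CPUID leaf */
        __cpuid(KVM_CPUID_FEATURES, eax, ebx, ecx, edx);
        return eax & (1u << KVM_FEATURE_ASYNC_PF_INT);
}

int main(void)
{
        return !async_pf_int_supported();
}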
@@ -38,6 +38,7 @@
 /x86_64/xen_vmcall_test
 /x86_64/xss_msr_test
 /x86_64/vmx_pmu_msrs_test
+/access_tracking_perf_test
 /demand_paging_test
 /dirty_log_test
 /dirty_log_perf_test
......
@@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
 TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
+TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
 TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
......
@@ -1019,7 +1019,8 @@ static __u64 sve_rejects_set[] = {
 #define VREGS_SUBLIST \
         { "vregs", .regs = vregs, .regs_n = ARRAY_SIZE(vregs), }
 #define PMU_SUBLIST \
-        { "pmu", .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
+        { "pmu", .capability = KVM_CAP_ARM_PMU_V3, .feature = KVM_ARM_VCPU_PMU_V3, \
+          .regs = pmu_regs, .regs_n = ARRAY_SIZE(pmu_regs), }
 #define SVE_SUBLIST \
         { "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
           .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
......
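Without .capability and .feature, the test never requested the vPMU at vcpu creation, so the pmu sublist was silently skipped. Roughly, the harness ends up doing the equivalent of this arm64-only sketch (the helper name is hypothetical; the ioctls are the real KVM ones):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int vcpu_init_with_pmu(int vm_fd, int vcpu_fd)
{
        struct kvm_vcpu_init init;

        /* start from the host's preferred target, features zeroed */
        if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
                return -1;
        /* opt in to the vPMU before the vcpu is initialized */
        init.features[KVM_ARM_VCPU_PMU_V3 / 32] |=
                1u << (KVM_ARM_VCPU_PMU_V3 % 32);
        return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}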
@@ -312,6 +312,7 @@ int main(int argc, char *argv[])
                         break;
                 case 'o':
                         p.partition_vcpu_memory_access = false;
+                        break;
                 case 's':
                         p.backing_src = parse_backing_src_type(optarg);
                         break;
......
@@ -320,7 +320,7 @@ int main(int ac, char **av)
         run_delay = get_run_delay();
         pthread_create(&thread, &attr, do_steal_time, NULL);
         do
-                pthread_yield();
+                sched_yield();
         while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
         pthread_join(thread, NULL);
         run_delay = get_run_delay() - run_delay;
......
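pthread_yield() is a non-portable GNU extension that newer glibc marks as deprecated; sched_yield() is the POSIX equivalent and needs only <sched.h>. A minimal sketch of the same spin-until-condition pattern (progress_made() is a hypothetical stand-in for the test's run-delay check):

#include <sched.h>
#include <stdbool.h>

static bool progress_made(void)
{
        return true;    /* stand-in; the real test compares run delays */
}

int main(void)
{
        do
                sched_yield();  /* POSIX replacement for pthread_yield() */
        while (!progress_made());
        return 0;
}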
@@ -3110,6 +3110,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                                 ++vcpu->stat.generic.halt_poll_invalid;
                                 goto out;
                         }
+                        cpu_relax();
                         poll_end = cur = ktime_get();
                 } while (kvm_vcpu_can_poll(cur, stop));
         }
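cpu_relax() is the kernel's spin-loop hint (the PAUSE instruction on x86): it reduces power draw and eases pipeline and SMT-sibling contention while busy-waiting, which matters here because halt polling spins on ktime_get(). A rough user-space analogue for x86 (illustrative only; flag_set() is hypothetical):

#include <immintrin.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool flag;

static bool flag_set(void)
{
        return atomic_load(&flag);
}

static void wait_for_flag(void)
{
        while (!flag_set())
                _mm_pause();    /* the hint cpu_relax() emits on x86 */
}

int main(void)
{
        atomic_store(&flag, true);
        wait_for_flag();        /* returns immediately; real use spins */
        return 0;
}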
@@ -4390,6 +4391,16 @@ struct compat_kvm_dirty_log {
         };
 };

+struct compat_kvm_clear_dirty_log {
+        __u32 slot;
+        __u32 num_pages;
+        __u64 first_page;
+        union {
+                compat_uptr_t dirty_bitmap; /* one bit per page */
+                __u64 padding2;
+        };
+};
+
 static long kvm_vm_compat_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
 {
@@ -4399,6 +4410,24 @@ static long kvm_vm_compat_ioctl(struct file *filp,
         if (kvm->mm != current->mm)
                 return -EIO;
         switch (ioctl) {
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+        case KVM_CLEAR_DIRTY_LOG: {
+                struct compat_kvm_clear_dirty_log compat_log;
+                struct kvm_clear_dirty_log log;
+
+                if (copy_from_user(&compat_log, (void __user *)arg,
+                                   sizeof(compat_log)))
+                        return -EFAULT;
+                log.slot = compat_log.slot;
+                log.num_pages = compat_log.num_pages;
+                log.first_page = compat_log.first_page;
+                log.padding2 = compat_log.padding2;
+                log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
+
+                r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
+                break;
+        }
+#endif
         case KVM_GET_DIRTY_LOG: {
                 struct compat_kvm_dirty_log compat_log;
                 struct kvm_dirty_log log;
......
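The compat struct mirrors the native layout but narrows the bitmap pointer to compat_uptr_t so it matches what 32-bit userspace actually passed in; compat_ptr() then widens it back before the regular handler runs. For comparison, the native definition as it appears in include/uapi/linux/kvm.h:

/* for KVM_CLEAR_DIRTY_LOG */
struct kvm_clear_dirty_log {
        __u32 slot;
        __u32 num_pages;
        __u64 first_page;
        union {
                void __user *dirty_bitmap; /* one bit per page */
                __u64 padding2;
        };
};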