Commit e9de42d8 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Reverting a 3.16 patch, fixing two bugs in device assignment (one has
  a CVE), and fixing some problems introduced during the merge window
  (the CMA bug came in via Andrew, the x86 ones via yours truly)"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  virt/kvm/assigned-dev.c: Set 'dev->irq_source_id' to '-1' after free it
  Revert "KVM: x86: Increase the number of fixed MTRR regs to 10"
  KVM: x86: do not check CS.DPL against RPL during task switch
  KVM: x86: Avoid emulating instructions on #UD mistakenly
  PC, KVM, CMA: Fix regression caused by wrong get_order() use
  kvm: iommu: fix the third parameter of kvm_iommu_put_pages (CVE-2014-3601)
parents be816bc4 30d1e0e8
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -101,7 +101,7 @@ struct kvm_rma_info *kvm_alloc_rma()
 	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
 	if (!ri)
 		return NULL;
-	page = cma_alloc(kvm_cma, kvm_rma_pages, get_order(kvm_rma_pages));
+	page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
 	if (!page)
 		goto err_out;
 	atomic_set(&ri->use_count, 1);
@@ -135,12 +135,12 @@ struct page *kvm_alloc_hpt(unsigned long nr_pages)
 {
 	unsigned long align_pages = HPT_ALIGN_PAGES;
 
-	VM_BUG_ON(get_order(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 	/* Old CPUs require HPT aligned on a multiple of its size */
 	if (!cpu_has_feature(CPU_FTR_ARCH_206))
 		align_pages = nr_pages;
-	return cma_alloc(kvm_cma, nr_pages, get_order(align_pages));
+	return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
 }
 EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
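A note on the cma_alloc() change above: as the fix implies, the third argument passed to cma_alloc() here should be the base-2 order of the page count, while get_order() converts a size in bytes into an allocation order, so feeding it a page count yields a far too small value. The user-space sketch below only mimics that arithmetic; the helper names, the 4 KiB PAGE_SHIFT, and the 16384-page sample count are assumptions for illustration, not kernel code.

/* Minimal sketch (not kernel code) of why get_order(nr_pages) is the wrong
 * value where order_base_2(nr_pages) is wanted. */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages, for illustration only */

/* bytes -> allocation order, mimicking the kernel's get_order() */
static int get_order_bytes(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

/* page count -> its base-2 order, mimicking the kernel's order_base_2() */
static int order_base_2_pages(unsigned long n)
{
	int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned long kvm_rma_pages = 1UL << 14;	/* assumed sample: 16384 pages */

	/* old: the page count is misread as a byte count -> order 2 */
	printf("get_order(%lu)     = %d\n", kvm_rma_pages,
	       get_order_bytes(kvm_rma_pages));
	/* fixed: order of the page count itself -> order 14 */
	printf("order_base_2(%lu) = %d\n", kvm_rma_pages,
	       order_base_2_pages(kvm_rma_pages));
	return 0;
}

With these sample numbers the old expression yields order 2 where the fix yields order 14; that silent shrinking of the value is the regression the commit title refers to.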
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
-#define KVM_NR_VAR_MTRR 10
+#define KVM_NR_VAR_MTRR 8
 #define ASYNC_PF_PER_VCPU 64
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1491,9 +1491,6 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 			goto exception;
 		break;
 	case VCPU_SREG_CS:
-		if (in_task_switch && rpl != dpl)
-			goto exception;
-
 		if (!(seg_desc.type & 8))
 			goto exception;
@@ -4394,8 +4391,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 	ctxt->execute = opcode.u.execute;
 
+	if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
+		return EMULATION_FAILED;
+
 	if (unlikely(ctxt->d &
-	    (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
+	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
 		/*
 		 * These are copied unconditionally here, and checked unconditionally
 		 * in x86_emulate_insn.
@@ -4406,9 +4406,6 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 		if (ctxt->d & NotImpl)
 			return EMULATION_FAILED;
 
-		if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
-			return EMULATION_FAILED;
-
 		if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
 			ctxt->op_bytes = 8;
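Taken together, the two x86_decode_insn() hunks above hoist the "may this opcode be emulated on #UD?" test out of the unlikely-flags block and drop EmulateOnUD from that block's mask. The bug was one of ordering: an opcode whose decode flags contained none of the listed bits skipped the whole block, and with it the #UD check, so the emulator could mistakenly emulate it. Below is a minimal sketch of the two control-flow shapes; the flag values and return codes are simplified stand-ins, not the real decoder.

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-ins for the decoder's flag bits and return codes */
#define EmulateOnUD	0x1
#define NotImpl		0x2

#define EMULATION_FAILED	-1
#define EMULATION_OK		0

/* old shape: the #UD test lived inside the "unusual flags" block, so an
 * opcode with none of those flags was never rejected on a #UD decode */
static int decode_old(unsigned int d, bool ud)
{
	if (d & (NotImpl | EmulateOnUD /* | ... */)) {
		if (!(d & EmulateOnUD) && ud)
			return EMULATION_FAILED;
		/* ... other flag handling ... */
	}
	return EMULATION_OK;
}

/* fixed shape: reject first, before any other flag handling */
static int decode_new(unsigned int d, bool ud)
{
	if (ud && !(d & EmulateOnUD))
		return EMULATION_FAILED;
	/* ... flag handling without EmulateOnUD in the mask ... */
	return EMULATION_OK;
}

int main(void)
{
	/* opcode with d == 0, decoded while handling a #UD intercept */
	printf("old: %d (mistakenly emulated)\n", decode_old(0, true));
	printf("new: %d (emulation refused)\n",   decode_new(0, true));
	return 0;
}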
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -526,8 +526,10 @@ static int assign_guest_irq(struct kvm *kvm,
 		dev->irq_requested_type |= guest_irq_type;
 		if (dev->ack_notifier.gsi != -1)
 			kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
-	} else
+	} else {
 		kvm_free_irq_source_id(kvm, dev->irq_source_id);
+		dev->irq_source_id = -1;
+	}
 
 	return r;
 }
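The assigned-dev.c hunk is a stale-handle fix: on this error path the IRQ source id was freed, but dev->irq_source_id still held the old value, so later teardown code that checks for -1 before freeing could free the same id a second time. Writing the -1 sentinel back makes subsequent code see "nothing allocated". A generic sketch of the pattern, with made-up types and helpers rather than KVM's own:

#include <stdio.h>

/* made-up stand-ins; not KVM's real types or helpers */
struct assigned_dev {
	int irq_source_id;	/* -1 means "no source id allocated" */
};

static void free_irq_source_id(int id)
{
	printf("freeing irq source id %d\n", id);
}

/* later teardown: only frees an id that still looks allocated */
static void deassign(struct assigned_dev *dev)
{
	if (dev->irq_source_id != -1) {
		free_irq_source_id(dev->irq_source_id);
		dev->irq_source_id = -1;
	}
}

int main(void)
{
	struct assigned_dev dev = { .irq_source_id = 7 };

	/* error path: free the id and, as in the fix, mark it as gone ... */
	free_irq_source_id(dev.irq_source_id);
	dev.irq_source_id = -1;

	/* ... so teardown does not free id 7 a second time */
	deassign(&dev);
	return 0;
}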
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -61,6 +61,14 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
 	return pfn;
 }
 
+static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+{
+	unsigned long i;
+
+	for (i = 0; i < npages; ++i)
+		kvm_release_pfn_clean(pfn + i);
+}
+
 int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 {
 	gfn_t gfn, end_gfn;
@@ -123,6 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 		if (r) {
 			printk(KERN_ERR "kvm_iommu_map_address:"
 			       "iommu failed to map pfn=%llx\n", pfn);
+			kvm_unpin_pages(kvm, pfn, page_size);
 			goto unmap_pages;
 		}
@@ -134,7 +143,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 	return 0;
 
 unmap_pages:
-	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
+	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
 	return r;
 }
@@ -266,14 +275,6 @@ int kvm_iommu_map_guest(struct kvm *kvm)
 	return r;
 }
 
-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
-{
-	unsigned long i;
-
-	for (i = 0; i < npages; ++i)
-		kvm_release_pfn_clean(pfn + i);
-}
-
 static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages)
 {
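The iommu.c changes fix CVE-2014-3601, which boils down to a units bug: kvm_iommu_put_pages() takes a page count as its third argument (see the signature in the last hunk), but the error path handed it the absolute gfn at which mapping stopped, so cleanup walked, and dropped references for, pages far beyond what this slot had actually pinned. The same series moves kvm_unpin_pages() above kvm_iommu_map_pages() and calls it when iommu_map() fails, so a pfn range that was pinned but never mapped is released as well. A tiny arithmetic sketch of the old versus fixed call; the sample gfn values are made up.

#include <stdio.h>

typedef unsigned long long gfn_t;	/* stand-in for the kernel type */

int main(void)
{
	gfn_t base_gfn = 0x100000;	/* made-up slot start */
	gfn_t gfn      = 0x100040;	/* made-up gfn where mapping failed */

	/* old: the absolute gfn is handed to a parameter that means "npages" */
	printf("old: release %llu pages starting at gfn 0x%llx\n",
	       gfn, base_gfn);

	/* fixed: only the pages actually mapped so far in this slot */
	printf("new: release %llu pages starting at gfn 0x%llx\n",
	       gfn - base_gfn, base_gfn);
	return 0;
}

With these sample numbers the old call would try to release over a million pages starting at the slot base, while the fixed call releases only the 64 that were actually mapped before the failure.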