Commit c04fa583 authored by Alexey Kardashevskiy, committed by Paolo Bonzini

PC, KVM, CMA: Fix regression caused by wrong get_order() use

fc95ca72 claims that there is no
functional change but this is not true as it calls get_order() (which
takes bytes) where it should have called order_base_2() and the kernel
stops on VM_BUG_ON().

This replaces get_order() with order_base_2() (round-up version of ilog2).
Suggested-by: Paul Mackerras <paulus@samba.org>
Cc: Alexander Graf <agraf@suse.de>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 350b8bdd
...@@ -101,7 +101,7 @@ struct kvm_rma_info *kvm_alloc_rma() ...@@ -101,7 +101,7 @@ struct kvm_rma_info *kvm_alloc_rma()
ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL); ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
if (!ri) if (!ri)
return NULL; return NULL;
page = cma_alloc(kvm_cma, kvm_rma_pages, get_order(kvm_rma_pages)); page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
if (!page) if (!page)
goto err_out; goto err_out;
atomic_set(&ri->use_count, 1); atomic_set(&ri->use_count, 1);
...@@ -135,12 +135,12 @@ struct page *kvm_alloc_hpt(unsigned long nr_pages) ...@@ -135,12 +135,12 @@ struct page *kvm_alloc_hpt(unsigned long nr_pages)
{ {
unsigned long align_pages = HPT_ALIGN_PAGES; unsigned long align_pages = HPT_ALIGN_PAGES;
VM_BUG_ON(get_order(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT); VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
/* Old CPUs require HPT aligned on a multiple of its size */ /* Old CPUs require HPT aligned on a multiple of its size */
if (!cpu_has_feature(CPU_FTR_ARCH_206)) if (!cpu_has_feature(CPU_FTR_ARCH_206))
align_pages = nr_pages; align_pages = nr_pages;
return cma_alloc(kvm_cma, nr_pages, get_order(align_pages)); return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
} }
EXPORT_SYMBOL_GPL(kvm_alloc_hpt); EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment