Commit 990978e9 authored by Aneesh Kumar K.V, committed by Alexander Graf

powerpc/kvm: Use 256K chunk to track both RMA and hash page table allocation.

Both RMA and hash page table requests will be a multiple of 256K, so we can
use a 256K chunk size and let each bit in the bitmap track one free/used
256K chunk. This should help to reduce the bitmap size.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
parent 6c45b810
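
Review note: with KVM_CMA_CHUNK_ORDER = 18 (defined in book3s_hv_cma.h below),
each bitmap bit now covers a 256K chunk instead of a single page. A minimal
userspace sketch of the savings; PAGE_SHIFT = 12 (4K pages) is an assumption
here, and on powerpc configs with 64K pages the reduction factor is 4 rather
than 64:

    #include <stdio.h>

    #define PAGE_SHIFT          12  /* assumption: 4K pages */
    #define KVM_CMA_CHUNK_ORDER 18  /* 256K chunks, as in book3s_hv_cma.h */

    int main(void)
    {
            /* a 64MB CMA region, expressed in pages */
            unsigned long count = (64UL << 20) >> PAGE_SHIFT;
            /* before this patch: one bitmap bit per page */
            unsigned long page_bits = count;
            /* after this patch: one bitmap bit per 256K chunk */
            unsigned long chunk_bits = count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

            /* prints 16384 page bits vs 256 chunk bits: a 64x reduction */
            printf("page bits: %lu, chunk bits: %lu\n", page_bits, chunk_bits);
            return 0;
    }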
@@ -37,6 +37,8 @@
 #include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
 
+#include "book3s_hv_cma.h"
+
 /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
 #define MAX_LPID_970	63
@@ -71,6 +73,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
 
 	/* Next try to allocate from the preallocated pool */
 	if (!hpt) {
+		VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
 		page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
 		if (page) {
 			hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
...
@@ -24,6 +24,8 @@
 #include <linux/sizes.h>
 #include <linux/slab.h>
 
+#include "book3s_hv_cma.h"
+
 struct kvm_cma {
 	unsigned long	base_pfn;
 	unsigned long	count;
@@ -96,6 +98,7 @@ struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
 	int ret;
 	struct page *page = NULL;
 	struct kvm_cma *cma = &kvm_cma_area;
+	unsigned long chunk_count, nr_chunk;
 	unsigned long mask, pfn, pageno, start = 0;
@@ -107,21 +110,27 @@ struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
 	if (!nr_pages)
 		return NULL;
 
+	/*
+	 * align mask with chunk size. The bit tracks pages in chunk size
+	 */
 	VM_BUG_ON(!is_power_of_2(align_pages));
-	mask = align_pages - 1;
+	mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;
+	BUILD_BUG_ON(PAGE_SHIFT > KVM_CMA_CHUNK_ORDER);
+	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 	mutex_lock(&kvm_cma_mutex);
 	for (;;) {
-		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
-						    start, nr_pages, mask);
+		pageno = bitmap_find_next_zero_area(cma->bitmap, chunk_count,
+						    start, nr_chunk, mask);
-		if (pageno >= cma->count)
+		if (pageno >= chunk_count)
 			break;
 
-		pfn = cma->base_pfn + pageno;
+		pfn = cma->base_pfn + (pageno << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
 		ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
 		if (ret == 0) {
-			bitmap_set(cma->bitmap, pageno, nr_pages);
+			bitmap_set(cma->bitmap, pageno, nr_chunk);
 			page = pfn_to_page(pfn);
 			memset(pfn_to_kaddr(pfn), 0, nr_pages << PAGE_SHIFT);
 			break;
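
Review note: the hunk above keeps pfn and nr_pages in page units for
alloc_contig_range() while the bitmap works in chunk units, so every bitmap
index is shifted by (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT). A userspace sketch of
that round trip; the helper names and the PAGE_SHIFT value are illustrative,
not from the patch:

    #include <assert.h>

    #define PAGE_SHIFT          12  /* assumption: 4K pages */
    #define KVM_CMA_CHUNK_ORDER 18
    #define CHUNK_SHIFT         (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)

    /* page count -> chunk count, mirroring nr_chunk in kvm_alloc_cma() */
    static unsigned long pages_to_chunks(unsigned long nr_pages)
    {
            return nr_pages >> CHUNK_SHIFT;
    }

    /* chunk index -> pfn, mirroring the pfn computation in kvm_alloc_cma() */
    static unsigned long chunk_to_pfn(unsigned long base_pfn, unsigned long chunkno)
    {
            return base_pfn + (chunkno << CHUNK_SHIFT);
    }

    int main(void)
    {
            unsigned long base_pfn = 0x10000;

            /* a 1MB request is 256 pages, i.e. 4 chunks of 256K */
            assert(pages_to_chunks(256) == 4);
            /* chunk 4 starts 4 * 64 pages past the region base */
            assert(chunk_to_pfn(base_pfn, 4) == base_pfn + 256);
            return 0;
    }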
@@ -150,9 +159,9 @@ struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
 bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
 {
 	unsigned long pfn;
+	unsigned long nr_chunk;
 	struct kvm_cma *cma = &kvm_cma_area;
 
 	if (!cma || !pages)
 		return false;
@@ -164,9 +173,12 @@ bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
 		return false;
 
 	VM_BUG_ON(pfn + nr_pages > cma->base_pfn + cma->count);
+	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 	mutex_lock(&kvm_cma_mutex);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, nr_pages);
+	bitmap_clear(cma->bitmap,
+		     (pfn - cma->base_pfn) >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT),
+		     nr_chunk);
 	free_contig_range(pfn, nr_pages);
 	mutex_unlock(&kvm_cma_mutex);
@@ -204,13 +216,14 @@ static int __init kvm_cma_activate_area(unsigned long base_pfn,
 static int __init kvm_cma_init_reserved_areas(void)
 {
 	int bitmap_size, ret;
+	unsigned long chunk_count;
 	struct kvm_cma *cma = &kvm_cma_area;
 
 	pr_debug("%s()\n", __func__);
 	if (!cma->count)
 		return 0;
-	bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	bitmap_size = BITS_TO_LONGS(chunk_count) * sizeof(long);
 	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 	if (!cma->bitmap)
 		return -ENOMEM;
...
@@ -14,6 +14,11 @@
 #ifndef __POWERPC_KVM_CMA_ALLOC_H__
 #define __POWERPC_KVM_CMA_ALLOC_H__
+/*
+ * Both RMA and Hash page allocation will be multiple of 256K.
+ */
+#define KVM_CMA_CHUNK_ORDER	18
+
 extern struct page *kvm_alloc_cma(unsigned long nr_pages,
 				  unsigned long align_pages);
 extern bool kvm_release_cma(struct page *pages, unsigned long nr_pages);
...
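
Review note: callers are expected to pass sizes that are multiples of 256K,
otherwise the right shifts by (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT) silently
round down. A hedged caller sketch; the helper alloc_hpt_backing, the
PAGE_SHIFT value, and the size-aligned allocation policy are illustrative
assumptions, while kvm_alloc_cma()/kvm_release_cma() and KVM_CMA_CHUNK_ORDER
come from the header above:

    #include <stdbool.h>
    #include <stddef.h>

    #define PAGE_SHIFT          12  /* assumption: 4K pages */
    #define KVM_CMA_CHUNK_ORDER 18  /* from book3s_hv_cma.h above */

    struct page;
    extern struct page *kvm_alloc_cma(unsigned long nr_pages,
                                      unsigned long align_pages);
    extern bool kvm_release_cma(struct page *pages, unsigned long nr_pages);

    /* hypothetical helper: back a 2^order-byte hash page table with CMA */
    static struct page *alloc_hpt_backing(unsigned long order)
    {
            unsigned long nr_pages = 1UL << (order - PAGE_SHIFT);

            /* order >= KVM_CMA_CHUNK_ORDER keeps nr_pages a whole number
             * of 256K chunks; cf. the VM_BUG_ON added to kvmppc_alloc_hpt()
             * in the first hunk of this commit. */
            if (order < KVM_CMA_CHUNK_ORDER)
                    return NULL;
            /* request a size-aligned region (illustrative policy) */
            return kvm_alloc_cma(nr_pages, nr_pages);
    }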