Commit 3fd09c45 authored by Thomas Klein, committed by Jeff Garzik

ehea: Detect 16GB hugepages for firmware restriction

All kernel memory which is used for kernel/hardware data transfer must
be registered with firmware using "memory regions". 16GB hugepages
may not be part of a memory region due to firmware restrictions.
This patch modifies the walk_memory_resource callback function to filter
out hugepages and add only standard memory to the busmap, which is later
used for MR registration.
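
For illustration only, below is a minimal userspace sketch of the skip walk this
patch introduces. The names walk_chunk, is_hugepage and add_to_busmap are
illustrative stand-ins, the hugepage check and the busmap update are stubbed out,
and PAGE_SHIFT / EHEA_* values simply mirror the defines added in the header
(assumes a 64-bit unsigned long, as on ppc64 where eHEA runs):

	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define PAGE_SIZE		(1UL << PAGE_SHIFT)
	#define EHEA_SECTSIZE		(1UL << 24)	/* 16MB firmware section */
	#define EHEA_HUGEPAGESHIFT	34
	#define EHEA_HUGEPAGE_SIZE	(1UL << EHEA_HUGEPAGESHIFT)
	#define EHEA_HUGEPAGE_PFN_MASK	((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)

	static int is_hugepage(unsigned long pfn)
	{
		/* Stub: pretend one 16GB hugepage starts at physical 16GB.
		 * The real ehea_is_hugepage() checks alignment via
		 * EHEA_HUGEPAGE_PFN_MASK and the compound page order. */
		return pfn == (EHEA_HUGEPAGE_SIZE >> PAGE_SHIFT);
	}

	static int add_to_busmap(unsigned long start_pfn, unsigned long nr_pages)
	{
		/* Stub for ehea_update_busmap(..., EHEA_BUSMAP_ADD_SECT) */
		if (nr_pages)
			printf("busmap add: pfn %#lx, %lu pages\n",
			       start_pfn, nr_pages);
		return 0;
	}

	static int walk_chunk(unsigned long initial_pfn, unsigned long total_nr_pages)
	{
		unsigned long pfn = initial_pfn, start_pfn = initial_pfn;
		unsigned long end_pfn = initial_pfn + total_nr_pages;
		int ret;

		/* A chunk smaller than 16GB cannot contain a 16GB hugepage */
		if (total_nr_pages * PAGE_SIZE < EHEA_HUGEPAGE_SIZE)
			return add_to_busmap(initial_pfn, total_nr_pages);

		while (pfn < end_pfn) {
			if (is_hugepage(pfn)) {
				/* Register memory in front of the hugepage ... */
				ret = add_to_busmap(start_pfn, pfn - start_pfn);
				if (ret)
					return ret;
				/* ... then skip over the hugepage itself */
				pfn += EHEA_HUGEPAGE_SIZE / PAGE_SIZE;
				start_pfn = pfn;
			} else {
				pfn += EHEA_SECTSIZE / PAGE_SIZE; /* next section */
			}
		}

		/* Register whatever follows the last hugepage */
		return add_to_busmap(start_pfn, pfn - start_pfn);
	}

	int main(void)
	{
		/* 48GB chunk at pfn 0: the 16GB..32GB hugepage stays out of the busmap */
		return walk_chunk(0, (48UL << 30) / PAGE_SIZE);
	}
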
Signed-off-by: Thomas Klein <tklein@de.ibm.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
parent 74d5e8ac
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0094"
+#define DRV_VERSION	"EHEA_0095"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
@@ -632,10 +632,13 @@ static void ehea_rebuild_busmap(void)
 	}
 }
 
-static int ehea_update_busmap(unsigned long pfn, unsigned long pgnum, int add)
+static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
 {
 	unsigned long i, start_section, end_section;
 
+	if (!nr_pages)
+		return 0;
+
 	if (!ehea_bmap) {
 		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
 		if (!ehea_bmap)
@@ -643,7 +646,7 @@ static int ehea_update_busmap(unsigned long pfn, unsigned long pgnum, int add)
 	}
 
 	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
-	end_section = start_section + ((pgnum * PAGE_SIZE) / EHEA_SECTSIZE);
+	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
 	/* Mark entries as valid or invalid only; address is assigned later */
 	for (i = start_section; i < end_section; i++) {
 		u64 flag;
@@ -692,10 +695,54 @@ int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
 	return ret;
 }
 
-static int ehea_create_busmap_callback(unsigned long pfn,
-				       unsigned long nr_pages, void *arg)
+static int ehea_is_hugepage(unsigned long pfn)
+{
+	int page_order;
+
+	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
+		return 0;
+
+	page_order = compound_order(pfn_to_page(pfn));
+	if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
+		return 0;
+
+	return 1;
+}
+
+static int ehea_create_busmap_callback(unsigned long initial_pfn,
+				       unsigned long total_nr_pages, void *arg)
 {
-	return ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
+	int ret;
+	unsigned long pfn, start_pfn, end_pfn, nr_pages;
+
+	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
+		return ehea_update_busmap(initial_pfn, total_nr_pages,
+					  EHEA_BUSMAP_ADD_SECT);
+
+	/* Given chunk is >= 16GB -> check for hugepages */
+	start_pfn = initial_pfn;
+	end_pfn = initial_pfn + total_nr_pages;
+	pfn = start_pfn;
+
+	while (pfn < end_pfn) {
+		if (ehea_is_hugepage(pfn)) {
+			/* Add mem found in front of the hugepage */
+			nr_pages = pfn - start_pfn;
+			ret = ehea_update_busmap(start_pfn, nr_pages,
+						 EHEA_BUSMAP_ADD_SECT);
+			if (ret)
+				return ret;
+
+			/* Skip the hugepage */
+			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
+			start_pfn = pfn;
+		} else
+			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
+	}
+
+	/* Add mem found behind the hugepage(s) */
+	nr_pages = pfn - start_pfn;
+	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
 }
 
 int ehea_create_busmap(void)
@@ -40,6 +40,9 @@
 #define EHEA_PAGESIZE		(1UL << EHEA_PAGESHIFT)
 #define EHEA_SECTSIZE		(1UL << 24)
 #define EHEA_PAGES_PER_SECTION	(EHEA_SECTSIZE >> EHEA_PAGESHIFT)
+#define EHEA_HUGEPAGESHIFT	34
+#define EHEA_HUGEPAGE_SIZE	(1UL << EHEA_HUGEPAGESHIFT)
+#define EHEA_HUGEPAGE_PFN_MASK	((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
 
 #if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
 #error eHEA module cannot work if kernel sectionsize < ehea sectionsize