Commit f900a4b8 authored by Borislav Petkov, committed by Matt Fleming

x86/mm/pageattr: Add a PMD pagetable populating function

Handle PMD-level mappings the same as PUD ones.
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
parent 4b23538d
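
The trickiest step in the patch below is the head-alignment math: when the start address is not 2M-aligned, populate_pmd() clamps the first chunk at the next PMD boundary and maps it with 4K PTEs before switching to large pages. A minimal userspace sketch of that arithmetic, assuming 4K pages and 2M PMDs; the sample range and the min() stand-in for the kernel's min_t() are illustrative, not part of the commit:

/* Userspace sketch of populate_pmd()'s head-alignment math.
 * Assumes PAGE_SHIFT = 12 (4K pages) and a 2M PMD; min() stands in
 * for the kernel's min_t(). Sample values are made up.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define min(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	/* Hypothetical request: 1536 4K pages starting 64K past a 2M boundary. */
	unsigned long start = 0x40010000UL;
	unsigned int num_pages = 1536;

	unsigned long pre_end = start + ((unsigned long)num_pages << PAGE_SHIFT);
	unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
	unsigned int cur_pages;

	pre_end = min(pre_end, next_page);		/* stop at the next 2M boundary */
	cur_pages = (pre_end - start) >> PAGE_SHIFT;	/* 4K pages in the head */
	cur_pages = min(num_pages, cur_pages);

	/* Prints: head: 0x40010000-0x40200000 (496 pages) */
	printf("head: %#lx-%#lx (%u pages)\n", start, pre_end, cur_pages);
	return 0;
}
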
@@ -666,6 +666,16 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	return 0;
 }
 
+static int alloc_pte_page(pmd_t *pmd)
+{
+	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+	if (!pte)
+		return -1;
+
+	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
+	return 0;
+}
+
 static int alloc_pmd_page(pud_t *pud)
 {
 	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -676,7 +686,77 @@ static int alloc_pmd_page(pud_t *pud)
 	return 0;
 }
 
-#define populate_pmd(cpa, start, end, pages, pud, pgprot)	(-1)
+#define populate_pte(cpa, start, end, pages, pmd, pgprot)	do {} while (0)
+
+static int populate_pmd(struct cpa_data *cpa,
+			unsigned long start, unsigned long end,
+			unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+{
+	unsigned int cur_pages = 0;
+	pmd_t *pmd;
+
+	/*
+	 * Not on a 2M boundary?
+	 */
+	if (start & (PMD_SIZE - 1)) {
+		unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
+		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
+
+		pre_end   = min_t(unsigned long, pre_end, next_page);
+		cur_pages = (pre_end - start) >> PAGE_SHIFT;
+		cur_pages = min_t(unsigned int, num_pages, cur_pages);
+
+		/*
+		 * Need a PTE page?
+		 */
+		pmd = pmd_offset(pud, start);
+		if (pmd_none(*pmd))
+			if (alloc_pte_page(pmd))
+				return -1;
+
+		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
+
+		start = pre_end;
+	}
+
+	/*
+	 * We mapped them all?
+	 */
+	if (num_pages == cur_pages)
+		return cur_pages;
+
+	while (end - start >= PMD_SIZE) {
+
+		/*
+		 * We cannot use a 1G page so allocate a PMD page if needed.
+		 */
+		if (pud_none(*pud))
+			if (alloc_pmd_page(pud))
+				return -1;
+
+		pmd = pmd_offset(pud, start);
+
+		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+
+		start	  += PMD_SIZE;
+		cpa->pfn  += PMD_SIZE;
+		cur_pages += PMD_SIZE >> PAGE_SHIFT;
+	}
+
+	/*
+	 * Map trailing 4K pages.
+	 */
+	if (start < end) {
+		pmd = pmd_offset(pud, start);
+		if (pmd_none(*pmd))
+			if (alloc_pte_page(pmd))
+				return -1;
+
+		populate_pte(cpa, start, end, num_pages - cur_pages,
+			     pmd, pgprot);
+	}
+	return num_pages;
+}
 
 static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 			pgprot_t pgprot)
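
Taken together, populate_pmd() is a three-phase walk: a 4K head up to the first 2M boundary, full 2M mappings while a whole PMD still fits, and a 4K tail for the remainder. A self-contained sketch of that control flow with the page-table writes stubbed out as printfs; map_range() and the sample range are illustrative, not part of the commit:

/* Sketch of the three-phase split performed by populate_pmd(), with the
 * page-table writes replaced by printfs. map_range() and the sample
 * values are hypothetical, not from the patch.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define min(a, b)	((a) < (b) ? (a) : (b))

static unsigned int map_range(unsigned long start, unsigned int num_pages)
{
	unsigned long end = start + ((unsigned long)num_pages << PAGE_SHIFT);
	unsigned int cur_pages = 0;

	/* Phase 1: 4K head up to the next 2M boundary. */
	if (start & (PMD_SIZE - 1)) {
		unsigned long pre_end = min(end, (start + PMD_SIZE) & PMD_MASK);

		cur_pages = (pre_end - start) >> PAGE_SHIFT;
		printf("4K head: %#lx-%#lx (%u pages)\n", start, pre_end, cur_pages);
		start = pre_end;
	}
	if (num_pages == cur_pages)
		return cur_pages;

	/* Phase 2: whole 2M mappings while a full PMD still fits. */
	while (end - start >= PMD_SIZE) {
		printf("2M page: %#lx\n", start);
		start += PMD_SIZE;
		cur_pages += PMD_SIZE >> PAGE_SHIFT;
	}

	/* Phase 3: 4K tail for whatever is left. */
	if (start < end)
		printf("4K tail: %#lx-%#lx (%u pages)\n", start, end,
		       num_pages - cur_pages);
	return num_pages;
}

int main(void)
{
	map_range(0x40010000UL, 1536);	/* 4K head, two 2M pages, 4K tail */
	return 0;
}

Handling each level the same way, as the commit message says, keeps the splitting logic uniform: every level peels off an unaligned head and tail and hands them to the level below.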