Commit 0e4c12b4 authored by Tom Lendacky, committed by Thomas Gleixner

x86/mm, resource: Use PAGE_KERNEL protection for ioremap of memory pages

In order for memory pages to be properly mapped when SEV is active, it's
necessary to use the PAGE_KERNEL protection attribute as the base
protection.  This ensures that memory mappings of, e.g., ACPI tables
receive the proper mapping attributes.
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Tested-by: Borislav Petkov <bp@suse.de>
Cc: Laura Abbott <labbott@redhat.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: kvm@vger.kernel.org
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: https://lkml.kernel.org/r/20171020143059.3291-11-brijesh.singh@amd.com
parent 1d2e733b
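
Background for readers: under SME/SEV the encryption state of a kernel
mapping is carried in the page-table entry itself (the "C-bit"), so whether
remapped memory is accessed through the encrypted view depends entirely on
the pgprot used. A rough sketch of the relevant definitions from kernels of
this era (simplified from arch/x86/include/asm/pgtable_types.h and
linux/mem_encrypt.h; illustrative, not authoritative):

	/* Simplified sketch; the real definitions live in the headers above. */
	#define _PAGE_ENC		(_AT(pteval_t, sme_me_mask))	/* the C-bit */

	#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL | _PAGE_ENC)	/* encrypted */
	#define PAGE_KERNEL_IO		__pgprot(__PAGE_KERNEL_IO)		/* no C-bit */

	/* Flip an existing protection to the encrypted view: */
	#define __sme_set(x)		((x) | sme_me_mask)
	#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))

This is why OR-ing the encryption attribute into the ioremap protection, as
the hunks below do with pgprot_encrypted(), lets mappings of
firmware-described memory such as ACPI tables be decrypted correctly by the
hardware under SEV.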
arch/x86/mm/ioremap.c

@@ -27,6 +27,11 @@
 
 #include "physaddr.h"
 
+struct ioremap_mem_flags {
+	bool system_ram;
+	bool desc_other;
+};
+
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
@@ -56,17 +61,59 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
 	return err;
 }
 
-static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
-			       void *arg)
+static bool __ioremap_check_ram(struct resource *res)
 {
+	unsigned long start_pfn, stop_pfn;
 	unsigned long i;
 
-	for (i = 0; i < nr_pages; ++i)
-		if (pfn_valid(start_pfn + i) &&
-		    !PageReserved(pfn_to_page(start_pfn + i)))
-			return 1;
+	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
+		return false;
+
+	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
+	if (stop_pfn > start_pfn) {
+		for (i = 0; i < (stop_pfn - start_pfn); ++i)
+			if (pfn_valid(start_pfn + i) &&
+			    !PageReserved(pfn_to_page(start_pfn + i)))
+				return true;
+	}
 
-	return 0;
+	return false;
+}
+
+static int __ioremap_check_desc_other(struct resource *res)
+{
+	return (res->desc != IORES_DESC_NONE);
+}
+
+static int __ioremap_res_check(struct resource *res, void *arg)
+{
+	struct ioremap_mem_flags *flags = arg;
+
+	if (!flags->system_ram)
+		flags->system_ram = __ioremap_check_ram(res);
+
+	if (!flags->desc_other)
+		flags->desc_other = __ioremap_check_desc_other(res);
+
+	return flags->system_ram && flags->desc_other;
+}
+
+/*
+ * To avoid multiple resource walks, this function walks resources marked as
+ * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
+ * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
+ */
+static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
+				struct ioremap_mem_flags *flags)
+{
+	u64 start, end;
+
+	start = (u64)addr;
+	end = start + size - 1;
+	memset(flags, 0, sizeof(*flags));
+
+	walk_mem_res(start, end, flags, __ioremap_res_check);
 }
 
 /*
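
A note on the PFN arithmetic in the new __ioremap_check_ram(): start_pfn
rounds the resource start up to a page boundary and stop_pfn rounds
one-past-the-end down, so only pages fully covered by the resource are
tested against pfn_valid()/PageReserved(). A standalone sketch with
made-up numbers (hypothetical, purely to illustrate the rounding):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	int main(void)
	{
		/* Hypothetical resource spanning [0x1800, 0x3fff]. */
		unsigned long start = 0x1800, end = 0x3fff;

		unsigned long start_pfn = (start + PAGE_SIZE - 1) >> PAGE_SHIFT; /* 2 */
		unsigned long stop_pfn  = (end + 1) >> PAGE_SHIFT;               /* 4 */

		/* Pages 2 and 3 are checked; the partial page at 0x1800 is skipped. */
		printf("pfns [%lu, %lu)\n", start_pfn, stop_pfn);
		return 0;
	}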
@@ -87,9 +134,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 		unsigned long size, enum page_cache_mode pcm, void *caller)
 {
 	unsigned long offset, vaddr;
-	resource_size_t pfn, last_pfn, last_addr;
+	resource_size_t last_addr;
 	const resource_size_t unaligned_phys_addr = phys_addr;
 	const unsigned long unaligned_size = size;
+	struct ioremap_mem_flags mem_flags;
 	struct vm_struct *area;
 	enum page_cache_mode new_pcm;
 	pgprot_t prot;
@@ -108,13 +156,12 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 		return NULL;
 	}
 
+	__ioremap_check_mem(phys_addr, size, &mem_flags);
+
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
-	pfn      = phys_addr >> PAGE_SHIFT;
-	last_pfn = last_addr >> PAGE_SHIFT;
-	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
-					  __ioremap_check_ram) == 1) {
+	if (mem_flags.system_ram) {
 		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
 			  &phys_addr, &last_addr);
 		return NULL;
@@ -146,7 +193,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 		pcm = new_pcm;
 	}
 
+	/*
+	 * If the page being mapped is in memory and SEV is active then
+	 * make sure the memory encryption attribute is enabled in the
+	 * resulting mapping.
+	 */
 	prot = PAGE_KERNEL_IO;
+	if (sev_active() && mem_flags.desc_other)
+		prot = pgprot_encrypted(prot);
+
 	switch (pcm) {
 	case _PAGE_CACHE_MODE_UC:
 	default:
include/linux/ioport.h

@@ -271,6 +271,9 @@ extern int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *));
 extern int
+walk_mem_res(u64 start, u64 end, void *arg,
+	     int (*func)(struct resource *, void *));
+extern int
 walk_system_ram_res(u64 start, u64 end, void *arg,
 		    int (*func)(struct resource *, void *));
 extern int
kernel/resource.c

@@ -397,6 +397,8 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
 		res->start = p->start;
 	if (res->end > p->end)
 		res->end = p->end;
+	res->flags = p->flags;
+	res->desc = p->desc;
 	return 0;
 }
 
@@ -467,6 +469,23 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
 				     arg, func);
 }
 
+/*
+ * This function calls the @func callback against all memory ranges, i.e.
+ * ranges marked as both IORESOURCE_MEM and IORESOURCE_BUSY.
+ */
+int walk_mem_res(u64 start, u64 end, void *arg,
+		 int (*func)(struct resource *, void *))
+{
+	struct resource res;
+
+	res.start = start;
+	res.end = end;
+	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+	return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
+				     arg, func);
+}
+
 #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
 /*
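
To show how the new interface is meant to be consumed, here is a
hypothetical kernel-side caller (count_described() and count_described_mem()
are made up for illustration): the callback is invoked for each busy
IORESOURCE_MEM resource intersecting [start, end], and a nonzero return
stops the walk early.

	/* Hypothetical example, not part of the patch. */
	static int count_described(struct resource *res, void *arg)
	{
		int *count = arg;

		/* res->flags and res->desc are valid here thanks to the
		 * find_next_iomem_res() change above.
		 */
		if (res->desc != IORES_DESC_NONE)
			(*count)++;

		return 0;	/* keep walking; nonzero would stop the walk */
	}

	static int count_described_mem(u64 start, u64 end)
	{
		int count = 0;

		walk_mem_res(start, end, &count, count_described);
		return count;
	}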