Commit e4630fdd authored by Rafael J. Wysocki

x86/power/64: Always create temporary identity mapping correctly

The low-level resume-from-hibernation code on x86-64 uses
kernel_ident_mapping_init() to create the temporary identity mapping,
but that function assumes that the offset between kernel virtual
addresses and physical addresses is aligned on the PGD level.

However, with a randomized identity mapping base, the offset may only be
aligned on the PUD level, and if that happens, the temporary identity mapping
created by set_up_temporary_mappings() will not reflect the actual
kernel identity mapping and the image restoration will fail as a
result (leading to a kernel panic most of the time).
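
To make the alignment issue concrete: with x86-64 4-level paging, a PGD
entry covers 512 GiB and a PUD entry covers 1 GiB, so a randomized base
can produce a KVA/PA offset that is PUD-aligned but not PGD-aligned. A
minimal user-space sketch (illustrative only, not kernel code; the
offset and physical address values are hypothetical) shows that such an
offset leaves the PGD slot unchanged while shifting the PUD slot, which
is exactly what a PGD-level-only correction cannot account for:

    #include <stdio.h>

    #define PGDIR_SHIFT    39              /* a PGD entry covers 512 GiB */
    #define PUD_SHIFT      30              /* a PUD entry covers 1 GiB */
    #define PTRS_PER_TABLE 512UL

    #define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_TABLE - 1))
    #define pud_index(a) (((a) >> PUD_SHIFT) & (PTRS_PER_TABLE - 1))

    int main(void)
    {
        unsigned long offset = 0x40000000UL; /* 1 GiB: PUD- but not PGD-aligned */
        unsigned long pa = 0x1000000UL;      /* an arbitrary physical address */

        /* Same PGD slot, so a PGD-level correction cannot see the offset... */
        printf("PGD slot: PA %lu vs KVA %lu\n",
               pgd_index(pa), pgd_index(pa + offset));
        /* ...but the PUD slots diverge: prints "PUD slot: PA 0 vs KVA 1". */
        printf("PUD slot: PA %lu vs KVA %lu\n",
               pud_index(pa), pud_index(pa + offset));
        return 0;
    }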

To fix this problem, rework kernel_ident_mapping_init() to support
unaligned offsets between KVA and PA up to the PMD level and make
set_up_temporary_mappings() use it as appropriate (see the usage
sketch after the diff below).

Reported-and-tested-by: Thomas Garnier <thgarnie@google.com>
Reported-by: Borislav Petkov <bp@suse.de>
Suggested-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
parent c226fab4
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -5,10 +5,10 @@ struct x86_mapping_info {
 	void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
 	void *context;			 /* context for alloc_pgt_page */
 	unsigned long pmd_flag;		 /* page flag for PMD entry */
-	bool kernel_mapping;		 /* kernel mapping or ident mapping */
+	unsigned long offset;		 /* ident mapping offset */
 };
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-			      unsigned long addr, unsigned long end);
+			      unsigned long pstart, unsigned long pend);
 
 #endif /* _ASM_X86_INIT_H */
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -3,15 +3,17 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 			   unsigned long addr, unsigned long end)
 {
 	addr &= PMD_MASK;
 	for (; addr < end; addr += PMD_SIZE) {
 		pmd_t *pmd = pmd_page + pmd_index(addr);
 
-		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
+		if (pmd_present(*pmd))
+			continue;
+
+		set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
 	}
 }
 
@@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			ident_pmd_init(info, pmd, addr, next);
 			continue;
 		}
 
 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
 		if (!pmd)
 			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		ident_pmd_init(info, pmd, addr, next);
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 	}
 
@@ -44,14 +46,15 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 }
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-			      unsigned long addr, unsigned long end)
+			      unsigned long pstart, unsigned long pend)
 {
+	unsigned long addr = pstart + info->offset;
+	unsigned long end = pend + info->offset;
 	unsigned long next;
 	int result;
-	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
 
 	for (; addr < end; addr = next) {
-		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+		pgd_t *pgd = pgd_page + pgd_index(addr);
 		pud_t *pud;
 
 		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
...
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -87,7 +87,7 @@ static int set_up_temporary_mappings(void)
 	struct x86_mapping_info info = {
 		.alloc_pgt_page	= alloc_pgt_page,
 		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
-		.kernel_mapping = true,
+		.offset		= __PAGE_OFFSET,
 	};
 	unsigned long mstart, mend;
 	pgd_t *pgd;
...
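
For reference, a condensed sketch of the reworked calling convention,
assembled from the hunks above (not a literal excerpt; the loop over the
mapped physical ranges and the error handling in
set_up_temporary_mappings() are elided):

    struct x86_mapping_info info = {
        .alloc_pgt_page = alloc_pgt_page,
        .pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
        .offset         = __PAGE_OFFSET, /* 0 would give a pure 1:1 mapping */
    };

    /*
     * Callers now pass physical addresses: the function maps the
     * physical range [mstart, mend) at virtual mstart + info.offset,
     * and each PMD entry stores (virtual - offset), i.e. the physical
     * address, so the temporary mapping matches the kernel's own even
     * when the offset is only PUD- or PMD-aligned.
     */
    result = kernel_ident_mapping_init(&info, pgd, mstart, mend);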