Commit 07344b15 authored by Tom Lendacky, committed by Thomas Gleixner

x86/boot: Fix SEV boot failure from change to __PHYSICAL_MASK_SHIFT

In arch/x86/boot/compressed/kaslr_64.c, CONFIG_AMD_MEM_ENCRYPT support was
initially #undef'd to support SME with minimal effort.  When support for
SEV was added, the #undef remained and some minimal support for setting the
encryption bit was added for building identity mapped pagetable entries.

Commit b83ce5ee ("x86/mm/64: Make __PHYSICAL_MASK_SHIFT always 52")
changed __PHYSICAL_MASK_SHIFT from 46 to 52 in support of 5-level paging.
This change resulted in SEV guests failing to boot because the encryption
bit was no longer being automatically masked out.  The compressed boot
path now requires sme_me_mask to be defined in order for the pagetable
functions, such as pud_present(), to properly mask out the encryption bit
(currently bit 47) when evaluating pagetable entries.

Add an sme_me_mask variable in arch/x86/boot/compressed/mem_encrypt.S,
which is set when SEV is active, delete the #undef CONFIG_AMD_MEM_ENCRYPT
from arch/x86/boot/compressed/kaslr_64.c and use sme_me_mask when building
the identity mapped pagetable entries.

Fixes: b83ce5ee ("x86/mm/64: Make __PHYSICAL_MASK_SHIFT always 52")
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Link: https://lkml.kernel.org/r/20180327220711.8702.55842.stgit@tlendack-t1.amdoffice.net
parent 547edaca
...@@ -16,13 +16,6 @@ ...@@ -16,13 +16,6 @@
#define __pa(x) ((unsigned long)(x)) #define __pa(x) ((unsigned long)(x))
#define __va(x) ((void *)((unsigned long)(x))) #define __va(x) ((void *)((unsigned long)(x)))
/*
* The pgtable.h and mm/ident_map.c includes make use of the SME related
* information which is not used in the compressed image support. Un-define
* the SME support to avoid any compile and link errors.
*/
#undef CONFIG_AMD_MEM_ENCRYPT
/* No PAGE_TABLE_ISOLATION support needed either: */ /* No PAGE_TABLE_ISOLATION support needed either: */
#undef CONFIG_PAGE_TABLE_ISOLATION #undef CONFIG_PAGE_TABLE_ISOLATION
...@@ -85,13 +78,14 @@ static struct x86_mapping_info mapping_info; ...@@ -85,13 +78,14 @@ static struct x86_mapping_info mapping_info;
/* Locates and clears a region for a new top level page table. */ /* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void) void initialize_identity_maps(void)
{ {
unsigned long sev_me_mask = get_sev_encryption_mask(); /* If running as an SEV guest, the encryption mask is required. */
set_sev_encryption_mask();
/* Init mapping_info with run-time function/buffer pointers. */ /* Init mapping_info with run-time function/buffer pointers. */
mapping_info.alloc_pgt_page = alloc_pgt_page; mapping_info.alloc_pgt_page = alloc_pgt_page;
mapping_info.context = &pgt_data; mapping_info.context = &pgt_data;
mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sev_me_mask; mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask;
mapping_info.kernpg_flag = _KERNPG_TABLE | sev_me_mask; mapping_info.kernpg_flag = _KERNPG_TABLE;
/* /*
* It should be impossible for this not to already be true, * It should be impossible for this not to already be true,
......
...@@ -88,9 +88,7 @@ ENTRY(get_sev_encryption_bit) ...@@ -88,9 +88,7 @@ ENTRY(get_sev_encryption_bit)
ENDPROC(get_sev_encryption_bit) ENDPROC(get_sev_encryption_bit)
.code64 .code64
ENTRY(get_sev_encryption_mask) ENTRY(set_sev_encryption_mask)
xor %rax, %rax
#ifdef CONFIG_AMD_MEM_ENCRYPT #ifdef CONFIG_AMD_MEM_ENCRYPT
push %rbp push %rbp
push %rdx push %rdx
...@@ -101,9 +99,7 @@ ENTRY(get_sev_encryption_mask) ...@@ -101,9 +99,7 @@ ENTRY(get_sev_encryption_mask)
testl %eax, %eax testl %eax, %eax
jz .Lno_sev_mask jz .Lno_sev_mask
xor %rdx, %rdx bts %rax, sme_me_mask(%rip) /* Create the encryption mask */
bts %rax, %rdx /* Create the encryption mask */
mov %rdx, %rax /* ... and return it */
.Lno_sev_mask: .Lno_sev_mask:
movq %rbp, %rsp /* Restore original stack pointer */ movq %rbp, %rsp /* Restore original stack pointer */
...@@ -112,9 +108,16 @@ ENTRY(get_sev_encryption_mask) ...@@ -112,9 +108,16 @@ ENTRY(get_sev_encryption_mask)
pop %rbp pop %rbp
#endif #endif
xor %rax, %rax
ret ret
ENDPROC(get_sev_encryption_mask) ENDPROC(set_sev_encryption_mask)
.data .data
enc_bit: enc_bit:
.int 0xffffffff .int 0xffffffff
#ifdef CONFIG_AMD_MEM_ENCRYPT
.balign 8
GLOBAL(sme_me_mask)
.quad 0
#endif
...@@ -114,6 +114,6 @@ static inline void console_init(void) ...@@ -114,6 +114,6 @@ static inline void console_init(void)
{ } { }
#endif #endif
unsigned long get_sev_encryption_mask(void); void set_sev_encryption_mask(void);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment