Commit 463dbba4 authored by Linus Walleij, committed by Russell King (Oracle)

ARM: 9104/2: Fix Keystone 2 kernel mapping regression

This fixes a Keystone 2 regression discovered as a side effect of
defining and passing the physical start/end sections of the kernel
to the MMU remapping code.

As Keystone 2 applies an offset to all physical addresses,
including those identified and patched by phys2virt, we fail to
account for this offset in the kernel_sec_start and kernel_sec_end
variables.

Further, these offsets can extend into the 64bit range on LPAE
systems such as Keystone 2.
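
A minimal sketch of why u32 is no longer enough (the low/high DDR
alias addresses and the section start value below are illustrative
assumptions, not taken from the patch): once a Keystone-style offset
is added, the physical start of the kernel no longer fits in 32 bits,
so a u32 would silently truncate it back into the low alias.

#include <stdint.h>
#include <stdio.h>

/* Assumed, illustrative DDR aliases in the style of Keystone 2 */
#define LOW_PHYS_START	0x080000000ULL	/* 32bit DDR alias */
#define HIGH_PHYS_START	0x800000000ULL	/* high (LPAE) DDR alias */

int main(void)
{
	uint64_t offset = HIGH_PHYS_START - LOW_PHYS_START;
	uint64_t kernel_sec_start = 0x80200000;	/* hypothetical 2MB-aligned start */

	uint64_t fixed = kernel_sec_start + offset;	/* representable in u64 */
	uint32_t truncated = (uint32_t)(kernel_sec_start + offset);	/* wraps in u32 */

	printf("u64: %#llx  u32: %#x\n",
	       (unsigned long long)fixed, (unsigned int)truncated);
	return 0;
}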

Fix it like this:
- Extend kernel_sec_start and kernel_sec_end to be 64bit
- Add the offset also to kernel_sec_start and kernel_sec_end

As passing kernel_sec_start and kernel_sec_end as 64bit invariably
incurs BE8 endianness issues, I have attempted to dry-code around
these.
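
As a minimal illustration of the BE8 wrinkle (plain C, not the kernel
code; the physical address value is a hypothetical stand-in): the
early assembly only has a 32bit physical address to store, but the
destination is now a u64, and the low word of that u64 sits at byte
offset 0 on little-endian but at byte offset 4 on big-endian, which
is why the patch uses "str r8, [r5, #4]" under CONFIG_CPU_ENDIAN_BE8.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t kernel_sec_start = 0;	/* stand-in for the u64 in head.S */
	uint32_t pa = 0x80200000;	/* hypothetical 32bit physical start */
	uint32_t probe = 1;
	int big_endian = (*(unsigned char *)&probe == 0);

	/* Store the 32bit value into the low word of the u64: offset 0 on
	 * little-endian, offset 4 on big-endian (the BE8 case in head.S). */
	memcpy((unsigned char *)&kernel_sec_start + (big_endian ? 4 : 0),
	       &pa, sizeof(pa));

	printf("kernel_sec_start = %#llx\n",
	       (unsigned long long)kernel_sec_start);
	return 0;
}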

Tested on the Vexpress QEMU model both with and without LPAE
enabled.

Fixes: 6e121df1 ("ARM: 9090/1: Map the lowmem and kernel separately")
Reported-by: Nishanth Menon <nmenon@kernel.org>
Suggested-by: Russell King <rmk+kernel@armlinux.org.uk>
Tested-by: Grygorii Strashko <grygorii.strashko@ti.com>
Tested-by: Nishanth Menon <nmenon@kernel.org>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
parent e73f0f0e
@@ -160,10 +160,11 @@ extern unsigned long vectors_base;
 
 /*
  * Physical start and end address of the kernel sections. These addresses are
- * 2MB-aligned to match the section mappings placed over the kernel.
+ * 2MB-aligned to match the section mappings placed over the kernel. We use
+ * u64 so that LPAE mappings beyond the 32bit limit will work out as well.
  */
-extern u32 kernel_sec_start;
-extern u32 kernel_sec_end;
+extern u64 kernel_sec_start;
+extern u64 kernel_sec_end;
 
 /*
  * Physical vs virtual RAM address space conversion. These are
@@ -49,7 +49,8 @@
 
 /*
  * This needs to be assigned at runtime when the linker symbols are
- * resolved.
+ * resolved. These are unsigned 64bit really, but in this assembly code
+ * We store them as 32bit.
  */
 	.pushsection .data
 	.align	2
@@ -57,7 +58,9 @@
 	.globl	kernel_sec_end
 kernel_sec_start:
 	.long	0
+	.long	0
 kernel_sec_end:
+	.long	0
 	.long	0
 	.popsection
@@ -250,7 +253,11 @@ __create_page_tables:
 	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
 	ldr	r6, =(_end - 1)
 	adr_l	r5, kernel_sec_start		@ _pa(kernel_sec_start)
-	str	r8, [r5]			@ Save physical start of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r8, [r5, #4]			@ Save physical start of kernel (BE)
+#else
+	str	r8, [r5]			@ Save physical start of kernel (LE)
+#endif
 	orr	r3, r8, r7			@ Add the MMU flags
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
 1:	str	r3, [r0], #1 << PMD_ORDER
@@ -259,7 +266,11 @@ __create_page_tables:
 	bls	1b
 	eor	r3, r3, r7			@ Remove the MMU flags
 	adr_l	r5, kernel_sec_end		@ _pa(kernel_sec_end)
-	str	r3, [r5]			@ Save physical end of kernel
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	str	r3, [r5, #4]			@ Save physical end of kernel (BE)
+#else
+	str	r3, [r5]			@ Save physical end of kernel (LE)
+#endif
 #ifdef CONFIG_XIP_KERNEL
 	/*
@@ -1608,6 +1608,13 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
 	if (offset == 0)
 		return;
 
+	/*
+	 * Offset the kernel section physical offsets so that the kernel
+	 * mapping will work out later on.
+	 */
+	kernel_sec_start += offset;
+	kernel_sec_end += offset;
+
 	/*
 	 * Get the address of the remap function in the 1:1 identity
 	 * mapping setup by the early page table assembly code. We
@@ -1716,7 +1723,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	pr_debug("physical kernel sections: 0x%08x-0x%08x\n",
+	pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
 		 kernel_sec_start, kernel_sec_end);
 
 	prepare_page_table();
@@ -29,7 +29,7 @@ ENTRY(lpae_pgtables_remap_asm)
 	ldr	r6, =(_end - 1)
 	add	r7, r2, #0x1000
 	add	r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
-	add	r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
+	add	r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER)
 1:	ldrd	r4, r5, [r7]
 	adds	r4, r4, r0
 	adc	r5, r5, r1