Commit daece596 authored by Nicolas Pitre, committed by Russell King

ARM: 7013/1: P2V: Remove ARM_PATCH_PHYS_VIRT_16BIT

This code can be removed now that MSM targets no longer need the 16-bit
offsets for P2V.
Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 9e775ad1
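
For readers unfamiliar with the mechanism being trimmed: each __virt_to_phys/__phys_to_virt call emits an add or sub carrying a placeholder constant and records the instruction's address in a .pv_table section, and early boot code rewrites each recorded immediate once the real PHYS_OFFSET is known. The stub emitter looked roughly like this in asm/memory.h of the same era (reproduced from memory, so treat it as a sketch rather than the exact source):

	#define __pv_stub(from, to, instr, type)		\
		__asm__("@ __pv_stub\n"				\
		"1:	" instr "	%0, %1, %2\n"		\
		"	.pushsection .pv_table,\"a\"\n"		\
		"	.long	1b\n"				\
		"	.popsection\n"				\
		: "=r" (to)					\
		: "r" (from), "I" (type))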
arch/arm/Kconfig:
@@ -205,20 +205,12 @@ config ARM_PATCH_PHYS_VIRT
 	  kernel in system memory.
 
 	  This can only be used with non-XIP MMU kernels where the base
-	  of physical memory is at a 16MB boundary, or theoretically 64K
-	  for the MSM machine class.
+	  of physical memory is at a 16MB boundary.
 
 	  Only disable this option if you know that you do not require
 	  this feature (eg, building a kernel for a single machine) and
 	  you need to shrink the kernel to the minimal size.
 
-config ARM_PATCH_PHYS_VIRT_16BIT
-	def_bool y
-	depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM
-	help
-	  This option extends the physical to virtual translation patching
-	  to allow physical memory down to a theoretical minimum of 64K
-	  boundaries.
-
 source "init/Kconfig"
arch/arm/include/asm/memory.h:
@@ -160,7 +160,6 @@
  * so that all we need to do is modify the 8-bit constant field.
  */
 #define __PV_BITS_31_24	0x81000000
-#define __PV_BITS_23_16	0x00810000
 
 extern unsigned long __pv_phys_offset;
 #define PHYS_OFFSET __pv_phys_offset
@@ -178,9 +177,6 @@ static inline unsigned long __virt_to_phys(unsigned long x)
 {
 	unsigned long t;
 	__pv_stub(x, t, "add", __PV_BITS_31_24);
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-	__pv_stub(t, t, "add", __PV_BITS_23_16);
-#endif
 	return t;
 }
@@ -188,9 +184,6 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 {
 	unsigned long t;
 	__pv_stub(x, t, "sub", __PV_BITS_31_24);
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-	__pv_stub(t, t, "sub", __PV_BITS_23_16);
-#endif
 	return t;
 }
 #else
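
The arithmetic that makes a single 8-bit stub sufficient: the virt-to-phys delta is constrained to a multiple of 16MiB, so one add with 32-bit wraparound performs the whole translation, and only offset bits 31..24 ever need patching. A standalone demonstration with hypothetical values (PAGE_OFFSET 0xC0000000, PHYS_OFFSET 0x80000000; neither is taken from this commit):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t delta = 0x80000000u - 0xC0000000u; /* 0xC0000000 after wrap */
		uint32_t virt  = 0xC0123456u;
		uint32_t phys  = virt + delta;              /* 0x80123456 */

		printf("virt %08x -> phys %08x (delta %08x)\n", virt, phys, delta);
		return 0;
	}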
arch/arm/include/asm/module.h:
@@ -31,11 +31,7 @@ struct mod_arch_specific {
 /* Add __virt_to_phys patching state as well */
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-#define MODULE_ARCH_VERMAGIC_P2V "p2v16 "
-#else
 #define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
-#endif
 #else
 #define MODULE_ARCH_VERMAGIC_P2V ""
 #endif
arch/arm/kernel/head.S:
@@ -488,13 +488,8 @@ __fixup_pv_table:
 	add	r5, r5, r3	@ adjust table end address
 	add	r7, r7, r3	@ adjust __pv_phys_offset address
 	str	r8, [r7]	@ save computed PHYS_OFFSET to __pv_phys_offset
-#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
 	mov	r6, r3, lsr #24	@ constant for add/sub instructions
 	teq	r3, r6, lsl #24 @ must be 16MiB aligned
-#else
-	mov	r6, r3, lsr #16	@ constant for add/sub instructions
-	teq	r3, r6, lsl #16	@ must be 64kiB aligned
-#endif
 THUMB(	it	ne		@ cross section branch )
 	bne	__error
 	str	r6, [r7, #4]	@ save to __pv_offset
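
The surviving mov/teq pair is the entire alignment check: truncate the offset to its top byte, shift back, and compare. The same test in C (a sketch assuming a 32-bit offset, not kernel code):

	#include <stdint.h>

	/* mov r6, r3, lsr #24 ; teq r3, r6, lsl #24 */
	static int pv_offset_is_16mib_aligned(uint32_t offset)
	{
		uint32_t imm8 = offset >> 24;  /* constant for add/sub instructions */
		return (imm8 << 24) == offset; /* any low 24 bits set => not aligned */
	}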
@@ -510,20 +505,8 @@ ENDPROC(__fixup_pv_table)
 	.text
 __fixup_a_pv_table:
 #ifdef CONFIG_THUMB2_KERNEL
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-	lsls	r0, r6, #24
-	lsr	r6, #8
-	beq	1f
-	clz	r7, r0
-	lsr	r0, #24
-	lsl	r0, r7
-	bic	r0, 0x0080
-	lsrs	r7, #1
-	orrcs	r0, #0x0080
-	orr	r0, r0, r7, lsl #12
-#endif
-1:	lsls	r6, #24
-	beq	4f
+	lsls	r6, #24
+	beq	2f
 	clz	r7, r6
 	lsr	r6, #24
 	lsl	r6, r7
@@ -532,43 +515,25 @@ __fixup_a_pv_table:
 	orrcs	r6, #0x0080
 	orr	r6, r6, r7, lsl #12
 	orr	r6, #0x4000
-	b	4f
-2:	@ at this point the C flag is always clear
-	add	r7, r3
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-	ldrh	ip, [r7]
-	tst	ip, 0x0400	@ the i bit tells us LS or MS byte
-	beq	3f
-	cmp	r0, #0		@ set C flag, and ...
-	biceq	ip, 0x0400	@ immediate zero value has a special encoding
-	streqh	ip, [r7]	@ that requires the i bit cleared
-#endif
-3:	ldrh	ip, [r7, #2]
+	b	2f
+1:	add	r7, r3
+	ldrh	ip, [r7, #2]
 	and	ip, 0x8f00
-	orrcc	ip, r6		@ mask in offset bits 31-24
-	orrcs	ip, r0		@ mask in offset bits 23-16
+	orr	ip, r6		@ mask in offset bits 31-24
 	strh	ip, [r7, #2]
-4:	cmp	r4, r5
+2:	cmp	r4, r5
 	ldrcc	r7, [r4], #4	@ use branch for delay slot
-	bcc	2b
+	bcc	1b
 	bx	lr
 #else
-#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
-	and	r0, r6, #255	@ offset bits 23-16
-	mov	r6, r6, lsr #8	@ offset bits 31-24
-#else
-	mov	r0, #0		@ just in case...
-#endif
-	b	3f
-2:	ldr	ip, [r7, r3]
+	b	2f
+1:	ldr	ip, [r7, r3]
 	bic	ip, ip, #0x000000ff
-	tst	ip, #0x400	@ rotate shift tells us LS or MS byte
-	orrne	ip, ip, r6	@ mask in offset bits 31-24
-	orreq	ip, ip, r0	@ mask in offset bits 23-16
+	orr	ip, ip, r6	@ mask in offset bits 31-24
 	str	ip, [r7, r3]
-3:	cmp	r4, r5
+2:	cmp	r4, r5
 	ldrcc	r7, [r4], #4	@ use branch for delay slot
-	bcc	2b
+	bcc	1b
 	mov	pc, lr
 #endif
 ENDPROC(__fixup_a_pv_table)
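
With the 16-bit variant gone, the non-Thumb branch reduces to a plain read/modify/write over the patch table. A userspace C model of that loop (illustrative names and a simplified table layout; the real code also biases each table entry by the relocation offset held in r3):

	#include <stdint.h>

	/* entry/end mirror r4/r5; imm8 mirrors r6 (offset bits 31..24) */
	static void fixup_pv_table(uint32_t **entry, uint32_t **end, uint32_t imm8)
	{
		while (entry < end) {
			uint32_t *insn = *entry++; /* ldrcc r7, [r4], #4 */
			uint32_t op = *insn;       /* ldr ip, [r7, r3]   */
			op &= ~0x000000ffu;        /* bic ip, ip, #0xff  */
			op |= imm8;                /* orr ip, ip, r6     */
			*insn = op;                /* str ip, [r7, r3]   */
		}
	}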