Commit 90303b10 authored by Catalin Marinas, committed by Russell King

[ARM] 3256/1: Make the function-returning ldm's use sp as the base register

Patch from Catalin Marinas

If the low interrupt latency mode is enabled for the CPU (from ARMv6
onwards), the ldm/stm instructions are no longer atomic. An ldm instruction
restoring the sp and pc registers can be interrupted immediately after sp
was updated but before the pc. If this happens, the CPU restores the base
register to the value before the ldm instruction but if the base register
is not sp, the interrupt routine will corrupt the stack and the restarted
ldm instruction will load garbage.

Note that future ARM cores might always run in the low interrupt latency
mode.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent ece5f7b3
...@@ -101,7 +101,7 @@ void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs) ...@@ -101,7 +101,7 @@ void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs)
ldmia %1, {r8 - r14}\n\ ldmia %1, {r8 - r14}\n\
msr cpsr_c, %0 @ return to SVC mode\n\ msr cpsr_c, %0 @ return to SVC mode\n\
mov r0, r0\n\ mov r0, r0\n\
ldmea fp, {fp, sp, pc}" ldmfd sp, {fp, sp, pc}"
: "=&r" (tmp) : "=&r" (tmp)
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE)); : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
} }
...@@ -119,7 +119,7 @@ void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs) ...@@ -119,7 +119,7 @@ void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs)
stmia %1, {r8 - r14}\n\ stmia %1, {r8 - r14}\n\
msr cpsr_c, %0 @ return to SVC mode\n\ msr cpsr_c, %0 @ return to SVC mode\n\
mov r0, r0\n\ mov r0, r0\n\
ldmea fp, {fp, sp, pc}" ldmfd sp, {fp, sp, pc}"
: "=&r" (tmp) : "=&r" (tmp)
: "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE)); : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
} }
......
...@@ -18,11 +18,13 @@ ...@@ -18,11 +18,13 @@
*/ */
.macro save_regs .macro save_regs
mov ip, sp
stmfd sp!, {r1, r4 - r8, fp, ip, lr, pc} stmfd sp!, {r1, r4 - r8, fp, ip, lr, pc}
sub fp, ip, #4
.endm .endm
.macro load_regs,flags .macro load_regs
LOADREGS(\flags,fp,{r1, r4 - r8, fp, sp, pc}) ldmfd sp, {r1, r4 - r8, fp, sp, pc}
.endm .endm
.macro load1b, reg1 .macro load1b, reg1
......
...@@ -23,7 +23,7 @@ len .req r2 ...@@ -23,7 +23,7 @@ len .req r2
sum .req r3 sum .req r3
.Lzero: mov r0, sum .Lzero: mov r0, sum
load_regs ea load_regs
/* /*
* Align an unaligned destination pointer. We know that * Align an unaligned destination pointer. We know that
...@@ -87,9 +87,7 @@ sum .req r3 ...@@ -87,9 +87,7 @@ sum .req r3
b .Ldone b .Ldone
FN_ENTRY FN_ENTRY
mov ip, sp
save_regs save_regs
sub fp, ip, #4
cmp len, #8 @ Ensure that we have at least cmp len, #8 @ Ensure that we have at least
blo .Lless8 @ 8 bytes to copy. blo .Lless8 @ 8 bytes to copy.
...@@ -163,7 +161,7 @@ FN_ENTRY ...@@ -163,7 +161,7 @@ FN_ENTRY
ldr sum, [sp, #0] @ dst ldr sum, [sp, #0] @ dst
tst sum, #1 tst sum, #1
movne r0, r0, ror #8 movne r0, r0, ror #8
load_regs ea load_regs
.Lsrc_not_aligned: .Lsrc_not_aligned:
adc sum, sum, #0 @ include C from dst alignment adc sum, sum, #0 @ include C from dst alignment
......
...@@ -18,11 +18,13 @@ ...@@ -18,11 +18,13 @@
.text .text
.macro save_regs .macro save_regs
mov ip, sp
stmfd sp!, {r1 - r2, r4 - r8, fp, ip, lr, pc} stmfd sp!, {r1 - r2, r4 - r8, fp, ip, lr, pc}
sub fp, ip, #4
.endm .endm
.macro load_regs,flags .macro load_regs
ldm\flags fp, {r1, r2, r4-r8, fp, sp, pc} ldmfd sp, {r1, r2, r4-r8, fp, sp, pc}
.endm .endm
.macro load1b, reg1 .macro load1b, reg1
...@@ -100,5 +102,5 @@ ...@@ -100,5 +102,5 @@
6002: teq r2, r1 6002: teq r2, r1
strneb r0, [r1], #1 strneb r0, [r1], #1
bne 6002b bne 6002b
load_regs ea load_regs
.previous .previous
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment