Commit 2190fed6 authored by Russell King

ARM: entry: provide uaccess assembly macro hooks

Provide hooks into the kernel entry and exit paths to permit control
of userspace visibility to the kernel.  The intended use is:

- on entry to kernel from user, uaccess_disable will be called to
  disable userspace visibility
- on exit from kernel to user, uaccess_enable will be called to
  enable userspace visibility
- on entry from a kernel exception, uaccess_save_and_disable will be
  called to save the current userspace visibility setting, and disable
  access
- on exit from a kernel exception, uaccess_restore will be called to
  restore the userspace visibility as it was before the exception
  occurred.

These hooks allow us to keep userspace visibility disabled for the
vast majority of the kernel, except for localised regions where we
want to explicitly access userspace.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent aa06e5c1
...@@ -445,6 +445,23 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) ...@@ -445,6 +445,23 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif #endif
.endm .endm
@ uaccess_disable - hook called on entry to the kernel from userspace to
@ disable kernel access to userspace memory.  This default implementation
@ is a no-op; architectures providing a real implementation use \tmp as a
@ scratch register, and \isb selects whether an ISB barrier is issued.
.macro uaccess_disable, tmp, isb=1
.endm
@ uaccess_enable - hook called on exit from the kernel to userspace to
@ re-enable kernel access to userspace memory.  Default is a no-op; real
@ implementations use \tmp as a scratch register, and \isb selects whether
@ an ISB barrier is issued.
.macro uaccess_enable, tmp, isb=1
.endm
@ uaccess_save - hook called on entry from a kernel exception to save the
@ current userspace-access setting (presumably into the exception frame or
@ \tmp — confirm against a real implementation).  Default is a no-op.
.macro uaccess_save, tmp
.endm
@ uaccess_restore - hook called on exit from a kernel exception to restore
@ the userspace-access setting saved by uaccess_save, i.e. as it was before
@ the exception occurred.  Default is a no-op.
.macro uaccess_restore
.endm
@ uaccess_save_and_disable - convenience hook for kernel exception entry:
@ save the current userspace-access setting, then disable userspace access.
@ The save must come first so the pre-exception state can later be restored
@ with uaccess_restore.
.macro uaccess_save_and_disable, tmp
	uaccess_save \tmp
	uaccess_disable \tmp
.endm
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg .macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6 #if __LINUX_ARM_ARCH__ < 6
......
...@@ -149,10 +149,10 @@ ENDPROC(__und_invalid) ...@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
#define SPFIX(code...) #define SPFIX(code...)
#endif #endif
.macro svc_entry, stack_hole=0, trace=1 .macro svc_entry, stack_hole=0, trace=1, uaccess=1
UNWIND(.fnstart ) UNWIND(.fnstart )
UNWIND(.save {r0 - pc} ) UNWIND(.save {r0 - pc} )
sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4) sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL #ifdef CONFIG_THUMB2_KERNEL
SPFIX( str r0, [sp] ) @ temporarily saved SPFIX( str r0, [sp] ) @ temporarily saved
SPFIX( mov r0, sp ) SPFIX( mov r0, sp )
...@@ -167,7 +167,7 @@ ENDPROC(__und_invalid) ...@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
ldmia r0, {r3 - r5} ldmia r0, {r3 - r5}
add r7, sp, #S_SP - 4 @ here for interlock avoidance add r7, sp, #S_SP - 4 @ here for interlock avoidance
mov r6, #-1 @ "" "" "" "" mov r6, #-1 @ "" "" "" ""
add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
SPFIX( addeq r2, r2, #4 ) SPFIX( addeq r2, r2, #4 )
str r3, [sp, #-4]! @ save the "real" r0 copied str r3, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack @ from the exception stack
...@@ -185,6 +185,11 @@ ENDPROC(__und_invalid) ...@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
@ @
stmia r7, {r2 - r6} stmia r7, {r2 - r6}
uaccess_save r0
.if \uaccess
uaccess_disable r0
.endif
.if \trace .if \trace
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off bl trace_hardirqs_off
...@@ -194,7 +199,7 @@ ENDPROC(__und_invalid) ...@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)
.align 5 .align 5
__dabt_svc: __dabt_svc:
svc_entry svc_entry uaccess=0
mov r2, sp mov r2, sp
dabt_helper dabt_helper
THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
...@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt) ...@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
#error "sizeof(struct pt_regs) must be a multiple of 8" #error "sizeof(struct pt_regs) must be a multiple of 8"
#endif #endif
.macro usr_entry, trace=1 .macro usr_entry, trace=1, uaccess=1
UNWIND(.fnstart ) UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space UNWIND(.cantunwind ) @ don't unwind the user space
sub sp, sp, #S_FRAME_SIZE sub sp, sp, #S_FRAME_SIZE
...@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt) ...@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
ARM( stmdb r0, {sp, lr}^ ) ARM( stmdb r0, {sp, lr}^ )
THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
.if \uaccess
uaccess_disable ip
.endif
@ Enable the alignment trap while in kernel mode @ Enable the alignment trap while in kernel mode
ATRAP( teq r8, r7) ATRAP( teq r8, r7)
ATRAP( mcrne p15, 0, r8, c1, c0, 0) ATRAP( mcrne p15, 0, r8, c1, c0, 0)
...@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt) ...@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
.align 5 .align 5
__dabt_usr: __dabt_usr:
usr_entry usr_entry uaccess=0
kuser_cmpxchg_check kuser_cmpxchg_check
mov r2, sp mov r2, sp
dabt_helper dabt_helper
...@@ -458,7 +467,7 @@ ENDPROC(__irq_usr) ...@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
.align 5 .align 5
__und_usr: __und_usr:
usr_entry usr_entry uaccess=0
mov r2, r4 mov r2, r4
mov r3, r5 mov r3, r5
...@@ -484,6 +493,8 @@ __und_usr: ...@@ -484,6 +493,8 @@ __und_usr:
1: ldrt r0, [r4] 1: ldrt r0, [r4]
ARM_BE8(rev r0, r0) @ little endian instruction ARM_BE8(rev r0, r0) @ little endian instruction
uaccess_disable ip
@ r0 = 32-bit ARM instruction which caused the exception @ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc) @ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction @ r4 = PC value for the faulting instruction
...@@ -518,9 +529,10 @@ __und_usr_thumb: ...@@ -518,9 +529,10 @@ __und_usr_thumb:
2: ldrht r5, [r4] 2: ldrht r5, [r4]
ARM_BE8(rev16 r5, r5) @ little endian instruction ARM_BE8(rev16 r5, r5) @ little endian instruction
cmp r5, #0xe800 @ 32bit instruction if xx != 0 cmp r5, #0xe800 @ 32bit instruction if xx != 0
blo __und_usr_fault_16 @ 16bit undefined instruction blo __und_usr_fault_16_pan @ 16bit undefined instruction
3: ldrht r0, [r2] 3: ldrht r0, [r2]
ARM_BE8(rev16 r0, r0) @ little endian instruction ARM_BE8(rev16 r0, r0) @ little endian instruction
uaccess_disable ip
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
str r2, [sp, #S_PC] @ it's a 2x16bit instr, update str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
orr r0, r0, r5, lsl #16 orr r0, r0, r5, lsl #16
...@@ -715,6 +727,8 @@ ENDPROC(no_fp) ...@@ -715,6 +727,8 @@ ENDPROC(no_fp)
__und_usr_fault_32: __und_usr_fault_32:
mov r1, #4 mov r1, #4
b 1f b 1f
__und_usr_fault_16_pan:
uaccess_disable ip
__und_usr_fault_16: __und_usr_fault_16:
mov r1, #2 mov r1, #2
1: mov r0, sp 1: mov r0, sp
......
...@@ -173,6 +173,8 @@ ENTRY(vector_swi) ...@@ -173,6 +173,8 @@ ENTRY(vector_swi)
USER( ldr scno, [lr, #-4] ) @ get SWI instruction USER( ldr scno, [lr, #-4] ) @ get SWI instruction
#endif #endif
uaccess_disable tbl
adr tbl, sys_call_table @ load syscall table pointer adr tbl, sys_call_table @ load syscall table pointer
#if defined(CONFIG_OABI_COMPAT) #if defined(CONFIG_OABI_COMPAT)
......
...@@ -215,6 +215,7 @@ ...@@ -215,6 +215,7 @@
blne trace_hardirqs_off blne trace_hardirqs_off
#endif #endif
.endif .endif
uaccess_restore
#ifndef CONFIG_THUMB2_KERNEL #ifndef CONFIG_THUMB2_KERNEL
@ ARM mode SVC restore @ ARM mode SVC restore
...@@ -258,6 +259,7 @@ ...@@ -258,6 +259,7 @@
@ on the stack remains correct). @ on the stack remains correct).
@ @
.macro svc_exit_via_fiq .macro svc_exit_via_fiq
uaccess_restore
#ifndef CONFIG_THUMB2_KERNEL #ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore @ ARM mode restore
mov r0, sp mov r0, sp
...@@ -287,6 +289,7 @@ ...@@ -287,6 +289,7 @@
.macro restore_user_regs, fast = 0, offset = 0 .macro restore_user_regs, fast = 0, offset = 0
uaccess_enable r1, isb=0
#ifndef CONFIG_THUMB2_KERNEL #ifndef CONFIG_THUMB2_KERNEL
@ ARM mode restore @ ARM mode restore
mov r2, sp mov r2, sp
......
...@@ -19,6 +19,7 @@ ENTRY(v4_early_abort) ...@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR mrc p15, 0, r0, c6, c0, 0 @ get FAR
ldr r3, [r4] @ read aborted ARM instruction ldr r3, [r4] @ read aborted ARM instruction
uaccess_disable ip @ disable userspace access
bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
tst r3, #1 << 20 @ L = 1 -> write? tst r3, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes. orreq r1, r1, #1 << 11 @ yes.
......
...@@ -21,6 +21,7 @@ ENTRY(v5t_early_abort) ...@@ -21,6 +21,7 @@ ENTRY(v5t_early_abort)
mrc p15, 0, r0, c6, c0, 0 @ get FAR mrc p15, 0, r0, c6, c0, 0 @ get FAR
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction ldreq r3, [r4] @ read aborted ARM instruction
uaccess_disable ip @ disable user access
bic r1, r1, #1 << 11 @ clear bits 11 of FSR bic r1, r1, #1 << 11 @ clear bits 11 of FSR
teq_ldrd tmp=ip, insn=r3 @ insn was LDRD? teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
beq do_DataAbort @ yes beq do_DataAbort @ yes
......
...@@ -24,6 +24,7 @@ ENTRY(v5tj_early_abort) ...@@ -24,6 +24,7 @@ ENTRY(v5tj_early_abort)
bne do_DataAbort bne do_DataAbort
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction ldreq r3, [r4] @ read aborted ARM instruction
uaccess_disable ip @ disable userspace access
teq_ldrd tmp=ip, insn=r3 @ insn was LDRD? teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
beq do_DataAbort @ yes beq do_DataAbort @ yes
tst r3, #1 << 20 @ L = 0 -> write tst r3, #1 << 20 @ L = 0 -> write
......
...@@ -26,17 +26,18 @@ ENTRY(v6_early_abort) ...@@ -26,17 +26,18 @@ ENTRY(v6_early_abort)
ldr ip, =0x4107b36 ldr ip, =0x4107b36
mrc p15, 0, r3, c0, c0, 0 @ get processor id mrc p15, 0, r3, c0, c0, 0 @ get processor id
teq ip, r3, lsr #4 @ r0 ARM1136? teq ip, r3, lsr #4 @ r0 ARM1136?
bne do_DataAbort bne 1f
tst r5, #PSR_J_BIT @ Java? tst r5, #PSR_J_BIT @ Java?
tsteq r5, #PSR_T_BIT @ Thumb? tsteq r5, #PSR_T_BIT @ Thumb?
bne do_DataAbort bne 1f
bic r1, r1, #1 << 11 @ clear bit 11 of FSR bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction ldr r3, [r4] @ read aborted ARM instruction
ARM_BE8(rev r3, r3) ARM_BE8(rev r3, r3)
teq_ldrd tmp=ip, insn=r3 @ insn was LDRD? teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
beq do_DataAbort @ yes beq 1f @ yes
tst r3, #1 << 20 @ L = 0 -> write tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes. orreq r1, r1, #1 << 11 @ yes.
#endif #endif
1: uaccess_disable ip @ disable userspace access
b do_DataAbort b do_DataAbort
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
ENTRY(v7_early_abort) ENTRY(v7_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR mrc p15, 0, r0, c6, c0, 0 @ get FAR
uaccess_disable ip @ disable userspace access
/* /*
* V6 code adjusts the returned DFSR. * V6 code adjusts the returned DFSR.
......
...@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort) ...@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
#endif #endif
bne .data_thumb_abort bne .data_thumb_abort
ldr r8, [r4] @ read arm instruction ldr r8, [r4] @ read arm instruction
uaccess_disable ip @ disable userspace access
tst r8, #1 << 20 @ L = 1 -> write? tst r8, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes. orreq r1, r1, #1 << 11 @ yes.
and r7, r8, #15 << 24 and r7, r8, #15 << 24
...@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort) ...@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
.data_thumb_abort: .data_thumb_abort:
ldrh r8, [r4] @ read instruction ldrh r8, [r4] @ read instruction
uaccess_disable ip @ disable userspace access
tst r8, #1 << 11 @ L = 1 -> write? tst r8, #1 << 11 @ L = 1 -> write?
orreq r1, r1, #1 << 8 @ yes orreq r1, r1, #1 << 8 @ yes
and r7, r8, #15 << 12 and r7, r8, #15 << 12
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
tst \psr, #PSR_T_BIT tst \psr, #PSR_T_BIT
beq not_thumb beq not_thumb
ldrh \tmp, [\pc] @ Read aborted Thumb instruction ldrh \tmp, [\pc] @ Read aborted Thumb instruction
uaccess_disable ip @ disable userspace access
and \tmp, \tmp, # 0xfe00 @ Mask opcode field and \tmp, \tmp, # 0xfe00 @ Mask opcode field
cmp \tmp, # 0x5600 @ Is it ldrsb? cmp \tmp, # 0x5600 @ Is it ldrsb?
orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment