nexedi / linux / Commits / c2172ce2

Commit c2172ce2, authored Sep 11, 2015 by Russell King

    Merge branch 'uaccess' into fixes

parents: a4a5a737 0b61f2c0

Showing 31 changed files with 398 additions and 139 deletions (+398 -139)
arch/arm/Kconfig                               +15   -0
arch/arm/include/asm/assembler.h               +42   -0
arch/arm/include/asm/domain.h                  +46  -11
arch/arm/include/asm/futex.h                   +17   -2
arch/arm/include/asm/pgtable-2level-hwdef.h     +1   -0
arch/arm/include/asm/thread_info.h              +0   -4
arch/arm/include/asm/uaccess.h                 +80   -5
arch/arm/kernel/armksyms.c                      +3   -3
arch/arm/kernel/entry-armv.S                   +24   -8
arch/arm/kernel/entry-common.S                  +2   -0
arch/arm/kernel/entry-header.S                 +47  -65
arch/arm/kernel/head.S                          +1   -4
arch/arm/kernel/process.c                      +42   -9
arch/arm/kernel/swp_emulate.c                   +3   -0
arch/arm/kernel/traps.c                         +0   -1
arch/arm/lib/clear_user.S                       +3   -3
arch/arm/lib/copy_from_user.S                   +3   -3
arch/arm/lib/copy_to_user.S                     +3   -3
arch/arm/lib/csumpartialcopyuser.S             +14   -0
arch/arm/lib/uaccess_with_memcpy.c              +2   -2
arch/arm/mm/abort-ev4.S                         +1   -0
arch/arm/mm/abort-ev5t.S                        +3   -1
arch/arm/mm/abort-ev5tj.S                       +3   -1
arch/arm/mm/abort-ev6.S                         +5   -3
arch/arm/mm/abort-ev7.S                         +1   -0
arch/arm/mm/abort-lv4t.S                        +2   -0
arch/arm/mm/abort-macro.S                       +6   -8
arch/arm/mm/mmu.c                               +2   -2
arch/arm/mm/pgd.c                              +10   -0
arch/arm/nwfpe/entry.S                          +2   -1
arch/arm/xen/hypercall.S                       +15   -0
arch/arm/Kconfig  (view file @ c2172ce2)

@@ -1700,6 +1700,21 @@ config HIGHPTE
 	  consumed by page tables.  Setting this option will allow
 	  user-space 2nd level page tables to reside in high memory.

+config CPU_SW_DOMAIN_PAN
+	bool "Enable use of CPU domains to implement privileged no-access"
+	depends on MMU && !ARM_LPAE
+	default y
+	help
+	  Increase kernel security by ensuring that normal kernel accesses
+	  are unable to access userspace addresses.  This can help prevent
+	  use-after-free bugs becoming an exploitable privilege escalation
+	  by ensuring that magic values (such as LIST_POISON) will always
+	  fault when dereferenced.
+
+	  CPUs with low-vector mappings use a best-efforts implementation.
+	  Their lower 1MB needs to remain accessible for the vectors, but
+	  the remainder of userspace will become appropriately inaccessible.
+
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
 	depends on PERF_EVENTS
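The help text above is the design in miniature: the list poisons live in the user half of the 32-bit address space, so marking DOMAIN_USER no-access makes a dereference of a poisoned pointer trap. A small standalone C sketch (editorial, not part of the commit; poison values taken from include/linux/poison.h of this era, and a typical 3G/1G split assumed for TASK_SIZE) makes the address arithmetic concrete:

#include <stdio.h>

#define LIST_POISON1 0x00100100UL   /* list_del() poisons ->next with this */
#define LIST_POISON2 0x00200200UL   /* ...and ->prev with this */
#define TASK_SIZE    0xbf000000UL   /* user VA limit, 3G/1G split (assumed) */

int main(void)
{
	unsigned long poisons[] = { LIST_POISON1, LIST_POISON2 };

	for (int i = 0; i < 2; i++)
		printf("0x%08lx is a %s address -> %s under SW PAN\n",
		       poisons[i],
		       poisons[i] < TASK_SIZE ? "user" : "kernel",
		       poisons[i] < TASK_SIZE ? "faults" : "unaffected");
	return 0;
}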
arch/arm/include/asm/assembler.h  (view file @ c2172ce2)

@@ -445,6 +445,48 @@ THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
 #endif
 	.endm

+	.macro	uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_DISABLE
+	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_ENABLE
+	mcr	p15, 0, \tmp, c3, c0, 0
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	mrc	p15, 0, \tmp, c3, c0, 0
+	str	\tmp, [sp, #S_FRAME_SIZE]
+#endif
+	.endm
+
+	.macro	uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	ldr	r0, [sp, #S_FRAME_SIZE]
+	mcr	p15, 0, r0, c3, c0, 0
+#endif
+	.endm
+
 	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
 	.macro	ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6
arch/arm/include/asm/domain.h  (view file @ c2172ce2)

@@ -12,6 +12,7 @@

 #ifndef __ASSEMBLY__
+#include <asm/barrier.h>
 #include <asm/thread_info.h>
 #endif

 /*
@@ -34,15 +35,14 @@
  */
 #ifndef CONFIG_IO_36
 #define DOMAIN_KERNEL	0
-#define DOMAIN_TABLE	0
 #define DOMAIN_USER	1
 #define DOMAIN_IO	2
 #else
 #define DOMAIN_KERNEL	2
-#define DOMAIN_TABLE	2
 #define DOMAIN_USER	1
 #define DOMAIN_IO	0
 #endif
+#define DOMAIN_VECTORS	3

 /*
  * Domain types
@@ -55,30 +55,65 @@
 #define DOMAIN_MANAGER	1
 #endif

-#define domain_val(dom,type)	((type) << (2*(dom)))
+#define domain_mask(dom)	((3) << (2 * (dom)))
+#define domain_val(dom,type)	((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+	domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE \
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE \
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+	unsigned int domain;
+
+	asm(
+	"mrc	p15, 0, %0, c3, c0	@ get domain"
+	 : "=r" (domain)
+	 : "m" (current_thread_info()->cpu_domain));
+
+	return domain;
+}
+
 static inline void set_domain(unsigned val)
 {
 	asm volatile(
 	"mcr	p15, 0, %0, c3, c0	@ set domain"
-	  : : "r" (val));
+	  : : "r" (val) : "memory");
 	isb();
 }

+#ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)					\
 	do {							\
-	struct thread_info *thread = current_thread_info();	\
-	unsigned int domain = thread->cpu_domain;		\
-	domain &= ~domain_val(dom, DOMAIN_MANAGER);		\
-	thread->cpu_domain = domain | domain_val(dom, type);	\
-	set_domain(thread->cpu_domain);				\
+	unsigned int domain = get_domain();			\
+	domain &= ~domain_mask(dom);				\
+	domain = domain | domain_val(dom, type);		\
+	set_domain(domain);					\
 	} while (0)

 #else
-static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type)	{ }
 #endif
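Each of the 16 ARM domains occupies a 2-bit field in the DACR, which is exactly what domain_val()/domain_mask() encode. The following standalone sketch (editorial, not from the commit) recomputes the new constants, assuming the !CONFIG_IO_36 and !CONFIG_CPU_USE_DOMAINS case so that DOMAIN_MANAGER is 1:

#include <stdio.h>

/* Domain numbers and access types from the hunk above. */
enum { DOMAIN_KERNEL = 0, DOMAIN_USER = 1, DOMAIN_IO = 2, DOMAIN_VECTORS = 3 };
enum { DOMAIN_NOACCESS = 0, DOMAIN_CLIENT = 1, DOMAIN_MANAGER = 1 };

#define domain_mask(dom)      ((3) << (2 * (dom)))
#define domain_val(dom, type) ((type) << (2 * (dom)))

#define __DACR_DEFAULT \
	(domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))

int main(void)
{
	unsigned int disable = __DACR_DEFAULT |
			       domain_val(DOMAIN_USER, DOMAIN_NOACCESS);
	unsigned int enable  = __DACR_DEFAULT |
			       domain_val(DOMAIN_USER, DOMAIN_CLIENT);

	printf("DACR_UACCESS_DISABLE = 0x%02x\n", disable);  /* 0x51 */
	printf("DACR_UACCESS_ENABLE  = 0x%02x\n", enable);   /* 0x55 */
	printf("user field mask      = 0x%02x\n", domain_mask(DOMAIN_USER));
	return 0;
}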
arch/arm/include/asm/futex.h  (view file @ c2172ce2)

@@ -22,8 +22,11 @@
 #ifdef CONFIG_SMP

 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags;				\
 	smp_mb();						\
 	prefetchw(uaddr);					\
+	__ua_flags = uaccess_save_and_enable();			\
 	__asm__ __volatile__(					\
 	"1:	ldrex	%1, [%3]\n"				\
 	"	" insn "\n"					\
@@ -34,12 +37,15 @@
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})

 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret;
 	u32 val;

@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	smp_mb();
 	/* Prefetching cannot fault */
 	prefetchw(uaddr);
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "=&r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 	smp_mb();

 	*uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 #include <asm/domain.h>

 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags = uaccess_save_and_enable();	\
 	__asm__ __volatile__(					\
 	"1:	" TUSER(ldr) "	%1, [%3]\n"			\
 	"	" insn "\n"					\
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})

 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret = 0;
 	u32 val;

@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;

 	preempt_disable();
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "+r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 	*uval = val;
 	preempt_enable();
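Note why both __futex_atomic_op() variants gained a ({ ... }) wrapper: the macro body now declares __ua_flags and must run uaccess_restore() after the asm, which a plain expression cannot do. A GCC statement expression keeps the macro usable in expression position. A minimal demo of the construct (editorial, with toy stand-ins for the real hooks):

#include <stdio.h>

static unsigned int uaccess_save_and_enable(void) { return 0x51; }
static void uaccess_restore(unsigned int f) { (void)f; }

/* Like the reworked __futex_atomic_op(): declare a local, run several
 * statements, and still yield a value where an expression is expected. */
#define bracketed_op(expr) ({					\
	unsigned int __ua_flags = uaccess_save_and_enable();	\
	typeof(expr) __ret = (expr);				\
	uaccess_restore(__ua_flags);				\
	__ret;							\
})

int main(void)
{
	int x = bracketed_op(2 + 2);
	printf("%d\n", x);	/* 4 */
	return 0;
}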
arch/arm/include/asm/pgtable-2level-hwdef.h  (view file @ c2172ce2)

@@ -23,6 +23,7 @@
 #define PMD_PXNTABLE		(_AT(pmdval_t, 1) << 2)     /* v7 */
 #define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK		PMD_DOMAIN(0x0f)
 #define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
 /*
  * - section
arch/arm/include/asm/thread_info.h  (view file @ c2172ce2)

@@ -25,7 +25,6 @@
 struct task_struct;

 #include <asm/types.h>
-#include <asm/domain.h>

 typedef unsigned long mm_segment_t;

@@ -74,9 +73,6 @@ struct thread_info {
 	.flags		= 0,						\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.addr_limit	= KERNEL_DS,					\
-	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
 }

 #define init_thread_info	(init_thread_union.thread_info)
arch/arm/include/asm/uaccess.h  (view file @ c2172ce2)

@@ -49,6 +49,35 @@ struct exception_table_entry

 extern int fixup_exception(struct pt_regs *regs);

+/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel can not inadvertantly
+ * perform such accesses (eg, via list poison values) which could then
+ * be exploited for priviledge escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	unsigned int old_domain = get_domain();
+
+	/* Set the current domain access to permit user accesses */
+	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+	return old_domain;
+#else
+	return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/* Restore the user access mask */
+	set_domain(flags);
+#endif
+}
+
 /*
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
 		register typeof(x) __r2 asm("r2");			\
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			if (sizeof((x)) >= 8)				\
@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		x = (typeof(*(p))) __r2;				\
 		__e;							\
 	})
@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
 		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
+		unsigned int __ua_flags = uaccess_save_and_enable();	\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
 			__put_user_x(__r2, __p, __e, __l, 1);		\
@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		}							\
+		uaccess_restore(__ua_flags);				\
 		__e;							\
 	})
@@ -300,14 +333,17 @@ static inline void set_fs(mm_segment_t fs)
 do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
+	unsigned int __ua_flags;					\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
 	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
 	default: (__gu_val) = __get_user_bad();				\
 	}								\
+	uaccess_restore(__ua_flags);					\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
 } while (0)
@@ -381,9 +417,11 @@ do {									\
 #define __put_user_err(x, ptr, err)					\
 do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
+	unsigned int __ua_flags;					\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
 	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
@@ -391,6 +429,7 @@ do {									\
 	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
 	default: __put_user_bad();					\
 	}								\
+	uaccess_restore(__ua_flags);					\
 } while (0)
@@ -474,11 +513,46 @@ do {									\
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_from_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_to_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_clear_user(addr, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
 #else
 #define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
@@ -511,6 +585,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
 	return n;
 }

+/* These are from lib/ code, and use __get_user() and friends */
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 extern __must_check long strlen_user(const char __user *str);
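One design point worth spelling out: uaccess_save_and_enable() returns the previous DACR and uaccess_restore() puts it back verbatim, rather than working as a simple enable/disable pair. That makes bracketed regions nest correctly whatever state they start in. A toy simulation (editorial, reusing the 0x51/0x55 values computed earlier):

#include <assert.h>
#include <stdio.h>

/* Simulated DACR; user domain starts as no-access. */
static unsigned int dacr = 0x51;

static unsigned int uaccess_save_and_enable(void)
{
	unsigned int old = dacr;
	dacr = (dacr & ~(3u << 2)) | (1u << 2);	/* user -> client */
	return old;
}

static void uaccess_restore(unsigned int flags)
{
	dacr = flags;				/* put back what was there */
}

int main(void)
{
	unsigned int outer = uaccess_save_and_enable();
	unsigned int inner = uaccess_save_and_enable();	/* nested bracket */
	assert(dacr == 0x55);
	uaccess_restore(inner);
	assert(dacr == 0x55);	/* still open: inner saved the open state */
	uaccess_restore(outer);
	assert(dacr == 0x51);	/* closed again only at the outermost level */
	printf("final DACR = 0x%02x\n", dacr);
	return 0;
}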
arch/arm/kernel/armksyms.c  (view file @ c2172ce2)

@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);

-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);

 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
arch/arm/kernel/entry-armv.S  (view file @ c2172ce2)

@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
 #define SPFIX(code...)
 #endif

-	.macro	svc_entry, stack_hole=0, trace=1
+	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 #ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
 	ldmia	r0, {r3 - r5}
 	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r6, #-1			@  ""  ""      ""       ""
-	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
 	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
 	@
 	stmia	r7, {r2 - r6}

+	uaccess_save r0
+	.if \uaccess
+	uaccess_disable r0
+	.endif
+
 	.if \trace
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)
 	.align	5
 __dabt_svc:
-	svc_entry
+	svc_entry uaccess=0
 	mov	r2, sp
 	dabt_helper
 THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
 #error "sizeof(struct pt_regs) must be a multiple of 8"
 #endif

-	.macro	usr_entry, trace=1
+	.macro	usr_entry, trace=1, uaccess=1
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

+	.if \uaccess
+	uaccess_disable ip
+	.endif
+
 	@ Enable the alignment trap while in kernel mode
 ATRAP(	teq	r8, r7)
 ATRAP(	mcrne	p15, 0, r8, c1, c0, 0)
@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
 	.align	5
 __dabt_usr:
-	usr_entry
+	usr_entry uaccess=0
 	kuser_cmpxchg_check
 	mov	r2, sp
 	dabt_helper
@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
 	.align	5
 __und_usr:
-	usr_entry
+	usr_entry uaccess=0

 	mov	r2, r4
 	mov	r3, r5
@@ -484,6 +493,8 @@ __und_usr:
 1:	ldrt	r0, [r4]
 ARM_BE8(rev	r0, r0)			@ little endian instruction

+	uaccess_disable ip
+
 	@ r0 = 32-bit ARM instruction which caused the exception
 	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 	@ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@ __und_usr_thumb:
 2:	ldrht	r5, [r4]
 ARM_BE8(rev16	r5, r5)			@ little endian instruction
 	cmp	r5, #0xe800		@ 32bit instruction if xx != 0
-	blo	__und_usr_fault_16	@ 16bit undefined instruction
+	blo	__und_usr_fault_16_pan	@ 16bit undefined instruction
 3:	ldrht	r0, [r2]
 ARM_BE8(rev16	r0, r0)			@ little endian instruction
+	uaccess_disable ip
 	add	r2, r2, #2		@ r2 is PC + 2, make it PC + 4
 	str	r2, [sp, #S_PC]		@ it's a 2x16bit instr, update
 	orr	r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@ ENDPROC(no_fp)
 __und_usr_fault_32:
 	mov	r1, #4
 	b	1f
+__und_usr_fault_16_pan:
+	uaccess_disable ip
 __und_usr_fault_16:
 	mov	r1, #2
 1:	mov	r0, sp
@@ -770,6 +784,8 @@ ENTRY(__switch_to)
 	ldr	r4, [r2, #TI_TP_VALUE]
 	ldr	r5, [r2, #TI_TP_VALUE + 4]
 #ifdef CONFIG_CPU_USE_DOMAINS
+	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
+	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	switch_tls r1, r4, r5, r3, r7
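The S_FRAME_SIZE + 8 in svc_entry and the uaccess_save r0 store work together: the exception frame grows by eight bytes and the parent context's DACR is stashed in the first word above pt_regs, which is where process.c later reads it back. A sketch of the layout trick (editorial; the real ARM pt_regs has 18 words, only the pointer arithmetic matters here):

#include <stdio.h>
#include <string.h>

struct pt_regs {
	unsigned long uregs[18];
};

int main(void)
{
	/* svc_entry now makes room above pt_regs; uaccess_save stores the
	 * DACR at [sp, #S_FRAME_SIZE], i.e. the word just past the frame. */
	unsigned char frame[sizeof(struct pt_regs) + 8];
	struct pt_regs *regs = (struct pt_regs *)frame;
	unsigned int saved_dacr = 0x55;

	memcpy(regs + 1, &saved_dacr, sizeof(saved_dacr));

	/* ...which is exactly how __show_regs() retrieves it: */
	printf("DACR of parent context: 0x%02x\n", *(unsigned int *)(regs + 1));
	return 0;
}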
arch/arm/kernel/entry-common.S  (view file @ c2172ce2)

@@ -174,6 +174,8 @@ ENTRY(vector_swi)
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
 #endif

+	uaccess_disable tbl
+
 	adr	tbl, sys_call_table		@ load syscall table pointer

 #if defined(CONFIG_OABI_COMPAT)
arch/arm/kernel/entry-header.S  (view file @ c2172ce2)

@@ -196,7 +196,7 @@
 	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
 	.endm

-#ifndef CONFIG_THUMB2_KERNEL
+
 	.macro	svc_exit, rpsr, irq = 0
 	.if	\irq != 0
 	@ IRQs already off
@@ -215,6 +215,10 @@
 	blne	trace_hardirqs_off
 #endif
 	.endif
+	uaccess_restore
+
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode SVC restore
 	msr	spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -222,6 +226,20 @@
 	strex	r1, r2, [r0]			@ clear the exclusive monitor
 #endif
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#else
+	@ Thumb mode SVC restore
+	ldr	lr, [sp, #S_SP]			@ top of the stack
+	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
+
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
+
+	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
+	ldmia	sp, {r0 - r12}
+	mov	sp, lr
+	ldr	lr, [sp], #4
+	rfeia	sp!
+#endif
 	.endm

 	@
@@ -241,6 +259,9 @@
 	@ on the stack remains correct).
 	@
 	.macro	svc_exit_via_fiq
+	uaccess_restore
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode restore
 	mov	r0, sp
 	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
 				@ clobber state restored below)
@@ -250,9 +271,27 @@
 	msr	spsr_cxsf, r9
 	ldr	r0, [r0, #S_R0]
 	ldmia	r8, {pc}^
+#else
+	@ Thumb mode restore
+	add	r0, sp, #S_R2
+	ldr	lr, [sp, #S_LR]
+	ldr	sp, [sp, #S_SP]	@ abort is deadly from here onward (it will
+				@ clobber state restored below)
+	ldmia	r0, {r2 - r12}
+	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+	msr	cpsr_c, r1
+	sub	r0, #S_R2
+	add	r8, r0, #S_PC
+	ldmia	r0, {r0 - r1}
+	rfeia	r8
+#endif
 	.endm

 	.macro	restore_user_regs, fast = 0, offset = 0
+	uaccess_enable r1, isb=0
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode restore
 	mov	r2, sp
 	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
@@ -270,72 +309,16 @@
 						@ after ldm {}^
 	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-#else	/* CONFIG_THUMB2_KERNEL */
-	.macro	svc_exit, rpsr, irq = 0
-	.if	\irq != 0
-	@ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
-	@ The parent context IRQs must have been enabled to get here in
-	@ the first place, so there's no point checking the PSR I bit.
-	bl	trace_hardirqs_on
-#endif
-	.else
-	@ IRQs off again before pulling preserved data off the stack
-	disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
-	tst	\rpsr, #PSR_I_BIT
-	bleq	trace_hardirqs_on
-	tst	\rpsr, #PSR_I_BIT
-	blne	trace_hardirqs_off
-#endif
-	.endif
-	ldr	lr, [sp, #S_SP]			@ top of the stack
-	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
-
-	@ We must avoid clrex due to Cortex-A15 erratum #830321
-	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
-
-	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
-	ldmia	sp, {r0 - r12}
-	mov	sp, lr
-	ldr	lr, [sp], #4
-	rfeia	sp!
-	.endm
-
-	@
-	@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
-	@
-	@ For full details see non-Thumb implementation above.
-	@
-	.macro	svc_exit_via_fiq
-	add	r0, sp, #S_R2
-	ldr	lr, [sp, #S_LR]
-	ldr	sp, [sp, #S_SP]	@ abort is deadly from here onward (it will
-				@ clobber state restored below)
-	ldmia	r0, {r2 - r12}
-	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
-	msr	cpsr_c, r1
-	sub	r0, #S_R2
-	add	r8, r0, #S_PC
-	ldmia	r0, {r0 - r1}
-	rfeia	r8
-	.endm
-
-#ifdef CONFIG_CPU_V7M
-	/*
-	 * Note we don't need to do clrex here as clearing the local monitor is
-	 * part of each exception entry and exit sequence.
-	 */
-	.macro	restore_user_regs, fast = 0, offset = 0
+#elif defined(CONFIG_CPU_V7M)
+	@ V7M restore.
+	@ Note that we don't need to do clrex here as clearing the local
+	@ monitor is part of the exception entry and exit sequence.
 	.if	\offset
 	add	sp, #\offset
 	.endif
 	v7m_exception_slow_exit ret_r0 = \fast
-	.endm
-#else	/* ifdef CONFIG_CPU_V7M */
-	.macro	restore_user_regs, fast = 0, offset = 0
+#else
+	@ Thumb mode restore
 	mov	r2, sp
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
@@ -353,9 +336,8 @@
 	.endif
 	add	sp, sp, #S_FRAME_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-#endif	/* ifdef CONFIG_CPU_V7M / else */
+#endif	/* !CONFIG_THUMB2_KERNEL */
 	.endm

 /*
  * Context tracking subsystem.  Used to instrument transitions
arch/arm/kernel/head.S  (view file @ c2172ce2)

@@ -464,10 +464,7 @@ __enable_mmu:
 #ifdef CONFIG_ARM_LPAE
 	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
 #else
-	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+	mov	r5, #DACR_INIT
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
 #endif
arch/arm/kernel/process.c  (view file @ c2172ce2)

@@ -129,12 +129,36 @@ void __show_regs(struct pt_regs *regs)
 	buf[4] = '\0';

 #ifndef CONFIG_CPU_V7M
-	printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
-		buf, interrupts_enabled(regs) ? "n" : "ff",
-		fast_interrupts_enabled(regs) ? "n" : "ff",
-		processor_modes[processor_mode(regs)],
-		isa_modes[isa_mode(regs)],
-		get_fs() == get_ds() ? "kernel" : "user");
+	{
+		unsigned int domain = get_domain();
+		const char *segment;
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		/*
+		 * Get the domain register for the parent context. In user
+		 * mode, we don't save the DACR, so lets use what it should
+		 * be. For other modes, we place it after the pt_regs struct.
+		 */
+		if (user_mode(regs))
+			domain = DACR_UACCESS_ENABLE;
+		else
+			domain = *(unsigned int *)(regs + 1);
+#endif
+
+		if ((domain & domain_mask(DOMAIN_USER)) ==
+		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+			segment = "none";
+		else if (get_fs() == get_ds())
+			segment = "kernel";
+		else
+			segment = "user";
+
+		printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
+			buf, interrupts_enabled(regs) ? "n" : "ff",
+			fast_interrupts_enabled(regs) ? "n" : "ff",
+			processor_modes[processor_mode(regs)],
+			isa_modes[isa_mode(regs)], segment);
+	}
 #else
 	printk("xPSR: %08lx\n", regs->ARM_cpsr);
 #endif
@@ -146,10 +170,9 @@ void __show_regs(struct pt_regs *regs)
 	buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
 	{
-		unsigned int transbase, dac;
+		unsigned int transbase, dac = get_domain();
 		asm("mrc p15, 0, %0, c2, c0\n\t"
-		    "mrc p15, 0, %1, c3, c0"
-			: "=r" (transbase), "=r" (dac));
+			: "=r" (transbase));
 		snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
 			transbase, dac);
 	}
@@ -210,6 +233,16 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

+#ifdef CONFIG_CPU_USE_DOMAINS
+	/*
+	 * Copy the initial value of the domain access control register
+	 * from the current thread: thread->addr_limit will have been
+	 * copied from the current thread via setup_thread_stack() in
+	 * kernel/fork.c
+	 */
+	thread->cpu_domain = get_domain();
+#endif
+
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
 		childregs->ARM_r0 = 0;
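To interpret the DAC value that __show_regs() now prints, each 2-bit field decodes independently. A standalone decoder (editorial) mirroring the Segment logic above:

#include <stdio.h>

static const char *domain_type(unsigned int dacr, unsigned int dom)
{
	switch ((dacr >> (2 * dom)) & 3) {
	case 0: return "noaccess";
	case 1: return "client";
	case 3: return "manager";
	default: return "reserved";
	}
}

int main(void)
{
	static const char *names[] = { "kernel", "user", "io", "vectors" };
	unsigned int dacr = 0x51;	/* e.g. a value saved after pt_regs */

	for (unsigned int dom = 0; dom < 4; dom++)
		printf("domain %-7s : %s\n", names[dom], domain_type(dacr, dom));

	/* __show_regs() prints "Segment none" exactly when the user field
	 * decodes to noaccess: */
	printf("Segment %s\n",
	       ((dacr >> 2) & 3) == 0 ? "none" : "kernel or user");
	return 0;
}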
arch/arm/kernel/swp_emulate.c  (view file @ c2172ce2)

@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,

 	while (1) {
 		unsigned long temp;
+		unsigned int __ua_flags;

+		__ua_flags = uaccess_save_and_enable();
 		if (type == TYPE_SWPB)
 			__user_swpb_asm(*data, address, res, temp);
 		else
 			__user_swp_asm(*data, address, res, temp);
+		uaccess_restore(__ua_flags);

 		if (likely(res != -EAGAIN) || signal_pending(current))
 			break;
arch/arm/kernel/traps.c  (view file @ c2172ce2)

@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
 	kuser_init(vectors_base);

 	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
 	/*
 	 * on V7-M there is no need to copy the vector table to a dedicated
arch/arm/lib/clear_user.S  (view file @ c2172ce2)

@@ -12,14 +12,14 @@
 		.text

-/* Prototype: int    __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  */
 ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
@@ -44,7 +44,7 @@ USER(		strnebt	r2, [r0])
 		mov	r0, #0
 		ldmfd	sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)

 		.pushsection .text.fixup,"ax"
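The ENTRY/WEAK pairing above is what lets uaccess_with_memcpy.c take over: arm_clear_user is only a weak symbol, so when CONFIG_UACCESS_WITH_MEMCPY builds a strong C definition, the linker silently prefers it, while __clear_user_std keeps a strong name for the assembly version. The same mechanism in miniature (editorial):

#include <stdio.h>

/* A weak default, like WEAK(arm_clear_user) in the assembly above. */
__attribute__((weak)) void clear_user_impl(void)
{
	puts("weak default (assembly version)");
}

int main(void)
{
	/* With only this file linked, the weak definition runs.  Linking
	 * in another object that defines a strong clear_user_impl() -- the
	 * analogue of uaccess_with_memcpy.c's arm_clear_user() -- replaces
	 * it without any source change here. */
	clear_user_impl();
	return 0;
}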
arch/arm/lib/copy_from_user.S  (view file @ c2172ce2)

@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_from_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_from_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -89,11 +89,11 @@
 	.text

-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)

 #include "copy_template.S"

-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)

 	.pushsection .fixup,"ax"
 	.align 0
arch/arm/lib/copy_to_user.S  (view file @ c2172ce2)

@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_to_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_to_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -93,11 +93,11 @@
 	.text

 ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)

 #include "copy_template.S"

-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
 ENDPROC(__copy_to_user_std)

 	.pushsection .text.fixup,"ax"
arch/arm/lib/csumpartialcopyuser.S  (view file @ c2172ce2)

@@ -17,6 +17,19 @@
 		.text

+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		.macro	save_regs
+		mrc	p15, 0, ip, c3, c0, 0
+		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		uaccess_enable ip
+		.endm
+
+		.macro	load_regs
+		ldmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		mcr	p15, 0, ip, c3, c0, 0
+		ret	lr
+		.endm
+#else
 		.macro	save_regs
 		stmfd	sp!, {r1, r2, r4 - r8, lr}
 		.endm
@@ -24,6 +37,7 @@
 		.macro	load_regs
 		ldmfd	sp!, {r1, r2, r4 - r8, pc}
 		.endm
+#endif

 		.macro	load1b,	reg1
 		ldrusr	\reg1, r0, 1
arch/arm/lib/uaccess_with_memcpy.c  (view file @ c2172ce2)

@@ -136,7 +136,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 }

 unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	/*
 	 * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ __clear_user_memset(void __user *addr, unsigned long n)
 	return n;
 }

-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
 	/* See rational for this in __copy_to_user() above. */
 	if (n < 64)
arch/arm/mm/abort-ev4.S  (view file @ c2172ce2)

@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	ldr	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable userspace access
 	bic	r1, r1, #1 << 11 | 1 << 10	@ clear bits 11 and 10 of FSR
 	tst	r3, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
arch/arm/mm/abort-ev5t.S  (view file @ c2172ce2)

@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable user access
 	bic	r1, r1, #1 << 11		@ clear bits 11 of FSR
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ check write
 	orreq	r1, r1, #1 << 11
 	b	do_DataAbort
arch/arm/mm/abort-ev5tj.S  (view file @ c2172ce2)

@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
 	bne	do_DataAbort
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
-	do_ldrd_abort tmp=ip, insn=r3
+	uaccess_disable ip			@ disable userspace access
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 	b	do_DataAbort
arch/arm/mm/abort-ev6.S  (view file @ c2172ce2)

@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
 	ldr	ip, =0x4107b36
 	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
 	teq	ip, r3, lsr #4			@ r0 ARM1136?
-	bne	do_DataAbort
+	bne	1f
 	tst	r5, #PSR_J_BIT			@ Java?
 	tsteq	r5, #PSR_T_BIT			@ Thumb?
-	bne	do_DataAbort
+	bne	1f
 	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
 	ldr	r3, [r4]			@ read aborted ARM instruction
 ARM_BE8(rev	r3, r3)			@ little endian instruction

-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	1f				@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 #endif
+1:	uaccess_disable ip			@ disable userspace access
 	b	do_DataAbort
arch/arm/mm/abort-ev7.S  (view file @ c2172ce2)

@@ -15,6 +15,7 @@
 ENTRY(v7_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+	uaccess_disable ip			@ disable userspace access

 	/*
	 * V6 code adjusts the returned DFSR.
arch/arm/mm/abort-lv4t.S  (view file @ c2172ce2)

@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
 #endif
 	bne	.data_thumb_abort
 	ldr	r8, [r4]			@ read arm instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
 	and	r7, r8, #15 << 24
@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
 .data_thumb_abort:
 	ldrh	r8, [r4]			@ read instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 11			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 8			@ yes
 	and	r7, r8, #15 << 12
arch/arm/mm/abort-macro.S  (view file @ c2172ce2)

@@ -13,6 +13,7 @@
 	tst	\psr, #PSR_T_BIT
 	beq	not_thumb
 	ldrh	\tmp, [\pc]			@ Read aborted Thumb instruction
+	uaccess_disable ip			@ disable userspace access
 	and	\tmp, \tmp, #0xfe00		@ Mask opcode field
 	cmp	\tmp, #0x5600			@ Is it ldrsb?
 	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes
@@ -29,12 +30,9 @@ not_thumb:
  *   [7:4] == 1101
  *    [20] == 0
  */
-	.macro	do_ldrd_abort, tmp, insn
-	tst	\insn, #0x0e100000		@ [27:25,20] == 0
-	bne	not_ldrd
-	and	\tmp, \insn, #0x000000f0	@ [7:4] == 1101
-	cmp	\tmp, #0x000000d0
-	beq	do_DataAbort
-not_ldrd:
+	.macro	teq_ldrd, tmp, insn
+	mov	\tmp, #0x0e100000
+	orr	\tmp, #0x000000f0
+	and	\tmp, \insn, \tmp
+	teq	\tmp, #0x000000d0
 	.endm
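The rewritten teq_ldrd condenses do_ldrd_abort's branchy mask-and-compare into one flag-setting test: an A32 LDRD has bits [27:25] = 000, bit [20] = 0 and bits [7:4] = 1101, i.e. (insn & 0x0e1000f0) == 0x000000d0. The same check in C (editorial; the sample encodings are assumptions taken from the A32 reference manual):

#include <stdio.h>

/* teq_ldrd in C: true when insn decodes as an ARM LDRD. */
static int is_ldrd(unsigned int insn)
{
	return (insn & 0x0e1000f0) == 0x000000d0;
}

int main(void)
{
	/* 0xe1c200d0: ldrd r0, r1, [r2] (assumed encoding) */
	printf("e1c200d0 -> %s\n", is_ldrd(0xe1c200d0) ? "LDRD" : "not LDRD");
	/* 0xe5910000: ldr r0, [r1] */
	printf("e5910000 -> %s\n", is_ldrd(0xe5910000) ? "LDRD" : "not LDRD");
	return 0;
}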
arch/arm/mm/mmu.c  (view file @ c2172ce2)

@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			     L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			     L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
arch/arm/mm/pgd.c  (view file @ c2172ce2)

@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 		if (!new_pte)
 			goto no_pte;

+#ifndef CONFIG_ARM_LPAE
+		/*
+		 * Modify the PTE pointer to have the correct domain. This
+		 * needs to be the vectors domain to avoid the low vectors
+		 * being unmapped.
+		 */
+		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
 		init_pud = pud_offset(init_pgd, 0);
 		init_pmd = pmd_offset(init_pud, 0);
 		init_pte = pte_offset_map(init_pmd, 0);
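For reference, PMD_DOMAIN()/PMD_DOMAIN_MASK (added in pgtable-2level-hwdef.h above) address the domain field in bits [8:5] of a first-level descriptor; pgd_alloc() repoints the user pgd's vector entry at DOMAIN_VECTORS so the low vectors stay mapped. The same clear-then-set in plain C (editorial; the sample descriptor value is made up):

#include <stdio.h>

typedef unsigned int pmdval_t;

#define PMD_DOMAIN(x)   ((pmdval_t)(x) << 5)	/* bits [8:5] of a 1st-level entry */
#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f)
#define DOMAIN_USER     1
#define DOMAIN_VECTORS  3

int main(void)
{
	/* A made-up table descriptor whose domain field says DOMAIN_USER. */
	pmdval_t pmd = 0x12340001 | PMD_DOMAIN(DOMAIN_USER);

	/* The same two steps as pgd_alloc(): clear the field, set VECTORS. */
	pmd &= ~PMD_DOMAIN_MASK;
	pmd |= PMD_DOMAIN(DOMAIN_VECTORS);

	printf("domain field now = %u (expect %u)\n",
	       (pmd & PMD_DOMAIN_MASK) >> 5, DOMAIN_VECTORS);
	return 0;
}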
arch/arm/nwfpe/entry.S  (view file @ c2172ce2)

@@ -95,9 +95,10 @@ emulate:
 	reteq	r4			@ no, return failure

 next:
+	uaccess_enable r3
 .Lx1:	ldrt	r6, [r5], #4		@ get the next instruction and
 					@ increment PC
-
+	uaccess_disable r3
 	and	r2, r6, #0x0F000000	@ test for FP insns
 	teq	r2, #0x0C000000
 	teqne	r2, #0x0D000000
arch/arm/xen/hypercall.S  (view file @ c2172ce2)

@@ -98,8 +98,23 @@ ENTRY(privcmd_call)
 	mov	r1, r2
 	mov	r2, r3
 	ldr	r3, [sp, #8]
+	/*
+	 * Privcmd calls are issued by the userspace. We need to allow the
+	 * kernel to access the userspace memory before issuing the hypercall.
+	 */
+	uaccess_enable r4
+
+	/* r4 is loaded now as we use it as scratch register before */
 	ldr	r4, [sp, #4]
+
 	__HVC(XEN_IMM)
+
+	/*
+	 * Disable userspace access from kernel. This is fine to do it
+	 * unconditionally as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
+	 * called before.
+	 */
+	uaccess_disable r4

 	ldm	sp!, {r4}
 	ret	lr
 ENDPROC(privcmd_call);