Kirill Smelkov / linux / Commits

Commit 53f1d9af, authored Apr 11, 2014 by Russell King
Merge branches 'fixes' and 'misc' into for-next

Parents: bce5669b, c9d347e0, b5b6b5f5
Showing 11 changed files with 123 additions and 63 deletions (+123 -63):

    arch/arm/include/asm/assembler.h    +42   -0
    arch/arm/include/asm/cputype.h      +19   -0
    arch/arm/kernel/crash_dump.c         +1   -1
    arch/arm/kernel/entry-header.S       +0  -11
    arch/arm/kernel/pj4-cp0.c            +4   -0
    arch/arm/kernel/process.c            +2   -1
    arch/arm/kernel/traps.c              +1   -0
    arch/arm/mach-vexpress/dcscb.c       +9   -4
    arch/arm/mm/dump.c                  +32  -15
    arch/arm/vfp/entry.S                 +7  -18
    arch/arm/vfp/vfphw.S                 +6  -13
arch/arm/include/asm/assembler.h

@@ -23,6 +23,7 @@
 #include <asm/ptrace.h>
 #include <asm/domain.h>
 #include <asm/opcodes-virt.h>
+#include <asm/asm-offsets.h>

 #define IOMEM(x)	(x)

@@ -174,6 +175,47 @@
 	restore_irqs_notrace \oldcpsr
 	.endm

+/*
+ * Get current thread_info.
+ */
+	.macro	get_thread_info, rd
+ ARM(	mov	\rd, sp, lsr #13	)
+ THUMB(	mov	\rd, sp			)
+ THUMB(	lsr	\rd, \rd, #13		)
+	mov	\rd, \rd, lsl #13
+	.endm
+
+/*
+ * Increment/decrement the preempt count.
+ */
+#ifdef CONFIG_PREEMPT_COUNT
+	.macro	inc_preempt_count, ti, tmp
+	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
+	add	\tmp, \tmp, #1			@ increment it
+	str	\tmp, [\ti, #TI_PREEMPT]
+	.endm
+
+	.macro	dec_preempt_count, ti, tmp
+	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
+	sub	\tmp, \tmp, #1			@ decrement it
+	str	\tmp, [\ti, #TI_PREEMPT]
+	.endm
+
+	.macro	dec_preempt_count_ti, ti, tmp
+	get_thread_info \ti
+	dec_preempt_count \ti, \tmp
+	.endm
+#else
+	.macro	inc_preempt_count, ti, tmp
+	.endm
+
+	.macro	dec_preempt_count, ti, tmp
+	.endm
+
+	.macro	dec_preempt_count_ti, ti, tmp
+	.endm
+#endif
+
 #define USER(x...)				\
 9999:	x;					\
 	.pushsection __ex_table,"a";	\
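The new get_thread_info macro recovers the current thread_info pointer by rounding the stack pointer down to the stack alignment: on 32-bit ARM the kernel stack is 8 KiB and thread_info sits at its base, hence the two shifts by #13. A minimal userspace sketch of the same arithmetic, assuming the usual 8 KiB THREAD_SIZE:

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE (1UL << 13)	/* 8 KiB kernel stack, hence "#13" */

    /* thread_info lives at the bottom of the THREAD_SIZE-aligned kernel
     * stack, so clearing the low 13 bits of the stack pointer finds it --
     * what "mov rd, sp, lsr #13; mov rd, rd, lsl #13" does in two steps. */
    static uintptr_t thread_info_from_sp(uintptr_t sp)
    {
        return sp & ~(uintptr_t)(THREAD_SIZE - 1);
    }

    int main(void)
    {
        uintptr_t sp = 0xc7a13e54UL;	/* arbitrary example value */

        printf("sp=%#lx -> thread_info=%#lx\n",
               (unsigned long)sp, (unsigned long)thread_info_from_sp(sp));
        return 0;	/* prints sp=0xc7a13e54 -> thread_info=0xc7a12000 */
    }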
arch/arm/include/asm/cputype.h

@@ -221,4 +221,23 @@ static inline int cpu_is_xsc3(void)
 #define cpu_is_xscale()	1
 #endif

+/*
+ * Marvell's PJ4 core is based on V7 version. It has some modification
+ * for coprocessor setting. For this reason, we need a way to distinguish
+ * it.
+ */
+#ifndef CONFIG_CPU_PJ4
+#define cpu_is_pj4()	0
+#else
+static inline int cpu_is_pj4(void)
+{
+	unsigned int id;
+
+	id = read_cpuid_id();
+	if ((id & 0xfffffff0) == 0x562f5840)
+		return 1;
+
+	return 0;
+}
+#endif
 #endif
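The constant encodes the MIDR (main ID register) fields: the mask drops only the bottom revision nibble, so implementer (0x56, Marvell), variant, architecture, and primary part number (0x584) must all match. A standalone sketch of the comparison, with a hypothetical MIDR value:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the cpu_is_pj4() test: mask off the low revision nibble
     * of the MIDR and compare against Marvell PJ4's identity. */
    static int is_pj4(uint32_t midr)
    {
        return (midr & 0xfffffff0) == 0x562f5840;
    }

    int main(void)
    {
        uint32_t midr = 0x562f5842;	/* hypothetical PJ4, revision 2 */

        printf("implementer=%#x part=%#x pj4=%d\n",
               midr >> 24, (midr >> 4) & 0xfff, is_pj4(midr));
        return 0;
    }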
arch/arm/kernel/crash_dump.c

@@ -39,7 +39,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!csize)
 		return 0;

-	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+	vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
 	if (!vaddr)
 		return -ENOMEM;
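This one-liner matters on LPAE systems, where physical addresses exceed 32 bits: `pfn << PAGE_SHIFT` is evaluated in 32-bit arithmetic and wraps for page frames above 4 GiB, while `__pfn_to_phys()` widens to `phys_addr_t` before shifting. A small host-side sketch of the difference (values illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        /* A page frame sitting above the 4 GiB mark, as happens on LPAE. */
        uint32_t pfn = 0x100000;

        /* Old expression: 32-bit unsigned shift wraps modulo 2^32. */
        uint32_t old_addr = pfn << PAGE_SHIFT;			/* 0x0 */

        /* What __pfn_to_phys() achieves: widen before shifting. */
        uint64_t new_addr = (uint64_t)pfn << PAGE_SHIFT;	/* 0x100000000 */

        printf("old=%#x new=%#llx\n", old_addr, (unsigned long long)new_addr);
        return 0;
    }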
arch/arm/kernel/entry-header.S

@@ -236,11 +236,6 @@
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm

-	.macro	get_thread_info, rd
-	mov	\rd, sp, lsr #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
 	@
 	@ 32-bit wide "mov pc, reg"
 	@

@@ -306,12 +301,6 @@
 	.endm
 #endif	/* ifdef CONFIG_CPU_V7M / else */

-	.macro	get_thread_info, rd
-	mov	\rd, sp
-	lsr	\rd, \rd, #13
-	mov	\rd, \rd, lsl #13
-	.endm
-
 	@
 	@ 32-bit wide "mov pc, reg"
 	@
arch/arm/kernel/pj4-cp0.c

@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <asm/thread_notify.h>
+#include <asm/cputype.h>

 static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
 {

@@ -80,6 +81,9 @@ static int __init pj4_cp0_init(void)
 {
 	u32 cp_access;

+	if (!cpu_is_pj4())
+		return 0;
+
 	cp_access = pj4_cp_access_read() & ~0xf;
 	pj4_cp_access_write(cp_access);
arch/arm/kernel/process.c

@@ -39,6 +39,7 @@
 #include <asm/processor.h>
 #include <asm/thread_notify.h>
 #include <asm/stacktrace.h>
+#include <asm/system_misc.h>
 #include <asm/mach/time.h>
 #include <asm/tls.h>

@@ -100,7 +101,7 @@ void soft_restart(unsigned long addr)
 	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

 	/* Disable interrupts first */
-	local_irq_disable();
+	raw_local_irq_disable();
 	local_fiq_disable();

 	/* Disable the L2 if we're the last man standing. */
arch/arm/kernel/traps.c

@@ -445,6 +445,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
 			current->comm, task_pid_nr(current), pc);
+		__show_regs(regs);
 		dump_instr(KERN_INFO, regs);
 	}
 #endif
arch/arm/mach-vexpress/dcscb.c

@@ -137,11 +137,16 @@ static void dcscb_power_down(void)
 		v7_exit_coherency_flush(all);

 		/*
-		 * This is a harmless no-op.  On platforms with a real
-		 * outer cache this might either be needed or not,
-		 * depending on where the outer cache sits.
+		 * A full outer cache flush could be needed at this point
+		 * on platforms with such a cache, depending on where the
+		 * outer cache sits. In some cases the notion of a "last
+		 * cluster standing" would need to be implemented if the
+		 * outer cache is shared across clusters. In any case, when
+		 * the outer cache needs flushing, there is no concurrent
+		 * access to the cache controller to worry about and no
+		 * special locking besides what is already provided by the
+		 * MCPM state machinery is needed.
 		 */
-		outer_flush_all();

 		/*
 		 * Disable cluster-level coherency by masking
arch/arm/mm/dump.c

@@ -120,34 +120,51 @@ static const struct prot_bits pte_bits[] = {
 };

 static const struct prot_bits section_bits[] = {
-#ifndef CONFIG_ARM_LPAE
-	/* These are approximate */
+#ifdef CONFIG_ARM_LPAE
 	{
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
-		.val	= 0,
+		.mask	= PMD_SECT_USER,
+		.val	= PMD_SECT_USER,
+		.set	= "USR",
+	}, {
+		.mask	= PMD_SECT_RDONLY,
+		.val	= PMD_SECT_RDONLY,
+		.set	= "ro",
+		.clear	= "RW",
+#elif __LINUX_ARM_ARCH__ >= 6
+	{
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
 		.set	= "    ro",
 	}, {
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
 		.val	= PMD_SECT_AP_WRITE,
 		.set	= "    RW",
 	}, {
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
 		.val	= PMD_SECT_AP_READ,
 		.set	= "USR ro",
 	}, {
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
 		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
 		.set	= "USR RW",
-#else
+#else	/* ARMv4/ARMv5 */
+	/* These are approximate */
 	{
-		.mask	= PMD_SECT_USER,
-		.val	= PMD_SECT_USER,
-		.set	= "USR",
+		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= 0,
+		.set	= "    ro",
 	}, {
-		.mask	= PMD_SECT_RDONLY,
-		.val	= PMD_SECT_RDONLY,
-		.set	= "ro",
-		.clear	= "RW",
+		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_AP_WRITE,
+		.set	= "    RW",
+	}, {
+		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_AP_READ,
+		.set	= "USR ro",
+	}, {
+		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.set	= "USR RW",
 #endif
 	}, {
 		.mask	= PMD_SECT_XN,
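The table drives a simple match loop: an attribute prints as `.set` when `(pmd & mask) == val`, otherwise as `.clear` if one is given. That is why the ARMv6+ entries must fold PMD_SECT_APX into the mask; matching on the AP read/write bits alone misdecodes v6/v7 section permissions. A toy model of the matching, using made-up bit values rather than the kernel's PMD_SECT_* constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Minimal model of dump.c's prot_bits matching. */
    struct prot_bits {
        uint32_t mask;
        uint32_t val;
        const char *set;
        const char *clear;
    };

    static void dump_prot(uint32_t pmd, const struct prot_bits *bits, int n)
    {
        for (int i = 0; i < n; i++) {
            const char *s = (pmd & bits[i].mask) == bits[i].val
                            ? bits[i].set : bits[i].clear;
            if (s)
                printf("%s ", s);
        }
        printf("\n");
    }

    int main(void)
    {
        /* Hypothetical two-entry table in the style of section_bits[]. */
        const struct prot_bits bits[] = {
            { .mask = 0x3, .val = 0x1, .set = "USR ro" },
            { .mask = 0x4, .val = 0x4, .set = "XN", .clear = "x" },
        };

        dump_prot(0x5, bits, 2);	/* prints: USR ro XN */
        return 0;
    }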
arch/arm/vfp/entry.S

@@ -8,9 +8,12 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>

 @  VFP entry point.
 @

@@ -22,11 +25,7 @@
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT_COUNT
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	add	r11, r4, #1		@ increment it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	inc_preempt_count r10, r4
 	enable_irq
 	ldr	r4, .LCvfp
 	ldr	r11, [r10, #TI_CPU]	@ CPU number

@@ -35,12 +34,7 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)

 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info	r10
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	sub	r11, r4, #1		@ decrement it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	dec_preempt_count_ti r10, r4
 	mov	pc, lr
 ENDPROC(vfp_null_entry)

@@ -53,12 +47,7 @@ ENDPROC(vfp_null_entry)
 	__INIT

 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info	r10
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	sub	r11, r4, #1		@ decrement it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	dec_preempt_count_ti r10, r4
 	ldr	r0, VFP_arch_address
 	str	r0, [r0]		@ set to non-zero value
 	mov	pc, r9			@ we have handled the fault
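This diff and the vfphw.S one below apply the same refactor: the CONFIG_PREEMPT_COUNT conditional now lives once in assembler.h, where the no-preempt configuration defines the macros to expand to nothing, so every call site sheds its #ifdef and each handler exit path stays a single line. A userspace analogue of that design choice (names here are illustrative, not the kernel's):

    #include <stdio.h>

    /* Hide the config conditional inside the macro definition so call
     * sites stay unconditional -- as assembler.h now does for
     * inc_preempt_count/dec_preempt_count_ti. */
    #define CONFIG_PREEMPT_COUNT 1

    #if CONFIG_PREEMPT_COUNT
    #define inc_preempt_count(ti)	((ti)->preempt_count++)
    #define dec_preempt_count(ti)	((ti)->preempt_count--)
    #else
    #define inc_preempt_count(ti)	do { } while (0)
    #define dec_preempt_count(ti)	do { } while (0)
    #endif

    struct thread_info { int preempt_count; };

    int main(void)
    {
        struct thread_info ti = { 0 };

        inc_preempt_count(&ti);	/* on entry to the handler */
        /* ... handle the exception ... */
        dec_preempt_count(&ti);	/* on every exit path */

        printf("preempt_count=%d\n", ti.preempt_count);
        return 0;
    }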
arch/arm/vfp/vfphw.S

@@ -14,10 +14,13 @@
  * r10 points at the start of the private FP workspace in the thread structure
  * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
  */
+#include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
 #include <linux/kern_levels.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>

 	.macro	DBGSTR, str
 #ifdef DEBUG

@@ -179,12 +182,7 @@ vfp_hw_state_valid:
 					@ else it's one 32-bit instruction, so
 					@ always subtract 4 from the following
 					@ instruction address.
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info	r10
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	sub	r11, r4, #1		@ decrement it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	dec_preempt_count_ti r10, r4
 	mov	pc, r9			@ we think we have handled things

@@ -203,12 +201,7 @@ look_for_VFP_exceptions:
 					@ not recognised by VFP

 	DBGSTR	"not VFP"
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info	r10
-	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
-	sub	r11, r4, #1		@ decrement it
-	str	r11, [r10, #TI_PREEMPT]
-#endif
+	dec_preempt_count_ti r10, r4
 	mov	pc, lr

 process_exception: