Commit 2741ecb4, authored Feb 25, 2010 by Russell King

    Merge branch 'misc2' into devel

Parents: bc85e585, 5de813b6

Showing 72 changed files with 1092 additions and 562 deletions (+1092, -562):
Documentation/arm/memory.txt (+5, -1)
arch/arm/Kconfig (+24, -0)
arch/arm/boot/compressed/Makefile (+1, -5)
arch/arm/boot/compressed/decompress.c (+45, -0)
arch/arm/boot/compressed/misc.c (+11, -98)
arch/arm/boot/compressed/vmlinux.lds.in (+7, -1)
arch/arm/configs/omap_4430sdp_defconfig (+3, -0)
arch/arm/include/asm/atomic.h (+228, -0)
arch/arm/include/asm/io.h (+9, -2)
arch/arm/include/asm/mach/time.h (+0, -8)
arch/arm/include/asm/memory.h (+13, -10)
arch/arm/include/asm/mmu.h (+1, -0)
arch/arm/include/asm/mmu_context.h (+15, -0)
arch/arm/include/asm/pgtable-nommu.h (+2, -2)
arch/arm/include/asm/setup.h (+0, -12)
arch/arm/include/asm/spinlock.h (+23, -13)
arch/arm/include/asm/system.h (+1, -2)
arch/arm/include/asm/thread_info.h (+2, -1)
arch/arm/kernel/Makefile (+1, -0)
arch/arm/kernel/leds.c (+115, -0)
arch/arm/kernel/ptrace.c (+35, -18)
arch/arm/kernel/setup.c (+31, -48)
arch/arm/kernel/time.c (+0, -178)
arch/arm/kernel/traps.c (+25, -10)
arch/arm/kernel/vmlinux.lds.S (+0, -4)
arch/arm/mach-davinci/include/mach/hardware.h (+1, -1)
arch/arm/mach-davinci/io.c (+1, -1)
arch/arm/mach-dove/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-ep93xx/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-footbridge/common.c (+4, -3)
arch/arm/mach-gemini/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-iop13xx/io.c (+4, -3)
arch/arm/mach-iop32x/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-iop33x/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-ixp2000/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-ixp23xx/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-ixp4xx/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-kirkwood/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-lh7a40x/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-loki/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-mmp/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-msm/io.c (+2, -1)
arch/arm/mach-mv78xx0/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-nomadik/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-ns9xxx/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-omap2/board-4430sdp.c (+54, -0)
arch/arm/mach-orion5x/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-pxa/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-realview/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-s3c24a0/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-sa1100/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-u300/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-ux500/include/mach/vmalloc.h (+1, -1)
arch/arm/mach-w90x900/include/mach/vmalloc.h (+1, -1)
arch/arm/mm/Kconfig (+4, -4)
arch/arm/mm/alignment.c (+1, -5)
arch/arm/mm/cache-l2x0.c (+62, -10)
arch/arm/mm/context.c (+110, -14)
arch/arm/mm/dma-mapping.c (+0, -3)
arch/arm/mm/init.c (+99, -14)
arch/arm/mm/ioremap.c (+34, -23)
arch/arm/mm/mmu.c (+21, -20)
arch/arm/mm/nommu.c (+12, -0)
arch/arm/plat-iop/io.c (+2, -1)
arch/arm/plat-mxc/include/mach/vmalloc.h (+1, -1)
arch/arm/plat-omap/Kconfig (+1, -0)
arch/arm/plat-omap/include/plat/omap44xx.h (+1, -0)
arch/arm/plat-omap/io.c (+1, -1)
arch/arm/plat-s3c/include/mach/vmalloc.h (+1, -1)
arch/arm/plat-stmp3xxx/include/mach/vmalloc.h (+1, -1)
arch/arm/vfp/vfpmodule.c (+36, -19)
drivers/serial/amba-pl011.c (+19, -0)
Documentation/arm/memory.txt
@@ -59,7 +59,11 @@ PAGE_OFFSET	high_memory-1	Kernel direct-mapped RAM region.
 				This maps the platforms RAM, and typically
 				maps all platform RAM in a 1:1 relationship.

-TASK_SIZE	PAGE_OFFSET-1	Kernel module space
+PKMAP_BASE	PAGE_OFFSET-1	Permanent kernel mappings
+				One way of mapping HIGHMEM pages into kernel
+				space.
+
+MODULES_VADDR	MODULES_END-1	Kernel module space
 				Kernel modules inserted via insmod are
 				placed here using dynamic mappings.
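In short: with highmem support, the region just below PAGE_OFFSET is now documented as the permanent kmap (PKMAP) window, and loadable modules get a dedicated MODULES_VADDR..MODULES_END range rather than being described as starting at TASK_SIZE.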
arch/arm/Kconfig
@@ -12,6 +12,7 @@ config ARM
 	select HAVE_IDE
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
+	select GENERIC_ATOMIC64 if (!CPU_32v6K)
 	select HAVE_OPROFILE
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL)
@@ -54,6 +55,9 @@ config HAVE_TCM
 	bool
 	select GENERIC_ALLOCATOR

+config HAVE_PROC_CPU
+	bool
+
 config NO_IOPORT
 	bool
@@ -163,6 +167,11 @@ config ARCH_MTD_XIP
 config GENERIC_HARDIRQS_NO__DO_IRQ
 	def_bool y

+config ARM_L1_CACHE_SHIFT_6
+	bool
+	help
+	  Setting ARM L1 cache line size to 64 Bytes.
+
 if OPROFILE

 config OPROFILE_ARMV6
@@ -649,6 +658,7 @@ config ARCH_S5PC1XX
 	select GENERIC_GPIO
 	select HAVE_CLK
 	select CPU_V7
+	select ARM_L1_CACHE_SHIFT_6
 	help
 	  Samsung S5PC1XX series based systems
@@ -938,6 +948,19 @@ config ARM_ERRATA_460075
 	  ACTLR register. Note that setting specific bits in the ACTLR register
 	  may not be available in non-secure mode.

+config PL310_ERRATA_588369
+	bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
+	depends on CACHE_L2X0 && ARCH_OMAP4
+	help
+	   The PL310 L2 cache controller implements three types of Clean &
+	   Invalidate maintenance operations: by Physical Address
+	   (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
+	   They are architecturally defined to behave as the execution of a
+	   clean operation followed immediately by an invalidate operation,
+	   both performing to the same memory location. This functionality
+	   is not correctly implemented in PL310 as clean lines are not
+	   invalidated as a result of these operations. Note that this errata
+	   uses Texas Instrument's secure monitor api.
+
 endmenu

 source "arch/arm/common/Kconfig"
@@ -1255,6 +1278,7 @@ config ALIGNMENT_TRAP
 	bool
 	depends on CPU_CP15_MMU
 	default y if !ARCH_EBSA110
+	select HAVE_PROC_CPU if PROC_FS
 	help
 	  ARM processors cannot fetch/store information which is not
 	  naturally aligned on the bus, i.e., a 4 byte fetch must start at an
arch/arm/boot/compressed/Makefile
@@ -5,7 +5,7 @@
 #
 HEAD	= head.o
-OBJS	= misc.o
+OBJS	= misc.o decompress.o
 FONTC	= $(srctree)/drivers/video/console/font_acorn_8x8.c

 #
@@ -106,10 +106,6 @@ lib1funcs = $(obj)/lib1funcs.o
 $(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S FORCE
 	$(call cmd,shipped)

-# Don't allow any static data in misc.o, which
-# would otherwise mess up our GOT table
-CFLAGS_misc.o := -Dstatic=
-
 $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
 		$(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE
 	$(call if_changed,ld)
arch/arm/boot/compressed/decompress.c (new file, mode 100644)

#define _LINUX_STRING_H_

#include <linux/compiler.h>	/* for inline */
#include <linux/types.h>	/* for size_t */
#include <linux/stddef.h>	/* for NULL */
#include <linux/linkage.h>
#include <asm/string.h>

extern unsigned long free_mem_ptr;
extern unsigned long free_mem_end_ptr;
extern void error(char *);

#define STATIC static

#define ARCH_HAS_DECOMP_WDOG

/* Diagnostic functions */
#ifdef DEBUG
#  define Assert(cond,msg) {if(!(cond)) error(msg);}
#  define Trace(x) fprintf x
#  define Tracev(x) {if (verbose) fprintf x ;}
#  define Tracevv(x) {if (verbose>1) fprintf x ;}
#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
#else
#  define Assert(cond,msg)
#  define Trace(x)
#  define Tracev(x)
#  define Tracevv(x)
#  define Tracec(c,x)
#  define Tracecv(c,x)
#endif

#ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c"
#endif

#ifdef CONFIG_KERNEL_LZO
#include "../../../../lib/decompress_unlzo.c"
#endif

void do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
{
	decompress(input, len, NULL, NULL, output, NULL, error);
}
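For context, a minimal sketch of the contract do_decompress() expects of a caller (the names here are hypothetical; the real caller is decompress_kernel() in misc.c, shown next):

	/* Hypothetical caller sketch -- not part of the commit. */
	static void demo_error(char *msg)
	{
		/* the decompressor cannot recover; a real handler prints and hangs */
		while (1)
			;
	}

	static void demo_unpack(u8 *in, int len, u8 *out)
	{
		/* decompress in[0..len) into out, reporting failure via demo_error */
		do_decompress(in, len, out, demo_error);
	}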
arch/arm/boot/compressed/misc.c
@@ -23,8 +23,8 @@ unsigned int __machine_arch_type;
 #include <linux/compiler.h>	/* for inline */
 #include <linux/types.h>	/* for size_t */
 #include <linux/stddef.h>	/* for NULL */
-#include <asm/string.h>
 #include <linux/linkage.h>
+#include <asm/string.h>
 #include <asm/unaligned.h>
@@ -117,57 +117,7 @@ static void putstr(const char *ptr)
 #endif

 #define __ptr_t void *

 #define memzero(s,n) __memzero(s,n)

-/*
- * Optimised C version of memzero for the ARM.
- */
-void __memzero (__ptr_t s, size_t n)
-{
-	union { void *vp; unsigned long *ulp; unsigned char *ucp; } u;
-	int i;
-
-	u.vp = s;
-
-	for (i = n >> 5; i > 0; i--) {
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-	}
-
-	if (n & 1 << 4) {
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-	}
-
-	if (n & 1 << 3) {
-		*u.ulp++ = 0;
-		*u.ulp++ = 0;
-	}
-
-	if (n & 1 << 2)
-		*u.ulp++ = 0;
-
-	if (n & 1 << 1) {
-		*u.ucp++ = 0;
-		*u.ucp++ = 0;
-	}
-
-	if (n & 1)
-		*u.ucp++ = 0;
-}
-
-static inline __ptr_t memcpy(__ptr_t __dest, __const __ptr_t __src,
-			size_t __n)
+void *memcpy(void *__dest, __const void *__src, size_t __n)
 {
 	int i = 0;
 	unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src;
@@ -204,59 +154,20 @@ static inline __ptr_t memcpy(__ptr_t __dest, __const __ptr_t __src,
 /*
  * gzip delarations
  */
-#define STATIC static
-
-/* Diagnostic functions */
-#ifdef DEBUG
-#  define Assert(cond,msg) {if(!(cond)) error(msg);}
-#  define Trace(x) fprintf x
-#  define Tracev(x) {if (verbose) fprintf x ;}
-#  define Tracevv(x) {if (verbose>1) fprintf x ;}
-#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-#else
-#  define Assert(cond,msg)
-#  define Trace(x)
-#  define Tracev(x)
-#  define Tracevv(x)
-#  define Tracec(c,x)
-#  define Tracecv(c,x)
-#endif
-
-static void error(char *m);
-
 extern char input_data[];
 extern char input_data_end[];

-static unsigned char *output_data;
-static unsigned long output_ptr;
+static void error(char *m);
+
+unsigned char *output_data;
+unsigned long output_ptr;

 static void putstr(const char *);
-static unsigned long free_mem_ptr;
-static unsigned long free_mem_end_ptr;

-#ifdef STANDALONE_DEBUG
-#define NO_INFLATE_MALLOC
-#endif
-
-#define ARCH_HAS_DECOMP_WDOG
-
-#ifdef CONFIG_KERNEL_GZIP
-#include "../../../../lib/decompress_inflate.c"
-#endif
-
-#ifdef CONFIG_KERNEL_LZO
-#include "../../../../lib/decompress_unlzo.c"
-#endif
+unsigned long free_mem_ptr;
+unsigned long free_mem_end_ptr;

 #ifndef arch_error
 #define arch_error(x)
 #endif

-static void error(char *x)
+void error(char *x)
 {
 	arch_error(x);
@@ -272,6 +183,8 @@ asmlinkage void __div0(void)
 	error("Attempting division by 0!");
 }

+extern void do_decompress(u8 *input, int len, u8 *output,
+			  void (*error)(char *x));
+
 #ifndef STANDALONE_DEBUG
 unsigned long
@@ -292,8 +205,8 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
 	output_ptr = get_unaligned_le32(tmp);

 	putstr("Uncompressing Linux...");
-	decompress(input_data, input_data_end - input_data,
-			NULL, NULL, output_data, NULL, error);
+	do_decompress(input_data, input_data_end - input_data,
+			output_data, error);
 	putstr(" done, booting the kernel.\n");
 	return output_ptr;
 }
arch/arm/boot/compressed/vmlinux.lds.in
@@ -14,6 +14,13 @@ SECTIONS
   /DISCARD/ : {
     *(.ARM.exidx*)
     *(.ARM.extab*)
+    /*
+     * Discard any r/w data - this produces a link error if we have any,
+     * which is required for PIC decompression.  Local data generates
+     * GOTOFF relocations, which prevents it being relocated independently
+     * of the text/got segments.
+     */
+    *(.data)
   }

   . = TEXT_START;
@@ -40,7 +47,6 @@ SECTIONS
   .got			: { *(.got) }
   _got_end = .;
   .got.plt		: { *(.got.plt) }
-  .data			: { *(.data) }
   _edata = .;

   . = BSS_START;
arch/arm/configs/omap_4430sdp_defconfig
@@ -242,10 +242,13 @@ CONFIG_CPU_CP15_MMU=y
 # CONFIG_CPU_DCACHE_DISABLE is not set
 # CONFIG_CPU_BPREDICT_DISABLE is not set
 CONFIG_HAS_TLS_REG=y
+CONFIG_OUTER_CACHE=y
+CONFIG_CACHE_L2X0=y
 CONFIG_ARM_L1_CACHE_SHIFT=5
 # CONFIG_ARM_ERRATA_430973 is not set
 # CONFIG_ARM_ERRATA_458693 is not set
 # CONFIG_ARM_ERRATA_460075 is not set
+CONFIG_PL310_ERRATA_588369=y
 CONFIG_ARM_GIC=y
 #
arch/arm/include/asm/atomic.h
@@ -235,6 +235,234 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()

+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+	u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+static inline u64 atomic64_read(atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrexd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter)
+	);
+
+	return result;
+}
+
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	u64 tmp;
+
+	__asm__ __volatile__("@ atomic64_set\n"
+"1:	ldrexd	%0, %H0, [%1]\n"
+"	strexd	%0, %2, %H2, [%1]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_add\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	adds	%0, %0, %3\n"
+"	adc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_return\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	adds	%0, %0, %3\n"
+"	adc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_sub\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, %3\n"
+"	sbc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_sub_return\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, %3\n"
+"	sbc	%H0, %H0, %H3\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+{
+	u64 oldval;
+	unsigned long res;
+
+	smp_mb();
+
+	do {
+		__asm__ __volatile__("@ atomic64_cmpxchg\n"
+		"ldrexd		%1, %H1, [%2]\n"
+		"mov		%0, #0\n"
+		"teq		%1, %3\n"
+		"teqeq		%H1, %H3\n"
+		"strexdeq	%0, %4, %H4, [%2]"
+		: "=&r" (res), "=&r" (oldval)
+		: "r" (&ptr->counter), "r" (old), "r" (new)
+		: "cc");
+	} while (res);
+
+	smp_mb();
+
+	return oldval;
+}
+
+static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_xchg\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%1, %3, %H3, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&ptr->counter), "r" (new)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	subs	%0, %0, #1\n"
+"	sbc	%H0, %H0, #0\n"
+"	teq	%H0, #0\n"
+"	bmi	2f\n"
+"	strexd	%1, %0, %H0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+{
+	u64 val;
+	unsigned long tmp;
+	int ret = 1;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_unless\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	teq	%0, %4\n"
+"	teqeq	%H0, %H4\n"
+"	moveq	%1, #0\n"
+"	beq	2f\n"
+"	adds	%0, %0, %5\n"
+"	adc	%H0, %H0, %H5\n"
+"	strexd	%2, %0, %H0, [%3]\n"
+"	teq	%2, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (val), "=&r" (ret), "=&r" (tmp)
+	: "r" (&v->counter), "r" (u), "r" (a)
+	: "cc");
+
+	if (ret)
+		smp_mb();
+
+	return ret;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+
+#else /* !CONFIG_GENERIC_ATOMIC64 */
+#include <asm-generic/atomic64.h>
+#endif
+#include <asm-generic/atomic-long.h>
 #endif
 #endif
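As a usage note: on CPUs with the K extension (ldrexd/strexd), the new atomic64_t gives lock-free 64-bit counters; other CPUs fall back to GENERIC_ATOMIC64, selected in the Kconfig hunk above. A minimal sketch of how a driver might use it (the counter and function names are hypothetical):

	#include <asm/atomic.h>

	static atomic64_t demo_bytes = ATOMIC64_INIT(0);	/* hypothetical counter */

	static void demo_account(unsigned int len)
	{
		atomic64_add(len, &demo_bytes);		/* one ldrexd/strexd retry loop */
	}

	static u64 demo_total(void)
	{
		return atomic64_read(&demo_bytes);	/* tear-free even on 32-bit ARM */
	}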
arch/arm/include/asm/io.h
@@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
 /*
  * __arm_ioremap takes CPU physical address.
  * __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
+ * The _caller variety takes a __builtin_return_address(0) value for
+ * /proc/vmalloc to use - and should only be used in non-inline functions.
  */
-extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
+	size_t, unsigned int, void *);
+extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+	void *);
+
+extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
 extern void __iounmap(volatile void __iomem *addr);

 /*
arch/arm/include/asm/mach/time.h
@@ -46,12 +46,4 @@ struct sys_timer {
 extern struct sys_timer *system_timer;
 extern void timer_tick(void);

-/*
- * Kernel time keeping support.
- */
-struct timespec;
-extern int (*set_rtc)(void);
-extern void save_time_delta(struct timespec *delta, struct timespec *rtc);
-extern void restore_time_delta(struct timespec *delta, struct timespec *rtc);
-
 #endif
arch/arm/include/asm/memory.h
@@ -76,6 +76,17 @@
  */
 #define IOREMAP_MAX_ORDER	24

+/*
+ * Size of DMA-consistent memory region.  Must be multiple of 2M,
+ * between 2MB and 14MB inclusive.
+ */
+#ifndef CONSISTENT_DMA_SIZE
+#define CONSISTENT_DMA_SIZE	SZ_2M
+#endif
+
+#define CONSISTENT_END		(0xffe00000UL)
+#define CONSISTENT_BASE		(CONSISTENT_END - CONSISTENT_DMA_SIZE)
+
 #else /* CONFIG_MMU */

 /*
@@ -93,11 +104,11 @@
 #endif

 #ifndef PHYS_OFFSET
-#define PHYS_OFFSET		(CONFIG_DRAM_BASE)
+#define PHYS_OFFSET		UL(CONFIG_DRAM_BASE)
 #endif

 #ifndef END_MEM
-#define END_MEM			(CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE)
+#define END_MEM			(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif

 #ifndef PAGE_OFFSET
@@ -112,14 +123,6 @@
 #endif /* !CONFIG_MMU */

-/*
- * Size of DMA-consistent memory region.  Must be multiple of 2M,
- * between 2MB and 14MB inclusive.
- */
-#ifndef CONSISTENT_DMA_SIZE
-#define CONSISTENT_DMA_SIZE	SZ_2M
-#endif
-
 /*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
arch/arm/include/asm/mmu.h
@@ -6,6 +6,7 @@
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 	unsigned int id;
+	spinlock_t id_lock;
 #endif
 	unsigned int kvm_seq;
 } mm_context_t;
arch/arm/include/asm/mmu_context.h
@@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm);
 #define ASID_FIRST_VERSION	(1 << ASID_BITS)

 extern unsigned int cpu_last_asid;
+#ifdef CONFIG_SMP
+DECLARE_PER_CPU(struct mm_struct *, current_mm);
+#endif

 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void __new_context(struct mm_struct *mm);

 static inline void check_context(struct mm_struct *mm)
 {
+	/*
+	 * This code is executed with interrupts enabled. Therefore,
+	 * mm->context.id cannot be updated to the latest ASID version
+	 * on a different CPU (and condition below not triggered)
+	 * without first getting an IPI to reset the context. The
+	 * alternative is to take a read_lock on mm->context.id_lock
+	 * (after changing its type to rwlock_t).
+	 */
 	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
 		__new_context(mm);
@@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		__flush_icache_all();
 #endif
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+#ifdef CONFIG_SMP
+		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
+		*crt_mm = next;
+#endif
 		check_context(next);
 		cpu_switch_mm(next->pgd, next);
 		if (cache_is_vivt())
arch/arm/include/asm/pgtable-nommu.h
@@ -86,8 +86,8 @@ extern unsigned int kobjsize(const void *objp);
  * All 32bit addresses are effectively valid for vmalloc...
  * Sort of meaningless for non-VM targets.
  */
-#define	VMALLOC_START	0
-#define	VMALLOC_END	0xffffffff
+#define	VMALLOC_START	0UL
+#define	VMALLOC_END	0xffffffffUL

 #define FIRST_USER_ADDRESS      (0)
arch/arm/include/asm/setup.h
@@ -223,18 +223,6 @@ extern struct meminfo meminfo;
 #define bank_phys_end(bank)	((bank)->start + (bank)->size)
 #define bank_phys_size(bank)	(bank)->size

-/*
- * Early command line parameters.
- */
-struct early_params {
-	const char *arg;
-	void (*fn)(char **p);
-};
-
-#define __early_param(name,fn)					\
-static struct early_params __early_##fn __used			\
-__attribute__((__section__(".early_param.init"))) = { name, fn }
-
 #endif  /*  __KERNEL__  */

 #endif
arch/arm/include/asm/spinlock.h
@@ -5,6 +5,22 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif

+static inline void dsb_sev(void)
+{
+#if __LINUX_ARM_ARCH__ >= 7
+	__asm__ __volatile__ (
+		"dsb\n"
+		"sev"
+	);
+#elif defined(CONFIG_CPU_32v6K)
+	__asm__ __volatile__ (
+		"mcr p15, 0, %0, c7, c10, 4\n"
+		"sev"
+		: : "r" (0)
+	);
+#endif
+}
+
 /*
  * ARMv6 Spin-locking.
  *
@@ -69,13 +85,11 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 "	str	%1, [%0]\n"
-#ifdef CONFIG_CPU_32v6K
-"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
-"	sev"
-#endif
 	:
 	: "r" (&lock->lock), "r" (0)
 	: "cc");
+
+	dsb_sev();
 }

 /*
@@ -132,13 +146,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 	__asm__ __volatile__(
 	"str	%1, [%0]\n"
-#ifdef CONFIG_CPU_32v6K
-"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
-"	sev\n"
-#endif
 	:
 	: "r" (&rw->lock), "r" (0)
 	: "cc");
+
+	dsb_sev();
 }

 /* write_can_lock - would write_trylock() succeed? */
@@ -188,14 +200,12 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 "	strex	%1, %0, [%2]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
-#ifdef CONFIG_CPU_32v6K
-"\n	cmp	%0, #0\n"
-"	mcreq   p15, 0, %0, c7, c10, 4\n"
-"	seveq"
-#endif
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
 	: "cc");
+
+	if (tmp == 0)
+		dsb_sev();
 }

 static inline int arch_read_trylock(arch_rwlock_t *rw)
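The factored-out dsb_sev() pairs with the WFE-based wait on the lock side: the barrier makes the unlocking store visible before SEV wakes any waiters. A simplified sketch of the waiting side for reference (close to, but not copied from, the kernel's arch_spin_lock of this era):

	static inline void demo_spin_lock(arch_spinlock_t *lock)
	{
		unsigned long tmp;

		__asm__ __volatile__(
	"1:	ldrex	%0, [%1]\n"	/* read the lock word exclusively */
	"	teq	%0, #0\n"
	"	wfene\n"		/* if taken, sleep until someone does SEV */
	"	strexeq	%0, %2, [%1]\n"	/* free: try to claim it */
	"	teqeq	%0, #0\n"
	"	bne	1b"		/* lock taken or store lost the race: retry */
		: "=&r" (tmp)
		: "r" (&lock->lock), "r" (1)
		: "cc");

		smp_mb();
	}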
arch/arm/include/asm/system.h
@@ -73,8 +73,7 @@ extern unsigned int mem_fclk_21285;

 struct pt_regs;

-void die(const char *msg, struct pt_regs *regs, int err)
-		__attribute__((noreturn));
+void die(const char *msg, struct pt_regs *regs, int err);

 struct siginfo;
 void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
arch/arm/include/asm/thread_info.h
@@ -115,7 +115,8 @@ extern void iwmmxt_task_restore(struct thread_info *, void *);
 extern void iwmmxt_task_release(struct thread_info *);
 extern void iwmmxt_task_switch(struct thread_info *);

-extern void vfp_sync_state(struct thread_info *thread);
+extern void vfp_sync_hwstate(struct thread_info *);
+extern void vfp_flush_hwstate(struct thread_info *);

 #endif
arch/arm/kernel/Makefile
@@ -17,6 +17,7 @@ obj-y		:= compat.o elf.o entry-armv.o entry-common.o irq.o \
 		   process.o ptrace.o return_address.o setup.o signal.o \
 		   sys_arm.o stacktrace.o time.o traps.o

+obj-$(CONFIG_LEDS)		+= leds.o
 obj-$(CONFIG_OC_ETM)		+= etm.o

 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
arch/arm/kernel/leds.c (new file, mode 100644)

/*
 * LED support code, ripped out of arch/arm/kernel/time.c
 *
 *  Copyright (C) 1994-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sysdev.h>

#include <asm/leds.h>

static void dummy_leds_event(led_event_t evt)
{
}

void (*leds_event)(led_event_t) = dummy_leds_event;

struct leds_evt_name {
	const char	name[8];
	int		on;
	int		off;
};

static const struct leds_evt_name evt_names[] = {
	{ "amber", led_amber_on, led_amber_off },
	{ "blue",  led_blue_on,  led_blue_off  },
	{ "green", led_green_on, led_green_off },
	{ "red",   led_red_on,   led_red_off   },
};

static ssize_t leds_store(struct sys_device *dev,
			struct sysdev_attribute *attr,
			const char *buf, size_t size)
{
	int ret = -EINVAL, len = strcspn(buf, " ");

	if (len > 0 && buf[len] == '\0')
		len--;

	if (strncmp(buf, "claim", len) == 0) {
		leds_event(led_claim);
		ret = size;
	} else if (strncmp(buf, "release", len) == 0) {
		leds_event(led_release);
		ret = size;
	} else {
		int i;

		for (i = 0; i < ARRAY_SIZE(evt_names); i++) {
			if (strlen(evt_names[i].name) != len ||
			    strncmp(buf, evt_names[i].name, len) != 0)
				continue;
			if (strncmp(buf+len, " on", 3) == 0) {
				leds_event(evt_names[i].on);
				ret = size;
			} else if (strncmp(buf+len, " off", 4) == 0) {
				leds_event(evt_names[i].off);
				ret = size;
			}
			break;
		}
	}
	return ret;
}

static SYSDEV_ATTR(event, 0200, NULL, leds_store);

static int leds_suspend(struct sys_device *dev, pm_message_t state)
{
	leds_event(led_stop);
	return 0;
}

static int leds_resume(struct sys_device *dev)
{
	leds_event(led_start);
	return 0;
}

static int leds_shutdown(struct sys_device *dev)
{
	leds_event(led_halted);
	return 0;
}

static struct sysdev_class leds_sysclass = {
	.name		= "leds",
	.shutdown	= leds_shutdown,
	.suspend	= leds_suspend,
	.resume		= leds_resume,
};

static struct sys_device leds_device = {
	.id		= 0,
	.cls		= &leds_sysclass,
};

static int __init leds_init(void)
{
	int ret;
	ret = sysdev_class_register(&leds_sysclass);
	if (ret == 0)
		ret = sysdev_register(&leds_device);
	if (ret == 0)
		ret = sysdev_create_file(&leds_device, &attr_event);
	return ret;
}

device_initcall(leds_init);

EXPORT_SYMBOL(leds_event);
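Usage note: once registered, the sysdev attribute is driven entirely from userspace. Writing "claim", "release", or one of amber/blue/green/red followed by " on" or " off" to the event file (under the "leds" sysdev class, so presumably /sys/devices/system/leds/leds0/event) fires the corresponding leds_event() transition; the exact path is an assumption based on the class and device names above, not stated in the commit.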
arch/arm/kernel/ptrace.c
@@ -499,10 +499,41 @@ static struct undef_hook thumb_break_hook = {
 	.fn		= break_trap,
 };

+static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
+{
+	unsigned int instr2;
+	void __user *pc;
+
+	/* Check the second half of the instruction.  */
+	pc = (void __user *)(instruction_pointer(regs) + 2);
+
+	if (processor_mode(regs) == SVC_MODE) {
+		instr2 = *(u16 *) pc;
+	} else {
+		get_user(instr2, (u16 __user *)pc);
+	}
+
+	if (instr2 == 0xa000) {
+		ptrace_break(current, regs);
+		return 0;
+	} else {
+		return 1;
+	}
+}
+
+static struct undef_hook thumb2_break_hook = {
+	.instr_mask	= 0xffff,
+	.instr_val	= 0xf7f0,
+	.cpsr_mask	= PSR_T_BIT,
+	.cpsr_val	= PSR_T_BIT,
+	.fn		= thumb2_break_trap,
+};
+
 static int __init ptrace_break_init(void)
 {
 	register_undef_hook(&arm_break_hook);
 	register_undef_hook(&thumb_break_hook);
+	register_undef_hook(&thumb2_break_hook);
 	return 0;
 }
@@ -669,7 +700,7 @@ static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data)
 	union vfp_state *vfp = &thread->vfpstate;
 	struct user_vfp __user *ufp = data;

-	vfp_sync_state(thread);
+	vfp_sync_hwstate(thread);

 	/* copy the floating point registers */
 	if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs,
@@ -692,7 +723,7 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data)
 	union vfp_state *vfp = &thread->vfpstate;
 	struct user_vfp __user *ufp = data;

-	vfp_sync_state(thread);
+	vfp_sync_hwstate(thread);

 	/* copy the floating point registers */
 	if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs,
@@ -703,6 +734,8 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data)
 	if (get_user(vfp->hard.fpscr, &ufp->fpscr))
 		return -EFAULT;

+	vfp_flush_hwstate(thread);
+
 	return 0;
 }
 #endif
@@ -712,26 +745,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	int ret;

 	switch (request) {
-		/*
-		 * read word at location "addr" in the child process.
-		 */
-		case PTRACE_PEEKTEXT:
-		case PTRACE_PEEKDATA:
-			ret = generic_ptrace_peekdata(child, addr, data);
-			break;
-
 		case PTRACE_PEEKUSR:
 			ret = ptrace_read_user(child, addr, (unsigned long __user *)data);
 			break;

-		/*
-		 * write the word at location addr.
-		 */
-		case PTRACE_POKETEXT:
-		case PTRACE_POKEDATA:
-			ret = generic_ptrace_pokedata(child, addr, data);
-			break;
-
 		case PTRACE_POKEUSR:
 			ret = ptrace_write_user(child, addr, data);
 			break;
arch/arm/kernel/setup.c
@@ -24,6 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 #include <linux/fs.h>
+#include <linux/proc_fs.h>

 #include <asm/unified.h>
 #include <asm/cpu.h>
@@ -117,7 +118,7 @@ EXPORT_SYMBOL(elf_platform);
 static const char *cpu_name;
 static const char *machine_name;
-static char __initdata command_line[COMMAND_LINE_SIZE];
+static char __initdata cmd_line[COMMAND_LINE_SIZE];

 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
 static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
@@ -417,10 +418,11 @@ static int __init arm_add_memory(unsigned long start, unsigned long size)
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
-static void __init early_mem(char **p)
+static int __init early_mem(char *p)
 {
 	static int usermem __initdata = 0;
 	unsigned long size, start;
+	char *endp;

 	/*
 	 * If the user specifies memory size, we
@@ -433,52 +435,15 @@ static void __init early_mem(char **p)
 	}

 	start = PHYS_OFFSET;
-	size  = memparse(*p, p);
-	if (**p == '@')
-		start = memparse(*p + 1, p);
+	size  = memparse(p, &endp);
+	if (*endp == '@')
+		start = memparse(endp + 1, NULL);

 	arm_add_memory(start, size);
-}
-__early_param("mem=", early_mem);

-/*
- * Initial parsing of the command line.
- */
-static void __init parse_cmdline(char **cmdline_p, char *from)
-{
-	char c = ' ', *to = command_line;
-	int len = 0;
-
-	for (;;) {
-		if (c == ' ') {
-			extern struct early_params __early_begin, __early_end;
-			struct early_params *p;
-
-			for (p = &__early_begin; p < &__early_end; p++) {
-				int arglen = strlen(p->arg);
-
-				if (memcmp(from, p->arg, arglen) == 0) {
-					if (to != command_line)
-						to -= 1;
-					from += arglen;
-					p->fn(&from);
-
-					while (*from != ' ' && *from != '\0')
-						from++;
-					break;
-				}
-			}
-		}
-		c = *from++;
-		if (!c)
-			break;
-		if (COMMAND_LINE_SIZE <= ++len)
-			break;
-		*to++ = c;
-	}
-	*to = '\0';
-	*cmdline_p = command_line;
+	return 0;
 }
+early_param("mem", early_mem);

 static void __init
 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
@@ -739,9 +704,15 @@ void __init setup_arch(char **cmdline_p)
 	init_mm.end_data   = (unsigned long) _edata;
 	init_mm.brk	   = (unsigned long) _end;

-	memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
-	boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
-	parse_cmdline(cmdline_p, from);
+	/* parse_early_param needs a boot_command_line */
+	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
+
+	/* populate cmd_line too for later use, preserving boot_command_line */
+	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
+	*cmdline_p = cmd_line;
+
+	parse_early_param();
+
 	paging_init(mdesc);
 	request_standard_resources(&meminfo, mdesc);
@@ -782,9 +753,21 @@ static int __init topology_init(void)
 	return 0;
 }
 subsys_initcall(topology_init);

+#ifdef CONFIG_HAVE_PROC_CPU
+static int __init proc_cpu_init(void)
+{
+	struct proc_dir_entry *res;
+
+	res = proc_mkdir("cpu", NULL);
+	if (!res)
+		return -ENOMEM;
+	return 0;
+}
+fs_initcall(proc_cpu_init);
+#endif
+
 static const char *hwcap_str[] = {
 	"swp",
 	"half",
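With ARM now on the generic early_param() machinery, a boot-time hook no longer takes a char ** and no longer embeds the '=' in its name. A minimal sketch of the new style (the option name and body are hypothetical):

	/* parses "demo=<value>" from the kernel command line -- hypothetical hook */
	static int __init early_demo(char *p)
	{
		unsigned long val = simple_strtoul(p, NULL, 0);

		(void)val;	/* a real hook would stash this somewhere */
		return 0;	/* non-zero reports a parse error */
	}
	early_param("demo", early_demo);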
arch/arm/kernel/time.c
@@ -10,11 +10,6 @@
 *
 *  This file contains the ARM-specific time handling details:
 *  reading the RTC at bootup, etc...
- *
- *  1994-07-02  Alan Modra
- *              fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- *  1998-12-20  Updated NTP code according to technical memorandum Jan '96
- *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -77,11 +72,6 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 #endif

-/*
- * hook for setting the RTC's idea of the current time.
- */
-int (*set_rtc)(void);
-
 #ifndef CONFIG_GENERIC_TIME
 static unsigned long dummy_gettimeoffset(void)
 {
@@ -89,140 +79,6 @@ static unsigned long dummy_gettimeoffset(void)
 }
 #endif

-static unsigned long next_rtc_update;
-
-/*
- * If we have an externally synchronized linux clock, then update
- * CMOS clock accordingly every ~11 minutes.  set_rtc() has to be
- * called as close as possible to 500 ms before the new second
- * starts.
- */
-static inline void do_set_rtc(void)
-{
-	if (!ntp_synced() || set_rtc == NULL)
-		return;
-
-	if (next_rtc_update &&
-	    time_before((unsigned long)xtime.tv_sec, next_rtc_update))
-		return;
-
-	if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) &&
-	    xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1))
-		return;
-
-	if (set_rtc())
-		/*
-		 * rtc update failed.  Try again in 60s
-		 */
-		next_rtc_update = xtime.tv_sec + 60;
-	else
-		next_rtc_update = xtime.tv_sec + 660;
-}
-
-#ifdef CONFIG_LEDS
-
-static void dummy_leds_event(led_event_t evt)
-{
-}
-
-void (*leds_event)(led_event_t) = dummy_leds_event;
-
-	[... the LED event-name table, leds_store() sysdev attribute,
-	     suspend/resume/shutdown handlers and leds_init() removed here
-	     are identical to the code added in arch/arm/kernel/leds.c ...]
-
-device_initcall(leds_init);
-
-EXPORT_SYMBOL(leds_event);
-#endif

 #ifdef CONFIG_LEDS_TIMER
 static inline void do_leds(void)
 {
@@ -295,39 +151,6 @@ int do_settimeofday(struct timespec *tv)
 EXPORT_SYMBOL(do_settimeofday);
 #endif /* !CONFIG_GENERIC_TIME */

-/**
- * save_time_delta - Save the offset between system time and RTC time
- * @delta: pointer to timespec to store delta
- * @rtc: pointer to timespec for current RTC time
- *
- * Return a delta between the system time and the RTC time, such
- * that system time can be restored later with restore_time_delta()
- */
-void save_time_delta(struct timespec *delta, struct timespec *rtc)
-{
-	set_normalized_timespec(delta,
-				xtime.tv_sec - rtc->tv_sec,
-				xtime.tv_nsec - rtc->tv_nsec);
-}
-EXPORT_SYMBOL(save_time_delta);
-
-/**
- * restore_time_delta - Restore the current system time
- * @delta: delta returned by save_time_delta()
- * @rtc: pointer to timespec for current RTC time
- */
-void restore_time_delta(struct timespec *delta, struct timespec *rtc)
-{
-	struct timespec ts;
-
-	set_normalized_timespec(&ts,
-				delta->tv_sec + rtc->tv_sec,
-				delta->tv_nsec + rtc->tv_nsec);
-	do_settimeofday(&ts);
-}
-EXPORT_SYMBOL(restore_time_delta);
-
 #ifndef CONFIG_GENERIC_CLOCKEVENTS
 /*
  * Kernel system timer support.
@@ -336,7 +159,6 @@ void timer_tick(void)
 {
 	profile_tick(CPU_PROFILING);
 	do_leds();
-	do_set_rtc();
 	write_seqlock(&xtime_lock);
 	do_timer(1);
 	write_sequnlock(&xtime_lock);
arch/arm/kernel/traps.c
@@ -12,15 +12,17 @@
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
-#include <linux/module.h>
 #include <linux/signal.h>
-#include <linux/spinlock.h>
 #include <linux/personality.h>
 #include <linux/kallsyms.h>
-#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
 #include <linux/hardirq.h>
 #include <linux/kdebug.h>
+#include <linux/module.h>
+#include <linux/kexec.h>
+#include <linux/delay.h>
 #include <linux/init.h>
-#include <linux/uaccess.h>

 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
@@ -224,14 +226,21 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
 #define S_SMP ""
 #endif

-static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
+static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
 {
 	struct task_struct *tsk = thread->task;
 	static int die_counter;
+	int ret;

 	printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
 	       str, err, ++die_counter);
 	sysfs_printk_last_file();
+
+	/* trap and error numbers are mostly meaningless on ARM */
+	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
+	if (ret == NOTIFY_STOP)
+		return ret;
+
 	print_modules();
 	__show_regs(regs);
 	printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
@@ -243,6 +252,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
 		dump_backtrace(regs, tsk);
 		dump_instr(KERN_EMERG, regs);
 	}
+
+	return ret;
 }

 DEFINE_SPINLOCK(die_lock);
@@ -250,16 +261,21 @@ DEFINE_SPINLOCK(die_lock);
 /*
  * This function is protected against re-entrancy.
  */
-NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
+void die(const char *str, struct pt_regs *regs, int err)
 {
 	struct thread_info *thread = current_thread_info();
+	int ret;

 	oops_enter();

 	spin_lock_irq(&die_lock);
 	console_verbose();
 	bust_spinlocks(1);
-	__die(str, err, thread, regs);
+	ret = __die(str, err, thread, regs);
+
+	if (regs && kexec_should_crash(thread->task))
+		crash_kexec(regs);
+
 	bust_spinlocks(0);
 	add_taint(TAINT_DIE);
 	spin_unlock_irq(&die_lock);
@@ -267,10 +283,9 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
 	if (in_interrupt())
 		panic("Fatal exception in interrupt");
 	if (panic_on_oops)
 		panic("Fatal exception");
-
-	do_exit(SIGSEGV);
+	if (ret != NOTIFY_STOP)
+		do_exit(SIGSEGV);
 }
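Dropping the noreturn attribute from die() is deliberate: __die() now reports the oops through notify_die(), and if a registered handler (such as an attached kernel debugger) returns NOTIFY_STOP, die() skips do_exit() and returns so execution can resume. The kexec_should_crash()/crash_kexec() call likewise gives kdump a chance to capture the crash before the task is killed.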
arch/arm/kernel/vmlinux.lds.S
@@ -43,10 +43,6 @@ SECTIONS
 		INIT_SETUP(16)

-		__early_begin = .;
-			*(.early_param.init)
-		__early_end = .;
-
 		INIT_CALLS
 		CON_INITCALL
 		SECURITY_INITCALL
arch/arm/mach-davinci/include/mach/hardware.h
@@ -27,7 +27,7 @@
 /*
  * I/O mapping
  */
-#define IO_PHYS		0x01c00000
+#define IO_PHYS		0x01c00000UL
 #define IO_OFFSET	0xfd000000 /* Virtual IO = 0xfec00000 */
 #define IO_SIZE		0x00400000
 #define IO_VIRT		(IO_PHYS + IO_OFFSET)
arch/arm/mach-davinci/io.c
@@ -24,7 +24,7 @@ void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type)
 	if (BETWEEN(p, IO_PHYS, IO_SIZE))
 		return XLATE(p, IO_PHYS, IO_VIRT);

-	return __arm_ioremap(p, size, type);
+	return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(davinci_ioremap);
arch/arm/mach-dove/include/mach/vmalloc.h
@@ -2,4 +2,4 @@
  * arch/arm/mach-dove/include/mach/vmalloc.h
  */
-#define VMALLOC_END	0xfd800000
+#define VMALLOC_END	0xfd800000UL
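A recurring pattern through the rest of this merge: every per-machine VMALLOC_END (and similar address constants) gains a UL suffix so the macro is consistently typed as unsigned long. That lets generic code compare it against addresses held in unsigned long variables and print it with %lx without per-platform casts; with a bare literal, the constant's type is whatever integer type first fits the value.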
arch/arm/mach-ep93xx/include/mach/vmalloc.h
@@ -2,4 +2,4 @@
  * arch/arm/mach-ep93xx/include/mach/vmalloc.h
  */
-#define VMALLOC_END	0xfe800000
+#define VMALLOC_END	0xfe800000UL
arch/arm/mach-footbridge/common.c
@@ -32,12 +32,13 @@ unsigned int mem_fclk_21285 = 50000000;

 EXPORT_SYMBOL(mem_fclk_21285);

-static void __init early_fclk(char **arg)
+static int __init early_fclk(char *arg)
 {
-	mem_fclk_21285 = simple_strtoul(*arg, arg, 0);
+	mem_fclk_21285 = simple_strtoul(arg, NULL, 0);
+	return 0;
 }

-__early_param("mem_fclk_21285=", early_fclk);
+early_param("mem_fclk_21285", early_fclk);

 static int __init parse_tag_memclk(const struct tag *tag)
 {
arch/arm/mach-gemini/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
  * (at your option) any later version.
  */
-#define VMALLOC_END	0xF0000000
+#define VMALLOC_END	0xf0000000UL
arch/arm/mach-iop13xx/io.c
@@ -61,9 +61,9 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size,
 			(cookie - IOP13XX_PCIE_LOWER_MEM_RA));
 		break;
 	case IOP13XX_PBI_LOWER_MEM_RA ... IOP13XX_PBI_UPPER_MEM_RA:
-		retval = __arm_ioremap(IOP13XX_PBI_LOWER_MEM_PA +
-				       (cookie - IOP13XX_PBI_LOWER_MEM_RA),
-				       size, mtype);
+		retval = __arm_ioremap_caller(IOP13XX_PBI_LOWER_MEM_PA +
+				       (cookie - IOP13XX_PBI_LOWER_MEM_RA),
+				       size, mtype, __builtin_return_address(0));
 		break;
 	case IOP13XX_PCIE_LOWER_IO_PA ... IOP13XX_PCIE_UPPER_IO_PA:
 		retval = (void *) IOP13XX_PCIE_IO_PHYS_TO_VIRT(cookie);
@@ -75,7 +75,8 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size,
 		retval = (void *) IOP13XX_PMMR_PHYS_TO_VIRT(cookie);
 		break;
 	default:
-		retval = __arm_ioremap(cookie, size, mtype);
+		retval = __arm_ioremap_caller(cookie, size, mtype,
+				__builtin_return_address(0));
 	}

 	return retval;
arch/arm/mach-iop32x/include/mach/vmalloc.h
@@ -2,4 +2,4 @@
  * arch/arm/mach-iop32x/include/mach/vmalloc.h
  */
-#define VMALLOC_END	0xfe000000
+#define VMALLOC_END	0xfe000000UL
arch/arm/mach-iop33x/include/mach/vmalloc.h
@@ -2,4 +2,4 @@
  * arch/arm/mach-iop33x/include/mach/vmalloc.h
  */
-#define VMALLOC_END	0xfe000000
+#define VMALLOC_END	0xfe000000UL
arch/arm/mach-ixp2000/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
  */
-#define VMALLOC_END	0xfb000000
+#define VMALLOC_END	0xfb000000UL
arch/arm/mach-ixp23xx/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
  * specific static I/O.
  */
-#define VMALLOC_END	(0xec000000)
+#define VMALLOC_END	(0xec000000UL)
arch/arm/mach-ixp4xx/include/mach/vmalloc.h
/*
 * arch/arm/mach-ixp4xx/include/mach/vmalloc.h
 */
-#define VMALLOC_END	(0xFF000000)
+#define VMALLOC_END	(0xff000000UL)
arch/arm/mach-kirkwood/include/mach/vmalloc.h
@@ -2,4 +2,4 @@
  * arch/arm/mach-kirkwood/include/mach/vmalloc.h
  */
-#define VMALLOC_END	0xfe800000
+#define VMALLOC_END	0xfe800000UL
arch/arm/mach-lh7a40x/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
  * version 2 as published by the Free Software Foundation.
  *
  */
-#define VMALLOC_END	(0xe8000000)
+#define VMALLOC_END	(0xe8000000UL)
arch/arm/mach-loki/include/mach/vmalloc.h
@@ -2,4 +2,4 @@
  * arch/arm/mach-loki/include/mach/vmalloc.h
  */
-#define VMALLOC_END	0xfe800000
+#define VMALLOC_END	0xfe800000UL
arch/arm/mach-mmp/include/mach/vmalloc.h
@@ -2,4 +2,4 @@
  * linux/arch/arm/mach-mmp/include/mach/vmalloc.h
  */
-#define VMALLOC_END	0xfe000000
+#define VMALLOC_END	0xfe000000UL
arch/arm/mach-msm/io.c
@@ -76,5 +76,6 @@ __msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
 		mtype = MT_DEVICE_NONSHARED;
 	}

-	return __arm_ioremap(phys_addr, size, mtype);
+	return __arm_ioremap_caller(phys_addr, size, mtype,
+		__builtin_return_address(0));
 }
arch/arm/mach-mv78xx0/include/mach/vmalloc.h
@@ -2,4 +2,4 @@
  * arch/arm/mach-mv78xx0/include/mach/vmalloc.h
  */
-#define VMALLOC_END	0xfe000000
+#define VMALLOC_END	0xfe000000UL
arch/arm/mach-nomadik/include/mach/vmalloc.h
-#define VMALLOC_END	0xe8000000
+#define VMALLOC_END	0xe8000000UL
arch/arm/mach-ns9xxx/include/mach/vmalloc.h
@@ -11,6 +11,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H

-#define VMALLOC_END	(0xf0000000)
+#define VMALLOC_END	(0xf0000000UL)

 #endif /* ifndef __ASM_ARCH_VMALLOC_H */
arch/arm/mach-omap2/board-4430sdp.c
@@ -28,6 +28,7 @@
 #include <plat/control.h>
 #include <plat/timer-gp.h>
 #include <asm/hardware/gic.h>
+#include <asm/hardware/cache-l2x0.h>

 static struct platform_device sdp4430_lcd_device = {
 	.name		= "sdp4430_lcd",
@@ -50,6 +51,59 @@ static struct omap_board_config_kernel sdp4430_config[] __initdata = {
 	{ OMAP_TAG_LCD,		&sdp4430_lcd_config },
 };

+#ifdef CONFIG_CACHE_L2X0
+noinline void omap_smc1(u32 fn, u32 arg)
+{
+	register u32 r12 asm("r12") = fn;
+	register u32 r0 asm("r0") = arg;
+
+	/* This is common routine cache secure monitor API used to
+	 * modify the PL310 secure registers.
+	 * r0 contains the value to be modified and "r12" contains
+	 * the monitor API number. It uses few CPU registers
+	 * internally and hence they need be backed up including
+	 * link register "lr".
+	 * Explicitly save r11 and r12 the compiler generated code
+	 * won't save it.
+	 */
+	asm volatile(
+		"stmfd r13!, {r11,r12}\n"
+		"dsb\n"
+		"smc\n"
+		"ldmfd r13!, {r11,r12}\n"
+		: "+r" (r0), "+r" (r12)
+		:
+		: "r4", "r5", "r10", "lr", "cc");
+}
+EXPORT_SYMBOL(omap_smc1);
+
+static int __init omap_l2_cache_init(void)
+{
+	void __iomem *l2cache_base;
+
+	/* To avoid code running on other OMAPs in
+	 * multi-omap builds
+	 */
+	if (!cpu_is_omap44xx())
+		return -ENODEV;
+
+	/* Static mapping, never released */
+	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
+	BUG_ON(!l2cache_base);
+
+	/* Enable PL310 L2 Cache controller */
+	omap_smc1(0x102, 0x1);
+
+	/* 32KB way size, 16-way associativity,
+	 * parity disabled
+	 */
+	l2x0_init(l2cache_base, 0x0e050000, 0xc0000fff);
+
+	return 0;
+}
+early_initcall(omap_l2_cache_init);
+#endif
+
 static void __init gic_init_irq(void)
 {
 	void __iomem *base;
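omap_smc1() is the hook the PL310 errata code relies on: cache-l2x0.c (later in this diff) calls it with function ID 0x100 to poke the PL310 Debug Control Register through the secure monitor, while omap_l2_cache_init() above uses ID 0x102 to enable the controller — those registers are only writable from the secure side.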
arch/arm/mach-orion5x/include/mach/vmalloc.h
@@ -2,4 +2,4 @@
  * arch/arm/mach-orion5x/include/mach/vmalloc.h
  */
-#define VMALLOC_END	0xfd800000
+#define VMALLOC_END	0xfd800000UL
arch/arm/mach-pxa/include/mach/vmalloc.h
@@ -8,4 +8,4 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#define VMALLOC_END	(0xe8000000)
+#define VMALLOC_END	(0xe8000000UL)
arch/arm/mach-realview/include/mach/vmalloc.h
@@ -18,4 +18,4 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
-#define VMALLOC_END	0xf8000000
+#define VMALLOC_END	0xf8000000UL
arch/arm/mach-s3c24a0/include/mach/vmalloc.h
@@ -12,6 +12,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H

-#define VMALLOC_END	(0xE0000000)
+#define VMALLOC_END	(0xe0000000UL)

 #endif /* __ASM_ARCH_VMALLOC_H */
arch/arm/mach-sa1100/include/mach/vmalloc.h
/*
 * arch/arm/mach-sa1100/include/mach/vmalloc.h
 */
-#define VMALLOC_END	(0xe8000000)
+#define VMALLOC_END	(0xe8000000UL)
arch/arm/mach-u300/include/mach/vmalloc.h
@@ -9,4 +9,4 @@
  * End must be above the I/O registers and on an even 2MiB boundary.
  * Author: Linus Walleij <linus.walleij@stericsson.com>
  */
-#define VMALLOC_END	0xfe800000
+#define VMALLOC_END	0xfe800000UL
arch/arm/mach-ux500/include/mach/vmalloc.h
@@ -15,4 +15,4 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
-#define VMALLOC_END	0xf0000000
+#define VMALLOC_END	0xf0000000UL
arch/arm/mach-w90x900/include/mach/vmalloc.h
@@ -18,6 +18,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H

-#define VMALLOC_END	(0xE0000000)
+#define VMALLOC_END	(0xe0000000UL)

 #endif /* __ASM_ARCH_VMALLOC_H */
arch/arm/mm/Kconfig
@@ -399,7 +399,7 @@ config CPU_V6
 config CPU_32v6K
 	bool "Support ARM V6K processor extensions" if !SMP
 	depends on CPU_V6
-	default y if SMP && !ARCH_MX3
+	default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
 	help
 	  Say Y here if your ARMv6 processor supports the 'K' extension.
 	  This enables the kernel to use some instructions not present
@@ -410,7 +410,7 @@ config CPU_32v6K
 # ARMv7
 config CPU_V7
 	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
-	select CPU_32v6K
+	select CPU_32v6K if !ARCH_OMAP2
 	select CPU_32v7
 	select CPU_ABRT_EV7
 	select CPU_PABRT_V7
@@ -754,7 +754,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
 	default y
 	select OUTER_CACHE
 	help
@@ -779,5 +779,5 @@ config CACHE_XSC3L2

 config ARM_L1_CACHE_SHIFT
 	int
-	default 6 if ARCH_OMAP3 || ARCH_S5PC1XX
+	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
arch/arm/mm/alignment.c
@@ -898,11 +898,7 @@ static int __init alignment_init(void)
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *res;

-	res = proc_mkdir("cpu", NULL);
-	if (!res)
-		return -ENOMEM;
-
-	res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, res);
+	res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
 	if (!res)
 		return -ENOMEM;
arch/arm/mm/cache-l2x0.c
View file @
2741ecb4
...
...
@@ -42,6 +42,57 @@ static inline void cache_sync(void)
cache_wait
(
base
+
L2X0_CACHE_SYNC
,
1
);
}
static
inline
void
l2x0_clean_line
(
unsigned
long
addr
)
{
void
__iomem
*
base
=
l2x0_base
;
cache_wait
(
base
+
L2X0_CLEAN_LINE_PA
,
1
);
writel
(
addr
,
base
+
L2X0_CLEAN_LINE_PA
);
}
static
inline
void
l2x0_inv_line
(
unsigned
long
addr
)
{
void
__iomem
*
base
=
l2x0_base
;
cache_wait
(
base
+
L2X0_INV_LINE_PA
,
1
);
writel
(
addr
,
base
+
L2X0_INV_LINE_PA
);
}
#ifdef CONFIG_PL310_ERRATA_588369
static
void
debug_writel
(
unsigned
long
val
)
{
extern
void
omap_smc1
(
u32
fn
,
u32
arg
);
/*
* Texas Instrument secure monitor api to modify the
* PL310 Debug Control Register.
*/
omap_smc1
(
0x100
,
val
);
}
static
inline
void
l2x0_flush_line
(
unsigned
long
addr
)
{
void
__iomem
*
base
=
l2x0_base
;
/* Clean by PA followed by Invalidate by PA */
cache_wait
(
base
+
L2X0_CLEAN_LINE_PA
,
1
);
writel
(
addr
,
base
+
L2X0_CLEAN_LINE_PA
);
cache_wait
(
base
+
L2X0_INV_LINE_PA
,
1
);
writel
(
addr
,
base
+
L2X0_INV_LINE_PA
);
}
#else
/* Optimised out for non-errata case */
static
inline
void
debug_writel
(
unsigned
long
val
)
{
}
static
inline
void
l2x0_flush_line
(
unsigned
long
addr
)
{
void
__iomem
*
base
=
l2x0_base
;
cache_wait
(
base
+
L2X0_CLEAN_INV_LINE_PA
,
1
);
writel
(
addr
,
base
+
L2X0_CLEAN_INV_LINE_PA
);
}
#endif
static
inline
void
l2x0_inv_all
(
void
)
{
unsigned
long
flags
;
...
...
@@ -62,23 +113,24 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 	spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
-		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-		writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+		debug_writel(0x03);
+		l2x0_flush_line(start);
+		debug_writel(0x00);
 		start += CACHE_LINE_SIZE;
 	}
 
 	if (end & (CACHE_LINE_SIZE - 1)) {
 		end &= ~(CACHE_LINE_SIZE - 1);
-		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-		writel(end, base + L2X0_CLEAN_INV_LINE_PA);
+		debug_writel(0x03);
+		l2x0_flush_line(end);
+		debug_writel(0x00);
 	}
 
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
 
 		while (start < blk_end) {
-			cache_wait(base + L2X0_INV_LINE_PA, 1);
-			writel(start, base + L2X0_INV_LINE_PA);
+			l2x0_inv_line(start);
 			start += CACHE_LINE_SIZE;
 		}
...
...
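A subtlety in l2x0_inv_range() above deserves a note: a partially covered cache line at either end of the range must be flushed (cleaned and invalidated), not merely invalidated, otherwise dirty data belonging to bytes outside the range would be thrown away. A runnable toy that prints which maintenance operation each line would receive (line size and addresses are made up for illustration):

#include <stdio.h>

#define CACHE_LINE_SIZE 32UL

static void inv_range(unsigned long start, unsigned long end)
{
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		printf("flush partial head line at 0x%lx\n", start);
		start += CACHE_LINE_SIZE;
	}
	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		printf("flush partial tail line at 0x%lx\n", end);
	}
	while (start < end) {
		printf("invalidate line at 0x%lx\n", start);
		start += CACHE_LINE_SIZE;
	}
}

int main(void)
{
	inv_range(0x1010, 0x1070);	/* deliberately misaligned range */
	return 0;
}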
@@ -103,8 +155,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		unsigned long blk_end = start + min(end - start, 4096UL);
 
 		while (start < blk_end) {
-			cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
-			writel(start, base + L2X0_CLEAN_LINE_PA);
+			l2x0_clean_line(start);
 			start += CACHE_LINE_SIZE;
 		}
...
...
@@ -128,11 +179,12 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
 
+		debug_writel(0x03);
 		while (start < blk_end) {
-			cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
-			writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+			l2x0_flush_line(start);
 			start += CACHE_LINE_SIZE;
 		}
+		debug_writel(0x00);
 
 		if (blk_end < end) {
 			spin_unlock_irqrestore(&l2x0_lock, flags);
...
...
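All three range walkers share the 4096-byte blocking pattern: the l2x0 spinlock is released and reacquired between blocks so a large maintenance operation cannot keep interrupts disabled for unbounded time. A reduced sketch of that loop shape, with hypothetical lock stand-ins in place of spin_lock_irqsave():

#include <stdio.h>

#define CACHE_LINE_SIZE	32UL
#define BLOCK		4096UL

static void lock(void)   { /* spin_lock_irqsave in the real code */ }
static void unlock(void) { /* spin_unlock_irqrestore in the real code */ }

static void flush_range(unsigned long start, unsigned long end)
{
	lock();
	while (start < end) {
		unsigned long blk_end = start +
			(end - start < BLOCK ? end - start : BLOCK);

		while (start < blk_end) {
			/* flush one line here */
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			unlock();	/* let pending interrupts in */
			lock();
		}
	}
	unlock();
	printf("flushed up to 0x%lx\n", start);
}

int main(void)
{
	flush_range(0x10000, 0x13000);	/* 12 KiB, i.e. three blocks */
	return 0;
}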
arch/arm/mm/context.c
View file @
2741ecb4
...
...
@@ -10,12 +10,17 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
 
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
 static DEFINE_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(struct mm_struct *, current_mm);
+#endif
 
 /*
  * We fork()ed a process, and we need a new context for the child
...
...
@@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	mm->context.id = 0;
+	spin_lock_init(&mm->context.id_lock);
+}
+
+static void flush_context(void)
+{
+	/* set the reserved ASID before flushing the TLB */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+	isb();
+	local_flush_tlb_all();
+	if (icache_is_vivt_asid_tagged()) {
+		__flush_icache_all();
+		dsb();
+	}
+}
+
+#ifdef CONFIG_SMP
+
+static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	unsigned long flags;
+
+	/*
+	 * Locking needed for multi-threaded applications where the
+	 * same mm->context.id could be set from different CPUs during
+	 * the broadcast. This function is also called via IPI so the
+	 * mm->context.id_lock has to be IRQ-safe.
+	 */
+	spin_lock_irqsave(&mm->context.id_lock, flags);
+	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+		/*
+		 * Old version of ASID found. Set the new one and
+		 * reset mm_cpumask(mm).
+		 */
+		mm->context.id = asid;
+		cpumask_clear(mm_cpumask(mm));
+	}
+	spin_unlock_irqrestore(&mm->context.id_lock, flags);
+
+	/*
+	 * Set the mm_cpumask(mm) bit for the current CPU.
+	 */
+	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+
+/*
+ * Reset the ASID on the current CPU. This function call is broadcast
+ * from the CPU handling the ASID rollover and holding cpu_asid_lock.
+ */
+static void reset_context(void *info)
+{
+	unsigned int asid;
+	unsigned int cpu = smp_processor_id();
+	struct mm_struct *mm = per_cpu(current_mm, cpu);
+
+	/*
+	 * Check if a current_mm was set on this CPU as it might still
+	 * be in the early booting stages and using the reserved ASID.
+	 */
+	if (!mm)
+		return;
+
+	smp_rmb();
+	asid = cpu_last_asid + cpu + 1;
+
+	flush_context();
+	set_mm_context(mm, asid);
+
+	/* set the new ASID */
+	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+	isb();
+}
+
+#else
+
+static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+	mm->context.id = asid;
+	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
 }
 
+#endif
+
 void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
 	spin_lock(&cpu_asid_lock);
+#ifdef CONFIG_SMP
+	/*
+	 * Check the ASID again, in case the change was broadcast from
+	 * another CPU before we acquired the lock.
+	 */
+	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
+		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+		spin_unlock(&cpu_asid_lock);
+		return;
+	}
+#endif
+	/*
+	 * At this point, it is guaranteed that the current mm (with
+	 * an old ASID) isn't active on any other CPU since the ASIDs
+	 * are changed simultaneously via IPI.
+	 */
+
 	asid = ++cpu_last_asid;
 	if (asid == 0)
 		asid = cpu_last_asid = ASID_FIRST_VERSION;
...
...
@@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm)
 	 * to start a new version and flush the TLB.
 	 */
 	if (unlikely((asid & ~ASID_MASK) == 0)) {
-		asid = ++cpu_last_asid;
-		/* set the reserved ASID before flushing the TLB */
-		asm("mcr	p15, 0, %0, c13, c0, 1	@ set reserved context ID\n"
-		    :
-		    : "r" (0));
-		isb();
-		flush_tlb_all();
-		if (icache_is_vivt_asid_tagged()) {
-			__flush_icache_all();
-			dsb();
-		}
+		asid = cpu_last_asid + smp_processor_id() + 1;
+		flush_context();
+#ifdef CONFIG_SMP
+		smp_wmb();
+		smp_call_function(reset_context, NULL, 1);
+#endif
+		cpu_last_asid += NR_CPUS;
 	}
-	spin_unlock(&cpu_asid_lock);
-
-	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-	mm->context.id = asid;
+	set_mm_context(mm, asid);
+
+	spin_unlock(&cpu_asid_lock);
 }
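The recurring test ((mm->context.id ^ cpu_last_asid) >> ASID_BITS) is a generation comparison: the low ASID_BITS hold the hardware ASID and the high bits hold a rollover generation, so the XOR-and-shift is non-zero exactly when the task's ASID belongs to an older generation. A tiny demonstration (ASID_BITS is 8 on ARM):

#include <stdio.h>

#define ASID_BITS	8
#define ASID_MASK	(~0u << ASID_BITS)

int main(void)
{
	unsigned int cpu_last_asid = (3u << ASID_BITS) | 5;	/* generation 3 */
	unsigned int task_asid     = (2u << ASID_BITS) | 5;	/* stale generation 2 */

	/* non-zero iff the generations differ, i.e. the ASID is stale */
	printf("stale = %u\n", (task_asid ^ cpu_last_asid) >> ASID_BITS);

	/* the hardware ASID itself is just the low bits */
	printf("hw asid = %u\n", task_asid & ~ASID_MASK);
	return 0;
}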
arch/arm/mm/dma-mapping.c
View file @
2741ecb4
...
...
@@ -29,9 +29,6 @@
 #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
 #endif
 
-#define CONSISTENT_END	(0xffe00000)
-#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)
-
 #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
 #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
 #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
...
...
arch/arm/mm/init.c
View file @
2741ecb4
...
...
@@ -23,6 +23,7 @@
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/fixmap.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
...
...
@@ -32,19 +33,21 @@
 static unsigned long phys_initrd_start __initdata = 0;
 static unsigned long phys_initrd_size __initdata = 0;
 
-static void __init early_initrd(char **p)
+static int __init early_initrd(char *p)
 {
 	unsigned long start, size;
+	char *endp;
 
-	start = memparse(*p, p);
-	if (**p == ',') {
-		size = memparse((*p) + 1, p);
+	start = memparse(p, &endp);
+	if (*endp == ',') {
+		size = memparse(endp + 1, NULL);
 
 		phys_initrd_start = start;
 		phys_initrd_size = size;
 	}
+	return 0;
 }
-__early_param("initrd=", early_initrd);
+early_param("initrd", early_initrd);
 
 static int __init parse_tag_initrd(const struct tag *tag)
 {
...
...
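The conversion changes the handler contract: an __early_param handler received char **p and advanced it, while an early_param handler receives the value string and must scan it itself, hence the explicit endp above. A self-contained sketch of the initrd=start,size parsing, with a minimal stand-in for the kernel's memparse() (which accepts k/M/G suffixes):

#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for the kernel's memparse() */
static unsigned long memparse(const char *ptr, char **retptr)
{
	unsigned long ret = strtoul(ptr, retptr, 0);

	switch (**retptr) {
	case 'G':
		ret <<= 10;	/* fall through */
	case 'M':
		ret <<= 10;	/* fall through */
	case 'K':
	case 'k':
		ret <<= 10;
		(*retptr)++;
		break;
	}
	return ret;
}

int main(void)
{
	const char *arg = "0x80800000,8M";	/* value of initrd=0x80800000,8M */
	char *endp;
	unsigned long size = 0;
	unsigned long start = memparse(arg, &endp);

	if (*endp == ',')
		size = memparse(endp + 1, &endp);

	printf("start=0x%lx size=0x%lx\n", start, size);
	return 0;
}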
@@ -560,7 +563,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
  */
 void __init mem_init(void)
 {
-	unsigned int codesize, datasize, initsize;
+	unsigned long reserved_pages, free_pages;
 	int i, node;
 #ifndef CONFIG_DISCONTIGMEM
...
...
@@ -596,6 +599,33 @@ void __init mem_init(void)
 	totalram_pages += totalhigh_pages;
 #endif
 
+	reserved_pages = free_pages = 0;
+
+	for_each_online_node(node) {
+		pg_data_t *n = NODE_DATA(node);
+		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
+
+		for_each_nodebank(i, &meminfo, node) {
+			struct membank *bank = &meminfo.bank[i];
+			unsigned int pfn1, pfn2;
+			struct page *page, *end;
+
+			pfn1 = bank_pfn_start(bank);
+			pfn2 = bank_pfn_end(bank);
+
+			page = map + pfn1;
+			end  = map + pfn2;
+
+			do {
+				if (PageReserved(page))
+					reserved_pages++;
+				else if (!page_count(page))
+					free_pages++;
+				page++;
+			} while (page < end);
+		}
+	}
+
 	/*
 	 * Since our memory may not be contiguous, calculate the
 	 * real number of pages we have in this system
...
...
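The new accounting loop classifies every page of every memory bank by its struct page state: PageReserved() pages versus pages whose refcount is zero (free right now). The same walk in miniature, with a two-field struct standing in for struct page:

#include <stdio.h>

struct page { int reserved; int count; };

int main(void)
{
	struct page map[8] = {
		{ 1, 0 }, { 1, 0 },		/* kernel text: reserved */
		{ 0, 0 }, { 0, 0 }, { 0, 0 },	/* free pages */
		{ 0, 2 }, { 0, 1 },		/* in use by someone */
		{ 1, 0 },			/* reserved again */
	};
	unsigned long reserved_pages = 0, free_pages = 0;
	struct page *page = map, *end = map + 8;

	do {
		if (page->reserved)
			reserved_pages++;
		else if (!page->count)
			free_pages++;
		page++;
	} while (page < end);

	printf("reserved=%lu free=%lu\n", reserved_pages, free_pages);
	return 0;
}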
@@ -608,16 +638,71 @@ void __init mem_init(void)
 	}
 	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
 
-	codesize = _etext - _text;
-	datasize = _end - _data;
-	initsize = __init_end - __init_begin;
-
-	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
-		"%dK data, %dK init, %luK highmem)\n",
-		nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10,
-		datasize >> 10, initsize >> 10,
+	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
+		nr_free_pages() << (PAGE_SHIFT-10),
+		free_pages << (PAGE_SHIFT-10),
+		reserved_pages << (PAGE_SHIFT-10),
 		totalhigh_pages << (PAGE_SHIFT-10));
+
+#define MLK(b, t) b, t, ((t) - (b)) >> 10
+#define MLM(b, t) b, t, ((t) - (b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
+
+	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_MMU
+			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#ifdef CONFIG_HIGHMEM
+			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+#endif
+			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
+			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",
+
+			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
+				(PAGE_SIZE)),
+			MLK(FIXADDR_START, FIXADDR_TOP),
+#ifdef CONFIG_MMU
+			MLM(CONSISTENT_BASE, CONSISTENT_END),
+#endif
+			MLM(VMALLOC_START, VMALLOC_END),
+			MLM(PAGE_OFFSET, (unsigned long)high_memory),
+#ifdef CONFIG_HIGHMEM
+			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
+				(PAGE_SIZE)),
+#endif
+			MLM(MODULES_VADDR, MODULES_END),
+
+			MLK_ROUNDUP(__init_begin, __init_end),
+			MLK_ROUNDUP(_text, _etext),
+			MLK_ROUNDUP(_data, _edata));
+
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
+
+	/*
+	 * Check boundaries twice: Some fundamental inconsistencies can
+	 * be detected at build time already.
+	 */
+#ifdef CONFIG_MMU
+	BUILD_BUG_ON(VMALLOC_END			> CONSISTENT_BASE);
+	BUG_ON(VMALLOC_END				> CONSISTENT_BASE);
+
+	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
+	BUG_ON(TASK_SIZE				> MODULES_VADDR);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
+	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
+#endif
+
 	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
 		extern int sysctl_overcommit_memory;
 		/*
...
...
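The MLK/MLM/MLK_ROUNDUP helpers are a small trick worth noting: each expands to three printf arguments (base, top, and size in KB or MB), so every memory region contributes exactly one macro invocation and the long format string stays in sync with its argument list. Reduced to one line:

#include <stdio.h>

#define MLM(b, t) (b), (t), (((t) - (b)) >> 20)	/* base, top, size in MB */

int main(void)
{
	unsigned long vmalloc_start = 0xc8800000UL, vmalloc_end = 0xf4000000UL;

	/* one macro supplies all three conversions for the line */
	printf("    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n",
	       MLM(vmalloc_start, vmalloc_end));
	return 0;
}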
arch/arm/mm/ioremap.c
View file @
2741ecb4
...
...
@@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm)
  * which requires the new ioremap'd region to be referenced, the CPU will
  * reference the _old_ region.
  *
- * Note that get_vm_area() allocates a guard 4K page, so we need to mask
- * the size back to 1MB aligned or we will overflow in the loop below.
+ * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
+ * mask the size back to 1MB aligned or we will overflow in the loop below.
  */
 static void unmap_area_sections(unsigned long virt, unsigned long size)
 {
...
...
@@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 }
 #endif
 
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- *
- * 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping. See <asm/pgtable.h> for more information.
- */
-void __iomem *
-__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
-		  unsigned int mtype)
+void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn,
+	unsigned long offset, size_t size, unsigned int mtype, void *caller)
 {
 	const struct mem_type *type;
 	int err;
...
...
@@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	 */
 	size = PAGE_ALIGN(offset + size);
 
-	area = get_vm_area(size, VM_IOREMAP);
+	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
...
...
@@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	flush_cache_vmap(addr, addr + size);
 	return (void __iomem *) (offset + addr);
 }
-EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem *
-__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+	unsigned int mtype, void *caller)
 {
 	unsigned long last_addr;
 	unsigned long offset = phys_addr & ~PAGE_MASK;
...
...
@@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
 	if (!size || last_addr < phys_addr)
 		return NULL;
 
-	return __arm_ioremap_pfn(pfn, offset, size, mtype);
+	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+		  unsigned int mtype)
+{
+	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+			__builtin_return_address(0));
+}
+EXPORT_SYMBOL(__arm_ioremap_pfn);
+
+void __iomem *
+__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+{
+	return __arm_ioremap_caller(phys_addr, size, mtype,
+			__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__arm_ioremap);
...
...
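The refactoring pattern here: the real work moves into *_caller() variants that take an explicit caller cookie, and the old entry points become thin wrappers that capture their call site with __builtin_return_address(0). get_vm_area_caller() then records that pointer so /proc/vmallocinfo can attribute each mapping to the driver that created it rather than to __arm_ioremap itself. A generic, stand-alone sketch of the technique (all names hypothetical):

#include <stdio.h>

/* core worker: takes an explicit caller cookie for attribution */
static void *region_alloc_caller(unsigned long size, void *caller)
{
	printf("%lu bytes requested from call site %p\n", size, caller);
	return NULL;	/* actual allocation elided in this sketch */
}

/* compatibility wrapper: records its own return address as the cookie */
static __attribute__((noinline)) void *region_alloc(unsigned long size)
{
	return region_alloc_caller(size, __builtin_return_address(0));
}

int main(void)
{
	region_alloc(4096);
	return 0;
}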
arch/arm/mm/mmu.c
View file @
2741ecb4
...
...
@@ -100,18 +100,17 @@ static struct cachepolicy cache_policies[] __initdata = {
  * writebuffer to be turned off. (Note: the write
  * buffer should not be on and the cache off).
  */
-static void __init early_cachepolicy(char **p)
+static int __init early_cachepolicy(char *p)
 {
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
 		int len = strlen(cache_policies[i].policy);
 
-		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
+		if (memcmp(p, cache_policies[i].policy, len) == 0) {
 			cachepolicy = i;
 			cr_alignment &= ~cache_policies[i].cr_mask;
 			cr_no_alignment &= ~cache_policies[i].cr_mask;
-			*p += len;
 			break;
 		}
 	}
...
...
@@ -130,36 +129,37 @@ static void __init early_cachepolicy(char **p)
 	}
 	flush_cache_all();
 	set_cr(cr_alignment);
+	return 0;
 }
-__early_param("cachepolicy=", early_cachepolicy);
+early_param("cachepolicy", early_cachepolicy);
 
-static void __init early_nocache(char **__unused)
+static int __init early_nocache(char *__unused)
 {
 	char *p = "buffered";
 	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
-	early_cachepolicy(&p);
+	early_cachepolicy(p);
+	return 0;
 }
-__early_param("nocache", early_nocache);
+early_param("nocache", early_nocache);
 
-static void __init early_nowrite(char **__unused)
+static int __init early_nowrite(char *__unused)
 {
 	char *p = "uncached";
 	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
-	early_cachepolicy(&p);
+	early_cachepolicy(p);
+	return 0;
 }
-__early_param("nowb", early_nowrite);
+early_param("nowb", early_nowrite);
 
-static void __init early_ecc(char **p)
+static int __init early_ecc(char *p)
 {
-	if (memcmp(*p, "on", 2) == 0) {
+	if (memcmp(p, "on", 2) == 0)
 		ecc_mask = PMD_PROTECTION;
-		*p += 2;
-	} else if (memcmp(*p, "off", 3) == 0) {
+	else if (memcmp(p, "off", 3) == 0)
 		ecc_mask = 0;
-		*p += 3;
-	}
+	return 0;
 }
-__early_param("ecc=", early_ecc);
+early_param("ecc", early_ecc);
 
 static int __init noalign_setup(char *__unused)
 {
...
...
@@ -670,9 +670,9 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M;
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
-static void __init early_vmalloc(char **arg)
+static int __init early_vmalloc(char *arg)
 {
-	vmalloc_reserve = memparse(*arg, arg);
+	vmalloc_reserve = memparse(arg, NULL);
 
 	if (vmalloc_reserve < SZ_16M) {
 		vmalloc_reserve = SZ_16M;
...
...
@@ -687,8 +687,9 @@ static void __init early_vmalloc(char **arg)
 			"vmalloc area is too big, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
+	return 0;
 }
-__early_param("vmalloc=", early_vmalloc);
+early_param("vmalloc", early_vmalloc);
 
 #define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
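Every handler in this file follows the same conversion recipe: char **p becomes char *p, dereferences of *p become p, the pointer-advancing side effects disappear, the handler returns 0 on success, and the trailing '=' drops out of the option name. The two calling conventions side by side, as a runnable sketch with a hypothetical "widget=" option and strtoul standing in for memparse:

#include <stdio.h>
#include <stdlib.h>

static unsigned long widget_size;	/* hypothetical option value */

/* old __early_param style: handler advances the caller's pointer */
static void old_widget(char **p)
{
	widget_size = strtoul(*p, p, 0);
}

/* new early_param style: handler owns its scanning, returns 0 on success */
static int new_widget(char *p)
{
	widget_size = strtoul(p, NULL, 0);
	return 0;
}

int main(void)
{
	char buf[] = "64 rest-of-cmdline";
	char *p = buf;

	old_widget(&p);		/* p now points past "64" */
	printf("old: size=%lu remainder='%s'\n", widget_size, p);

	new_widget(buf);	/* no pointer side effect */
	printf("new: size=%lu\n", widget_size);
	return 0;
}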
...
...
arch/arm/mm/nommu.c
View file @
2741ecb4
...
...
@@ -74,6 +74,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
+void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
+			size_t size, unsigned int mtype, void *caller)
+{
+	return __arm_ioremap_pfn(pfn, offset, size, mtype);
+}
+
 void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
 			    unsigned int mtype)
 {
...
...
@@ -81,6 +87,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
 }
 EXPORT_SYMBOL(__arm_ioremap);
 
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+			unsigned int mtype, void *caller)
+{
+	return __arm_ioremap(phys_addr, size, mtype);
+}
+
 void __iounmap(volatile void __iomem *addr)
 {
 }
...
...
arch/arm/plat-iop/io.c
View file @
2741ecb4
...
...
@@ -34,7 +34,8 @@ void * __iomem __iop3xx_ioremap(unsigned long cookie, size_t size,
 		retval = (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie);
 		break;
 	default:
-		retval = __arm_ioremap(cookie, size, mtype);
+		retval = __arm_ioremap_caller(cookie, size, mtype,
+				__builtin_return_address(0));
 	}
 
 	return retval;
...
...
arch/arm/plat-mxc/include/mach/vmalloc.h
View file @
2741ecb4
...
...
@@ -21,6 +21,6 @@
 #define __ASM_ARCH_MXC_VMALLOC_H__
 
 /* vmalloc ending address */
-#define VMALLOC_END	0xF4000000
+#define VMALLOC_END	0xf4000000UL
 
 #endif	/* __ASM_ARCH_MXC_VMALLOC_H__ */
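The UL suffix in these vmalloc.h changes is not just cosmetic: a bare 0xF4000000 literal has type unsigned int, and the new mem_init() layout printout and BUILD_BUG_ON() bounds checks compare VMALLOC_END against unsigned long values, which provokes type and printk-format warnings. A quick check of the difference (the sizes only diverge on an LP64 host, but the type difference is what the format checking cares about):

#include <stdio.h>

#define VMALLOC_END_BAD		(0xf0000000)	/* type: unsigned int */
#define VMALLOC_END_GOOD	0xf0000000UL	/* type: unsigned long */

int main(void)
{
	/* on a 32-bit target both are 4 bytes, on an LP64 host 4 vs 8;
	 * either way the *types* differ, which is what %lx checks catch */
	printf("bad: %zu bytes, good: %zu bytes\n",
	       sizeof(VMALLOC_END_BAD), sizeof(VMALLOC_END_GOOD));
	return 0;
}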
arch/arm/plat-omap/Kconfig
View file @
2741ecb4
...
...
@@ -22,6 +22,7 @@ config ARCH_OMAP3
 	bool "TI OMAP3"
 	select CPU_V7
 	select COMMON_CLKDEV
+	select ARM_L1_CACHE_SHIFT_6
 
 config ARCH_OMAP4
 	bool "TI OMAP4"
...
...
arch/arm/plat-omap/include/plat/omap44xx.h
View file @
2741ecb4
...
...
@@ -40,6 +40,7 @@
 #define OMAP44XX_GIC_CPU_BASE		0x48240100
 #define OMAP44XX_SCU_BASE		0x48240000
 #define OMAP44XX_LOCAL_TWD_BASE		0x48240600
+#define OMAP44XX_L2CACHE_BASE		0x48242000
 #define OMAP44XX_WKUPGEN_BASE		0x48281000
 #define OMAP44XX_MAILBOX_BASE		(L4_44XX_BASE + 0xF4000)
...
...
arch/arm/plat-omap/io.c
View file @
2741ecb4
...
...
@@ -128,7 +128,7 @@ void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
 			return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT);
 	}
 #endif
-	return __arm_ioremap(p, size, type);
+	return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(omap_ioremap);
...
...
arch/arm/plat-s3c/include/mach/vmalloc.h
View file @
2741ecb4
...
...
@@ -15,6 +15,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H
 
-#define VMALLOC_END	(0xE0000000)
+#define VMALLOC_END	(0xe0000000UL)
 
 #endif /* __ASM_ARCH_VMALLOC_H */
arch/arm/plat-stmp3xxx/include/mach/vmalloc.h
View file @
2741ecb4
...
...
@@ -9,4 +9,4 @@
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
-#define VMALLOC_END	(0xF0000000)
+#define VMALLOC_END	0xf0000000UL
arch/arm/vfp/vfpmodule.c
View file @
2741ecb4
...
...
@@ -430,7 +430,11 @@ static inline void vfp_pm_init(void) { }
 * saved one. This function is used by the ptrace mechanism.
 */
 #ifdef CONFIG_SMP
-void vfp_sync_state(struct thread_info *thread)
+void vfp_sync_hwstate(struct thread_info *thread)
+{
+}
+
+void vfp_flush_hwstate(struct thread_info *thread)
 {
 	/*
 	 * On SMP systems, the VFP state is automatically saved at every
...
...
@@ -441,35 +445,48 @@ void vfp_sync_state(struct thread_info *thread)
 	thread->vfpstate.hard.cpu = NR_CPUS;
 }
 #else
-void vfp_sync_state(struct thread_info *thread)
+void vfp_sync_hwstate(struct thread_info *thread)
 {
 	unsigned int cpu = get_cpu();
-	u32 fpexc = fmrx(FPEXC);
 
 	/*
-	 * If VFP is enabled, the previous state was already saved and
-	 * last_VFP_context updated.
+	 * If the thread we're interested in is the current owner of the
+	 * hardware VFP state, then we need to save its state.
 	 */
-	if (fpexc & FPEXC_EN)
-		goto out;
+	if (last_VFP_context[cpu] == &thread->vfpstate) {
+		u32 fpexc = fmrx(FPEXC);
 
-	if (!last_VFP_context[cpu])
-		goto out;
+		/*
+		 * Save the last VFP state on this CPU.
+		 */
+		fmxr(FPEXC, fpexc | FPEXC_EN);
+		vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
+		fmxr(FPEXC, fpexc);
+	}
 
-	/*
-	 * Save the last VFP state on this CPU.
-	 */
-	fmxr(FPEXC, fpexc | FPEXC_EN);
-	vfp_save_state(last_VFP_context[cpu], fpexc);
-	fmxr(FPEXC, fpexc);
+	put_cpu();
+}
+
+void vfp_flush_hwstate(struct thread_info *thread)
+{
+	unsigned int cpu = get_cpu();
 
 	/*
-	 * Set the context to NULL to force a reload the next time the thread
-	 * uses the VFP.
+	 * If the thread we're interested in is the current owner of the
+	 * hardware VFP state, then we need to save its state.
 	 */
-	last_VFP_context[cpu] = NULL;
+	if (last_VFP_context[cpu] == &thread->vfpstate) {
+		u32 fpexc = fmrx(FPEXC);
+
+		fmxr(FPEXC, fpexc & ~FPEXC_EN);
+
+		/*
+		 * Set the context to NULL to force a reload the next time
+		 * the thread uses the VFP.
+		 */
+		last_VFP_context[cpu] = NULL;
+	}
 
-out:
 	put_cpu();
 }
 #endif
...
...
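Both new functions rest on the same lazy-switching invariant: last_VFP_context[cpu] names the thread whose VFP registers currently live in the hardware, so vfp_sync_hwstate() copies hardware state to memory only when the target thread is that owner, and vfp_flush_hwstate() simply drops ownership so the next VFP use reloads from memory. A stripped-down, runnable model of the invariant:

#include <stdio.h>

struct ctx { int regs; };

static struct ctx *last_owner;	/* hardware state belongs to this ctx */
static int hw_regs;		/* stand-in for the VFP register file */

/* sync: make the in-memory copy match the hardware, if we own it */
static void sync_hwstate(struct ctx *c)
{
	if (last_owner == c)
		c->regs = hw_regs;	/* save hardware state to memory */
}

/* flush: invalidate ownership so the next use reloads from memory */
static void flush_hwstate(struct ctx *c)
{
	if (last_owner == c)
		last_owner = NULL;
}

int main(void)
{
	struct ctx t = { .regs = 0 };

	last_owner = &t;
	hw_regs = 42;			/* thread computed something in hardware */

	sync_hwstate(&t);		/* ptrace read: memory copy is now 42 */
	printf("saved regs = %d\n", t.regs);

	flush_hwstate(&t);		/* ptrace write: force reload from memory */
	printf("owner cleared: %s\n", last_owner ? "no" : "yes");
	return 0;
}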
drivers/serial/amba-pl011.c
View file @
2741ecb4
...
...
@@ -71,6 +71,7 @@ struct uart_amba_port {
 	unsigned int		im;	/* interrupt mask */
 	unsigned int		old_status;
 	unsigned int		ifls;	/* vendor-specific */
+	bool			autorts;
 };
 
 /* There is by now at least one vendor with differing details, so handle it */
...
...
@@ -308,6 +309,11 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
 	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
 	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
 	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
+
+	if (uap->autorts) {
+		/* We need to disable auto-RTS if we want to turn RTS off */
+		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
+	}
 #undef TIOCMBIT
 
 	writew(cr, uap->port.membase + UART011_CR);
...
...
@@ -437,6 +443,7 @@ static void pl011_shutdown(struct uart_port *port)
 	/*
 	 * disable the port
 	 */
+	uap->autorts = false;
 	writew(UART01x_CR_UARTEN | UART011_CR_TXE,
 	       uap->port.membase + UART011_CR);
 
 	/*
...
...
@@ -456,6 +463,7 @@ static void
 pl011_set_termios(struct uart_port *port, struct ktermios *termios,
 		  struct ktermios *old)
 {
+	struct uart_amba_port *uap = (struct uart_amba_port *)port;
 	unsigned int lcr_h, old_cr;
 	unsigned long flags;
 	unsigned int baud, quot;
...
...
@@ -532,6 +540,17 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
 	old_cr = readw(port->membase + UART011_CR);
 	writew(0, port->membase + UART011_CR);
 
+	if (termios->c_cflag & CRTSCTS) {
+		if (old_cr & UART011_CR_RTS)
+			old_cr |= UART011_CR_RTSEN;
+
+		old_cr |= UART011_CR_CTSEN;
+		uap->autorts = true;
+	} else {
+		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
+		uap->autorts = false;
+	}
+
 	/* Set baud rate */
 	writew(quot & 0x3f, port->membase + UART011_FBRD);
 	writew(quot >> 6, port->membase + UART011_IBRD);
...
...
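The new autorts handling is driven entirely by termios: setting CRTSCTS on the port lands in pl011_set_termios(), which turns on the UART011_CR_RTSEN/CTSEN bits. From userspace the trigger looks like this (a sketch using the standard termios API; /dev/ttyAMA0 is the usual PL011 node):

#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyAMA0", O_RDWR | O_NOCTTY);
	struct termios tio;

	if (fd < 0 || tcgetattr(fd, &tio) < 0) {
		perror("open/tcgetattr");
		return 1;
	}

	tio.c_cflag |= CRTSCTS;			/* request auto-RTS/CTS */
	if (tcsetattr(fd, TCSANOW, &tio) < 0)	/* ends up in pl011_set_termios */
		perror("tcsetattr");

	close(fd);
	return 0;
}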