Commit 16d87757 authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (91 commits)
  ARM: 6806/1: irq: introduce entry and exit functions for chained handlers
  ARM: 6781/1: Thumb-2: Work around buggy Thumb-2 short branch relocations in gas
  ARM: 6747/1: P2V: Thumb2 support
  ARM: 6798/1: aout-core: zero thread debug registers in a.out core dump
  ARM: 6796/1: Footbridge: Fix I/O mappings for NOMMU mode
  ARM: 6784/1: errata: no automatic Store Buffer drain on Cortex-A9
  ARM: 6772/1: errata: possible fault MMU translations following an ASID switch
  ARM: 6776/1: mach-ux500: activate fix for errata 753970
  ARM: 6794/1: SPEAr: Append UL to device address macros.
  ARM: 6793/1: SPEAr: Remove unused *_SIZE macros from spear*.h files
  ARM: 6792/1: SPEAr: Replace SIZE macro's with SZ_4K macros
  ARM: 6791/1: SPEAr3xx: Declare device structures after shirq code
  ARM: 6790/1: SPEAr: Clock Framework: Rename usbd clock and align apb_clk entry
  ARM: 6789/1: SPEAr3xx: Rename sdio to sdhci
  ARM: 6788/1: SPEAr: Include mach/hardware.h instead of mach/spear.h
  ARM: 6787/1: SPEAr: Reorder #includes in .h & .c files.
  ARM: 6681/1: SPEAr: add debugfs support to clk API
  ARM: 6703/1: SPEAr: update clk API support
  ARM: 6679/1: SPEAr: make clk API functions more generic
  ARM: 6737/1: SPEAr: formalized timer support
  ...
parents e3455133 05e34754
BIN := vrl4
.PHONY: all
all: $(BIN)
.PHONY: clean
clean:
	rm -f *.o $(BIN)
/*
* vrl4 format generator
*
* Copyright (C) 2010 Simon Horman
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
/*
* usage: vrl4 < zImage > out
* dd if=out of=/dev/sdx bs=512 seek=1 # Write the image to sector 1
*
* Reads a zImage from stdin and writes a vrl4 image to stdout.
* In practice this means writing a padded vrl4 header to stdout followed
* by the zImage.
*
* The padding places the zImage at ALIGN bytes into the output.
* The vrl4 uses ALIGN + START_BASE as the start_address.
* This is where the mask ROM will jump to after verifying the header.
*
* The header sets copy_size to min(sizeof(zImage), MAX_BOOT_PROG_LEN) + ALIGN.
* That is, the mask ROM will load the padded header (ALIGN bytes)
* and then MAX_BOOT_PROG_LEN bytes of the image, or the entire image,
* whichever is smaller.
*
* The zImage is not modified in any way.
*/
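/*
 * Worked example (added for illustration; the values follow from the macros
 * defined below): sizeof(struct hdr) is 344 bytes, so the padded header
 * occupies ALIGN = 512 bytes and start_address becomes
 * START_BASE + 512 = 0xe55b0200.  For any zImage of 8 KiB or more,
 * copy_size = 512 + MAX_BOOT_PROG_LEN = 512 + 8192 = 8704 bytes (17 sectors),
 * i.e. the mask ROM copies the header plus the first 8192 bytes of the
 * zImage; the boot program contained in those bytes loads the rest itself.
 */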
#define _BSD_SOURCE
#include <endian.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>
struct hdr {
uint32_t magic1;
uint32_t reserved1;
uint32_t magic2;
uint32_t reserved2;
uint16_t copy_size;
uint16_t boot_options;
uint32_t reserved3;
uint32_t start_address;
uint32_t reserved4;
uint32_t reserved5;
char reserved6[308];
};
#define DECLARE_HDR(h) \
struct hdr (h) = { \
.magic1 = htole32(0xea000000), \
.reserved1 = htole32(0x56), \
.magic2 = htole32(0xe59ff008), \
.reserved3 = htole16(0x1) }
/* Align to 512 bytes, the MMCIF sector size */
#define ALIGN_BITS 9
#define ALIGN (1 << ALIGN_BITS)
#define START_BASE 0xe55b0000
/*
* With an alignment of 512 the header uses the first sector.
* There is a 128 sector (64kbyte) limit on the data loaded by the mask ROM.
* So there are 127 sectors left for the boot programme. In practice
* only a small portion of a zImage is needed; 16 sectors should be more
* than enough.
*
* Note that this sets how much of the zImage is copied by the mask ROM.
* The entire zImage is present after the header and is loaded
* by the code in the boot program (which is the first portion of the zImage).
*/
#define MAX_BOOT_PROG_LEN (16 * 512)
#define ROUND_UP(x) (((x) + ALIGN - 1) & ~(ALIGN - 1))
ssize_t do_read(int fd, void *buf, size_t count)
{
size_t offset = 0;
ssize_t l;
while (offset < count) {
l = read(fd, buf + offset, count - offset);
if (!l)
break;
if (l < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK)
continue;
perror("read");
return -1;
}
offset += l;
}
return offset;
}
ssize_t do_write(int fd, const void *buf, size_t count)
{
size_t offset = 0;
ssize_t l;
while (offset < count) {
l = write(fd, buf + offset, count - offset);
if (l < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK)
continue;
perror("write");
return -1;
}
offset += l;
}
return offset;
}
ssize_t write_zero(int fd, size_t len)
{
size_t i = len;
while (i--) {
const char x = 0;
if (do_write(fd, &x, 1) < 0)
return -1;
}
return len;
}
int main(void)
{
DECLARE_HDR(hdr);
char boot_program[MAX_BOOT_PROG_LEN];
size_t aligned_hdr_len, aligned_prog_len;
ssize_t prog_len;
prog_len = do_read(0, boot_program, sizeof(boot_program));
if (prog_len <= 0)
return -1;
aligned_hdr_len = ROUND_UP(sizeof(hdr));
hdr.start_address = htole32(START_BASE + aligned_hdr_len);
aligned_prog_len = ROUND_UP(prog_len);
hdr.copy_size = htole16(aligned_hdr_len + aligned_prog_len);
if (do_write(1, &hdr, sizeof(hdr)) < 0)
return -1;
if (write_zero(1, aligned_hdr_len - sizeof(hdr)) < 0)
return -1;
if (do_write(1, boot_program, prog_len) < 0)
return 1;
/* Write out the rest of the kernel */
while (1) {
prog_len = do_read(0, boot_program, sizeof(boot_program));
if (prog_len < 0)
return 1;
if (prog_len == 0)
break;
if (do_write(1, boot_program, prog_len) < 0)
return 1;
}
return 0;
}
ROM-able zImage boot from MMC
-----------------------------
A ROM-able zImage compiled with ZBOOT_ROM_MMCIF may be written to MMC, and
SuperH Mobile ARM will boot directly from the MMCIF hardware block.
This is achieved by the mask ROM loading the first portion of the image into
MERAM and then jumping to it. This portion contains loader code which
copies the entire image to SDRAM and jumps to it. From there the zImage
boot code proceeds as normal, uncompressing the image into its final
location and then jumping to it.
This code has been tested on an AP4EVB board using the developer 1A eMMC
boot mode, which is configured using the following jumper settings.
The board used for testing required a patched mask ROM in order for
this mode to function.
8 7 6 5 4 3 2 1
x|x|x|x|x| |x|
S4 -+-+-+-+-+-+-+-
| | | | |x| |x on
The zImage must be written to the MMC card at sector 1 (512 bytes) in
vrl4 format. A utility vrl4 is supplied to accomplish this.
e.g.
vrl4 < zImage | dd of=/dev/sdX bs=512 seek=1
A dual-voltage MMC 4.0 card was used for testing.
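As an illustration (not part of the original text), a complete sequence
might look like the following, with /dev/sdX standing in for the card
reader device and the cross-compiler prefix adjusted to your toolchain:

  make ARCH=arm ap4evb_defconfig   # then enable ZBOOT_ROM / ZBOOT_ROM_MMCIF
  make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- zImage
  gcc -o vrl4 vrl4.c               # build the host tool listed above
  ./vrl4 < arch/arm/boot/zImage | dd of=/dev/sdX bs=512 seek=1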
...@@ -7,7 +7,7 @@ config ARM
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_ARCH_KGDB
select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
...@@ -24,7 +24,7 @@ config ARM
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7))
select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
select HAVE_C_RECORDMCOUNT
select HAVE_GENERIC_HARDIRQS
select HAVE_SPARSE_IRQ
...@@ -63,6 +63,10 @@ config GENERIC_CLOCKEVENTS_BROADCAST
depends on GENERIC_CLOCKEVENTS
default y if SMP
config KTIME_SCALAR
bool
default y
config HAVE_TCM
bool
select GENERIC_ALLOCATOR
...@@ -178,11 +182,6 @@ config FIQ
config ARCH_MTD_XIP
bool
config ARM_L1_CACHE_SHIFT_6
bool
help
Setting ARM L1 cache line size to 64 Bytes.
config VECTORS_BASE
hex
default 0xffff0000 if MMU || CPU_HIGH_VECTOR
...@@ -191,6 +190,22 @@ config VECTORS_BASE
help
The base address of exception vectors.
config ARM_PATCH_PHYS_VIRT
bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)"
depends on EXPERIMENTAL
depends on !XIP_KERNEL && MMU
depends on !ARCH_REALVIEW || !SPARSEMEM
help
Patch phys-to-virt translation functions at runtime according to
the position of the kernel in system memory.
This can only be used with non-XIP, MMU-enabled kernels where
the base of physical memory is at a 16MB boundary.
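The idea can be sketched roughly as follows (an illustrative sketch only,
not the kernel's exact implementation; the table section name and the
placeholder immediate are assumptions):

	/* Each translation site emits an 'add' with a placeholder immediate
	 * and records its address in a table; early boot code walks that
	 * table and rewrites the immediate with the real virt->phys offset. */
	static inline unsigned long sketch_virt_to_phys(unsigned long x)
	{
		unsigned long t;

		asm volatile(
		"1:	add	%0, %1, #0x81000000	@ patched at boot\n"
		"	.pushsection .pv_table, \"a\"\n"
		"	.long	1b\n"
		"	.popsection\n"
			: "=r" (t) : "r" (x));
		return t;
	}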
config ARM_PATCH_PHYS_VIRT_16BIT
def_bool y
depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM
source "init/Kconfig" source "init/Kconfig"
source "kernel/Kconfig.freezer" source "kernel/Kconfig.freezer"
...@@ -346,7 +361,7 @@ config ARCH_FOOTBRIDGE ...@@ -346,7 +361,7 @@ config ARCH_FOOTBRIDGE
bool "FootBridge" bool "FootBridge"
select CPU_SA110 select CPU_SA110
select FOOTBRIDGE select FOOTBRIDGE
select ARCH_USES_GETTIMEOFFSET select GENERIC_CLOCKEVENTS
help help
Support for systems based on the DC21285 companion chip Support for systems based on the DC21285 companion chip
("FootBridge"), such as the Simtec CATS and the Rebel NetWinder. ("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
...@@ -457,6 +472,7 @@ config ARCH_IXP4XX ...@@ -457,6 +472,7 @@ config ARCH_IXP4XX
config ARCH_DOVE config ARCH_DOVE
bool "Marvell Dove" bool "Marvell Dove"
select CPU_V6K
select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
...@@ -875,6 +891,16 @@ config PLAT_SPEAR
help
Support for ST's SPEAr platform (SPEAr3xx, SPEAr6xx and SPEAr13xx).
config ARCH_VT8500
bool "VIA/WonderMedia 85xx"
select CPU_ARM926T
select GENERIC_GPIO
select ARCH_HAS_CPUFREQ
select GENERIC_CLOCKEVENTS
select ARCH_REQUIRE_GPIOLIB
select HAVE_PWM
help
Support for VIA/WonderMedia VT8500/WM85xx System-on-Chip.
endchoice
#
...@@ -1007,6 +1033,8 @@ source "arch/arm/mach-versatile/Kconfig"
source "arch/arm/mach-vexpress/Kconfig"
source "arch/arm/mach-vt8500/Kconfig"
source "arch/arm/mach-w90x900/Kconfig" source "arch/arm/mach-w90x900/Kconfig"
# Definitions to make life easier # Definitions to make life easier
...@@ -1048,7 +1076,7 @@ config XSCALE_PMU ...@@ -1048,7 +1076,7 @@ config XSCALE_PMU
default y default y
config CPU_HAS_PMU config CPU_HAS_PMU
depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \ depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \
(!ARCH_OMAP3 || OMAP3_EMU) (!ARCH_OMAP3 || OMAP3_EMU)
default y default y
bool bool
...@@ -1064,7 +1092,7 @@ endif ...@@ -1064,7 +1092,7 @@ endif
config ARM_ERRATA_411920 config ARM_ERRATA_411920
bool "ARM errata: Invalidation of the Instruction Cache operation can fail" bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
depends on CPU_V6 depends on CPU_V6 || CPU_V6K
help help
Invalidation of the Instruction Cache operation can Invalidation of the Instruction Cache operation can
fail. This erratum is present in 1136 (before r1p4), 1156 and 1176. fail. This erratum is present in 1136 (before r1p4), 1156 and 1176.
...@@ -1140,7 +1168,7 @@ config ARM_ERRATA_742231 ...@@ -1140,7 +1168,7 @@ config ARM_ERRATA_742231
config PL310_ERRATA_588369 config PL310_ERRATA_588369
bool "Clean & Invalidate maintenance operations do not invalidate clean lines" bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
depends on CACHE_L2X0 && ARCH_OMAP4 depends on CACHE_L2X0
help help
The PL310 L2 cache controller implements three types of Clean & The PL310 L2 cache controller implements three types of Clean &
Invalidate maintenance operations: by Physical Address Invalidate maintenance operations: by Physical Address
...@@ -1149,8 +1177,7 @@ config PL310_ERRATA_588369 ...@@ -1149,8 +1177,7 @@ config PL310_ERRATA_588369
clean operation followed immediately by an invalidate operation, clean operation followed immediately by an invalidate operation,
both performing to the same memory location. This functionality both performing to the same memory location. This functionality
is not correctly implemented in PL310 as clean lines are not is not correctly implemented in PL310 as clean lines are not
invalidated as a result of these operations. Note that this errata invalidated as a result of these operations.
uses Texas Instrument's secure monitor api.
config ARM_ERRATA_720789 config ARM_ERRATA_720789
bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID" bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
...@@ -1164,6 +1191,17 @@ config ARM_ERRATA_720789 ...@@ -1164,6 +1191,17 @@ config ARM_ERRATA_720789
tables. The workaround changes the TLB flushing routines to invalidate tables. The workaround changes the TLB flushing routines to invalidate
entries regardless of the ASID. entries regardless of the ASID.
config PL310_ERRATA_727915
bool "Background Clean & Invalidate by Way operation can cause data corruption"
depends on CACHE_L2X0
help
PL310 implements the Clean & Invalidate by Way L2 cache maintenance
operation (offset 0x7FC). This operation runs in background so that
PL310 can handle normal accesses while it is in progress. Under very
rare circumstances, due to this erratum, write data can be lost when
PL310 treats a cacheable write transaction during a Clean &
Invalidate by Way operation.
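The style of workaround used for this class of PL310 errata can be
sketched as below (illustrative only; the register offsets follow the
usual L2X0 layout and the function name is made up, this is not the
kernel's actual code):

	#define L2X0_DEBUG_CTRL		0xF40
	#define L2X0_CLEAN_INV_WAY	0x7FC

	static void l2x0_flush_all_sketch(void __iomem *base, u32 way_mask)
	{
		/* Disable write-back and cache linefill while the background
		 * clean+invalidate by way runs, then restore normal mode. */
		writel_relaxed(3, base + L2X0_DEBUG_CTRL);
		writel_relaxed(way_mask, base + L2X0_CLEAN_INV_WAY);
		while (readl_relaxed(base + L2X0_CLEAN_INV_WAY) & way_mask)
			cpu_relax();
		writel_relaxed(0, base + L2X0_DEBUG_CTRL);
	}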
config ARM_ERRATA_743622
bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
depends on CPU_V7
...@@ -1202,6 +1240,28 @@ config ARM_ERRATA_753970
This has the same effect as the cache sync operation: store buffer
drain and waiting for all buffers empty.
config ARM_ERRATA_754322
bool "ARM errata: possible faulty MMU translations following an ASID switch"
depends on CPU_V7
help
This option enables the workaround for the 754322 Cortex-A9 (r2p*,
r3p*) erratum. A speculative memory access may cause a page table walk
which starts prior to an ASID switch but completes afterwards. This
can populate the micro-TLB with a stale entry which may be hit with
the new ASID. This workaround places two dsb instructions in the mm
switching code so that no page table walks can cross the ASID switch.
config ARM_ERRATA_754327
bool "ARM errata: no automatic Store Buffer drain"
depends on CPU_V7 && SMP
help
This option enables the workaround for the 754327 Cortex-A9 (prior to
r2p0) erratum. The Store Buffer does not have any automatic draining
mechanism and therefore a livelock may occur if an external agent
continuously polls a memory location waiting to observe an update.
This workaround defines cpu_relax() as smp_mb(), preventing correctly
written polling loops from denying visibility of updates to memory.
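In effect the workaround boils down to something like the following
(a sketch; the real definition lives in the arch headers):

	#ifdef CONFIG_ARM_ERRATA_754327
	#define cpu_relax()	smp_mb()	/* full barrier: forces the store buffer to drain */
	#else
	#define cpu_relax()	barrier()
	#endif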
endmenu
source "arch/arm/common/Kconfig"
...@@ -1275,6 +1335,7 @@ source "kernel/time/Kconfig"
config SMP
bool "Symmetric Multi-Processing (EXPERIMENTAL)"
depends on EXPERIMENTAL
depends on CPU_V6K || CPU_V7
depends on GENERIC_CLOCKEVENTS
depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
...@@ -1386,7 +1447,7 @@ config HZ
config THUMB2_KERNEL
bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL
depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
select AEABI
select ARM_ASM_UNIFIED
help
...@@ -1396,6 +1457,37 @@ config THUMB2_KERNEL
If unsure, say N.
config THUMB2_AVOID_R_ARM_THM_JUMP11
bool "Work around buggy Thumb-2 short branch relocations in gas"
depends on THUMB2_KERNEL && MODULES
default y
help
Various binutils versions can resolve Thumb-2 branches to
locally-defined, preemptible global symbols as short-range "b.n"
branch instructions.
This is a problem, because there's no guarantee the final
destination of the symbol, or any candidate locations for a
trampoline, are within range of the branch. For this reason, the
kernel does not support fixing up the R_ARM_THM_JUMP11 (102)
relocation in modules at all, and it makes little sense to add
support.
The symptom is that the kernel fails with an "unsupported
relocation" error when loading some modules.
Until fixed tools are available, passing
-fno-optimize-sibling-calls to gcc should prevent gcc generating
code which hits this problem, at the cost of a bit of extra runtime
stack usage in some cases.
The problem is described in more detail at:
https://bugs.launchpad.net/binutils-linaro/+bug/725126
Only Thumb-2 kernels are affected.
Unless you are sure your tools don't have this problem, say Y.
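For illustration, the pattern that can trigger the bad relocation is an
ordinary tail call to a global function defined in the same module source
file (hypothetical example, not taken from the patch):

	int helper(int x)		/* global (preemptible) symbol defined locally */
	{
		return x + 1;
	}

	int wrapper(int x)
	{
		/* With sibling-call optimisation gcc may emit a short Thumb-2
		 * 'b.n helper' here, leaving an R_ARM_THM_JUMP11 relocation
		 * that the module loader refuses to fix up. */
		return helper(x);
	}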
config ARM_ASM_UNIFIED
bool
...@@ -1644,6 +1736,18 @@ config ZBOOT_ROM
Say Y here if you intend to execute your compressed kernel image
(zImage) directly from ROM or flash. If unsure, say N.
config ZBOOT_ROM_MMCIF
bool "Include MMCIF loader in zImage (EXPERIMENTAL)"
depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
help
Say Y here to include experimental MMCIF loading code in the
ROM-able zImage. With this enabled it is possible to write the
ROM-able zImage kernel image to an MMC card and boot the
kernel straight from the reset vector. At reset the processor
Mask ROM will load the first part of the ROM-able zImage
which in turn loads the rest of the kernel image to RAM using the
MMCIF hardware block.
config CMDLINE
string "Default kernel command string"
default ""
...@@ -1877,7 +1981,7 @@ config FPE_FASTFPE
config VFP
bool "VFP-format floating point maths"
depends on CPU_V6 || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
depends on CPU_V6 || CPU_V6K || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
help
Say Y to include VFP support code in the kernel. This is needed
if your hardware includes a VFP unit.
......
...@@ -89,6 +89,7 @@ tune-$(CONFIG_CPU_XSCALE) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110)
tune-$(CONFIG_CPU_XSC3) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
tune-$(CONFIG_CPU_FEROCEON) :=$(call cc-option,-mtune=marvell-f,-mtune=xscale)
tune-$(CONFIG_CPU_V6) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
tune-$(CONFIG_CPU_V6K) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
ifeq ($(CONFIG_AEABI),y)
CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork
...@@ -105,6 +106,10 @@ AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mau
AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
CFLAGS_THUMB2 :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
AFLAGS_THUMB2 :=$(CFLAGS_THUMB2) -Wa$(comma)-mthumb
# Work around buggy relocation from gas if requested:
ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
CFLAGS_MODULE +=-fno-optimize-sibling-calls
endif
endif
# Need -Uarm for gcc < 3.x
...@@ -190,6 +195,7 @@ machine-$(CONFIG_ARCH_U300) := u300
machine-$(CONFIG_ARCH_U8500) := ux500
machine-$(CONFIG_ARCH_VERSATILE) := versatile
machine-$(CONFIG_ARCH_VEXPRESS) := vexpress
machine-$(CONFIG_ARCH_VT8500) := vt8500
machine-$(CONFIG_ARCH_W90X900) := w90x900
machine-$(CONFIG_ARCH_NUC93X) := nuc93x
machine-$(CONFIG_FOOTBRIDGE) := footbridge
...@@ -280,7 +286,7 @@ bzImage: zImage
zImage Image xipImage bootpImage uImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
zinstall install: vmlinux
zinstall uinstall install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
# We use MRPROPER_FILES and CLEAN_FILES now
...@@ -301,6 +307,7 @@ define archhelp
echo ' (supply initrd image via make variable INITRD=<path>)'
echo ' install - Install uncompressed kernel'
echo ' zinstall - Install compressed kernel'
echo ' uinstall - Install U-Boot wrapped compressed kernel'
echo ' Install using (your) ~/bin/$(INSTALLKERNEL) or'
echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
echo ' install to $$(INSTALL_PATH) and run lilo'
......
...@@ -99,6 +99,10 @@ zinstall: $(obj)/zImage
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/zImage System.map "$(INSTALL_PATH)"
uinstall: $(obj)/uImage
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/uImage System.map "$(INSTALL_PATH)"
zi:
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/zImage System.map "$(INSTALL_PATH)"
......
...@@ -4,9 +4,20 @@
# create a compressed vmlinuz image from the original vmlinux
#
OBJS =
# Ensure that the mmcif loader code appears early in the image
# to minimise the number of blocks that have to be read in
# order to load it.
ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y)
ifeq ($(CONFIG_ARCH_SH7372),y)
OBJS += mmcif-sh7372.o
endif
endif
AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
HEAD = head.o
OBJS = misc.o decompress.o
OBJS += misc.o decompress.o
FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c
#
...@@ -29,6 +40,10 @@ ifeq ($(CONFIG_ARCH_SA1100),y)
OBJS += head-sa1100.o
endif
ifeq ($(CONFIG_ARCH_VT8500),y)
OBJS += head-vt8500.o
endif
ifeq ($(CONFIG_CPU_XSCALE),y)
OBJS += head-xscale.o
endif
...@@ -83,9 +98,11 @@ endif
EXTRA_CFLAGS := -fpic -fno-builtin
EXTRA_AFLAGS := -Wa,-march=all
# Provide size of uncompressed kernel to the decompressor via a linker symbol.
LDFLAGS_vmlinux = --defsym _image_size=$(shell stat -c "%s" $(obj)/../Image)
# Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y)
LDFLAGS_vmlinux := --defsym zreladdr=$(ZRELADDR)
LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR)
endif
ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
......
...@@ -25,6 +25,36 @@
/* load board-specific initialization code */
#include <mach/zboot.h>
#ifdef CONFIG_ZBOOT_ROM_MMCIF
/* Load image from MMC */
adr sp, __tmp_stack + 128
ldr r0, __image_start
ldr r1, __image_end
subs r1, r1, r0
ldr r0, __load_base
bl mmcif_loader
/* Jump to loaded code */
ldr r0, __loaded
ldr r1, __image_start
sub r0, r0, r1
ldr r1, __load_base
add pc, r0, r1
__image_start:
.long _start
__image_end:
.long _got_end
__load_base:
.long CONFIG_MEMORY_START + 0x02000000 @ Load at 32Mb into SDRAM
__loaded:
.long __continue
.align
__tmp_stack:
.space 128
__continue:
#endif /* CONFIG_ZBOOT_ROM_MMCIF */
b 1f
__atags:@ tag #1
.long 12 @ tag->hdr.size = tag_size(tag_core);
......
/*
* linux/arch/arm/boot/compressed/head-vt8500.S
*
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*
* VIA VT8500 specific tweaks. This is merged into head.S by the linker.
*
*/
#include <linux/linkage.h>
#include <asm/mach-types.h>
.section ".start", "ax"
__VT8500_start:
@ Compare the SCC ID register against a list of known values
ldr r1, .SCCID
ldr r3, [r1]
@ VT8500 override
ldr r4, .VT8500SCC
cmp r3, r4
ldreq r7, .ID_BV07
beq .Lendvt8500
@ WM8505 override
ldr r4, .WM8505SCC
cmp r3, r4
ldreq r7, .ID_8505
beq .Lendvt8500
@ Otherwise, leave the bootloader's machine id untouched
.SCCID:
.word 0xd8120000
.VT8500SCC:
.word 0x34000102
.WM8505SCC:
.word 0x34260103
.ID_BV07:
.word MACH_TYPE_BV07
.ID_8505:
.word MACH_TYPE_WM8505_7IN_NETBOOK
.Lendvt8500:
...@@ -21,7 +21,7 @@
#if defined(CONFIG_DEBUG_ICEDCC)
#ifdef CONFIG_CPU_V6
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
.macro loadsp, rb, tmp
.endm
.macro writeb, ch, rb
...@@ -128,14 +128,14 @@ wait: mrc p14, 0, pc, c0, c1, 0 ...@@ -128,14 +128,14 @@ wait: mrc p14, 0, pc, c0, c1, 0
.arm @ Always enter in ARM state .arm @ Always enter in ARM state
start: start:
.type start,#function .type start,#function
THUMB( adr r12, BSYM(1f) ) .rept 7
THUMB( bx r12 )
THUMB( .rept 6 )
ARM( .rept 8 )
mov r0, r0 mov r0, r0
.endr .endr
ARM( mov r0, r0 )
ARM( b 1f )
THUMB( adr r12, BSYM(1f) )
THUMB( bx r12 )
b 1f
.word 0x016f2818 @ Magic numbers to help the loader .word 0x016f2818 @ Magic numbers to help the loader
.word start @ absolute load/run zImage address .word start @ absolute load/run zImage address
.word _edata @ zImage end address .word _edata @ zImage end address
...@@ -174,9 +174,7 @@ not_angel: ...@@ -174,9 +174,7 @@ not_angel:
*/ */
.text .text
adr r0, LC0
ldmia r0, {r1, r2, r3, r5, r6, r11, ip}
ldr sp, [r0, #28]
#ifdef CONFIG_AUTO_ZRELADDR #ifdef CONFIG_AUTO_ZRELADDR
@ determine final kernel image address @ determine final kernel image address
mov r4, pc mov r4, pc
...@@ -185,35 +183,108 @@ not_angel: ...@@ -185,35 +183,108 @@ not_angel:
#else #else
ldr r4, =zreladdr ldr r4, =zreladdr
#endif #endif
subs r0, r0, r1 @ calculate the delta offset
@ if delta is zero, we are bl cache_on
beq not_relocated @ running at the address we
@ were linked at. restart: adr r0, LC0
ldmia r0, {r1, r2, r3, r5, r6, r9, r11, r12}
ldr sp, [r0, #32]
/*
* We might be running at a different address. We need
* to fix up various pointers.
*/
sub r0, r0, r1 @ calculate the delta offset
add r5, r5, r0 @ _start
add r6, r6, r0 @ _edata
#ifndef CONFIG_ZBOOT_ROM
/* malloc space is above the relocated stack (64k max) */
add sp, sp, r0
add r10, sp, #0x10000
#else
/* /*
* We're running at a different address. We need to fix * With ZBOOT_ROM the bss/stack is non relocatable,
* up various pointers: * but someone could still run this code from RAM,
* r5 - zImage base address (_start) * in which case our reference is _edata.
* r6 - size of decompressed image
* r11 - GOT start
* ip - GOT end
*/ */
add r5, r5, r0 mov r10, r6
#endif
/*
* Check to see if we will overwrite ourselves.
* r4 = final kernel address
* r5 = start of this image
* r9 = size of decompressed image
* r10 = end of this image, including bss/stack/malloc space if non XIP
* We basically want:
* r4 >= r10 -> OK
* r4 + image length <= r5 -> OK
*/
cmp r4, r10
bhs wont_overwrite
add r10, r4, r9
cmp r10, r5
bls wont_overwrite
/*
* Relocate ourselves past the end of the decompressed kernel.
* r5 = start of this image
* r6 = _edata
* r10 = end of the decompressed kernel
* Because we always copy ahead, we need to do it from the end and go
* backward in case the source and destination overlap.
*/
/* Round up to next 256-byte boundary. */
add r10, r10, #256
bic r10, r10, #255
sub r9, r6, r5 @ size to copy
add r9, r9, #31 @ rounded up to a multiple
bic r9, r9, #31 @ ... of 32 bytes
add r6, r9, r5
add r9, r9, r10
1: ldmdb r6!, {r0 - r3, r10 - r12, lr}
cmp r6, r5
stmdb r9!, {r0 - r3, r10 - r12, lr}
bhi 1b
/* Preserve offset to relocated code. */
sub r6, r9, r6
bl cache_clean_flush
adr r0, BSYM(restart)
add r0, r0, r6
mov pc, r0
wont_overwrite:
/*
* If delta is zero, we are running at the address we were linked at.
* r0 = delta
* r2 = BSS start
* r3 = BSS end
* r4 = kernel execution address
* r7 = architecture ID
* r8 = atags pointer
* r11 = GOT start
* r12 = GOT end
* sp = stack pointer
*/
teq r0, #0
beq not_relocated
add r11, r11, r0 add r11, r11, r0
add ip, ip, r0 add r12, r12, r0
#ifndef CONFIG_ZBOOT_ROM #ifndef CONFIG_ZBOOT_ROM
/* /*
* If we're running fully PIC === CONFIG_ZBOOT_ROM = n, * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
* we need to fix up pointers into the BSS region. * we need to fix up pointers into the BSS region.
* r2 - BSS start * Note that the stack pointer has already been fixed up.
* r3 - BSS end
* sp - stack pointer
*/ */
add r2, r2, r0 add r2, r2, r0
add r3, r3, r0 add r3, r3, r0
add sp, sp, r0
/* /*
* Relocate all entries in the GOT table. * Relocate all entries in the GOT table.
...@@ -221,7 +292,7 @@ not_angel: ...@@ -221,7 +292,7 @@ not_angel:
1: ldr r1, [r11, #0] @ relocate entries in the GOT 1: ldr r1, [r11, #0] @ relocate entries in the GOT
add r1, r1, r0 @ table. This fixes up the add r1, r1, r0 @ table. This fixes up the
str r1, [r11], #4 @ C references. str r1, [r11], #4 @ C references.
cmp r11, ip cmp r11, r12
blo 1b blo 1b
#else #else
...@@ -234,7 +305,7 @@ not_angel: ...@@ -234,7 +305,7 @@ not_angel:
cmphs r3, r1 @ _end < entry cmphs r3, r1 @ _end < entry
addlo r1, r1, r0 @ table. This fixes up the addlo r1, r1, r0 @ table. This fixes up the
str r1, [r11], #4 @ C references. str r1, [r11], #4 @ C references.
cmp r11, ip cmp r11, r12
blo 1b blo 1b
#endif #endif
...@@ -246,76 +317,24 @@ not_relocated: mov r0, #0 ...@@ -246,76 +317,24 @@ not_relocated: mov r0, #0
cmp r2, r3 cmp r2, r3
blo 1b blo 1b
/*
* The C runtime environment should now be setup
* sufficiently. Turn the cache on, set up some
* pointers, and start decompressing.
*/
bl cache_on
mov r1, sp @ malloc space above stack
add r2, sp, #0x10000 @ 64k max
/* /*
* Check to see if we will overwrite ourselves. * The C runtime environment should now be setup sufficiently.
* r4 = final kernel address * Set up some pointers, and start decompressing.
* r5 = start of this image * r4 = kernel execution address
* r6 = size of decompressed image * r7 = architecture ID
* r2 = end of malloc space (and therefore this image) * r8 = atags pointer
* We basically want:
* r4 >= r2 -> OK
* r4 + image length <= r5 -> OK
*/ */
cmp r4, r2 mov r0, r4
bhs wont_overwrite mov r1, sp @ malloc space above stack
add r0, r4, r6 add r2, sp, #0x10000 @ 64k max
cmp r0, r5
bls wont_overwrite
mov r5, r2 @ decompress after malloc space
mov r0, r5
mov r3, r7 mov r3, r7
bl decompress_kernel bl decompress_kernel
add r0, r0, #127 + 128 @ alignment + stack
bic r0, r0, #127 @ align the kernel length
/*
* r0 = decompressed kernel length
* r1-r3 = unused
* r4 = kernel execution address
* r5 = decompressed kernel start
* r7 = architecture ID
* r8 = atags pointer
* r9-r12,r14 = corrupted
*/
add r1, r5, r0 @ end of decompressed kernel
adr r2, reloc_start
ldr r3, LC1
add r3, r2, r3
1: ldmia r2!, {r9 - r12, r14} @ copy relocation code
stmia r1!, {r9 - r12, r14}
ldmia r2!, {r9 - r12, r14}
stmia r1!, {r9 - r12, r14}
cmp r2, r3
blo 1b
mov sp, r1
add sp, sp, #128 @ relocate the stack
bl cache_clean_flush bl cache_clean_flush
ARM( add pc, r5, r0 ) @ call relocation code bl cache_off
THUMB( add r12, r5, r0 ) mov r0, #0 @ must be zero
THUMB( mov pc, r12 ) @ call relocation code mov r1, r7 @ restore architecture number
mov r2, r8 @ restore atags pointer
/* mov pc, r4 @ call kernel
* We're not in danger of overwriting ourselves. Do this the simple way.
*
* r4 = kernel execution address
* r7 = architecture ID
*/
wont_overwrite: mov r0, r4
mov r3, r7
bl decompress_kernel
b call_kernel
.align 2 .align 2
.type LC0, #object .type LC0, #object
...@@ -323,11 +342,11 @@ LC0: .word LC0 @ r1 ...@@ -323,11 +342,11 @@ LC0: .word LC0 @ r1
.word __bss_start @ r2 .word __bss_start @ r2
.word _end @ r3 .word _end @ r3
.word _start @ r5 .word _start @ r5
.word _image_size @ r6 .word _edata @ r6
.word _image_size @ r9
.word _got_start @ r11 .word _got_start @ r11
.word _got_end @ ip .word _got_end @ ip
.word user_stack_end @ sp .word user_stack_end @ sp
LC1: .word reloc_end - reloc_start
.size LC0, . - LC0 .size LC0, . - LC0
#ifdef CONFIG_ARCH_RPC #ifdef CONFIG_ARCH_RPC
...@@ -353,7 +372,7 @@ params: ldr r0, =0x10000100 @ params_phys for RPC ...@@ -353,7 +372,7 @@ params: ldr r0, =0x10000100 @ params_phys for RPC
* On exit, * On exit,
* r0, r1, r2, r3, r9, r10, r12 corrupted * r0, r1, r2, r3, r9, r10, r12 corrupted
* This routine must preserve: * This routine must preserve:
* r4, r5, r6, r7, r8 * r4, r7, r8
*/ */
.align 5 .align 5
cache_on: mov r3, #8 @ cache_on function cache_on: mov r3, #8 @ cache_on function
...@@ -550,43 +569,6 @@ __common_mmu_cache_on: ...@@ -550,43 +569,6 @@ __common_mmu_cache_on:
sub pc, lr, r0, lsr #32 @ properly flush pipeline sub pc, lr, r0, lsr #32 @ properly flush pipeline
#endif #endif
/*
* All code following this line is relocatable. It is relocated by
* the above code to the end of the decompressed kernel image and
* executed there. During this time, we have no stacks.
*
* r0 = decompressed kernel length
* r1-r3 = unused
* r4 = kernel execution address
* r5 = decompressed kernel start
* r7 = architecture ID
* r8 = atags pointer
* r9-r12,r14 = corrupted
*/
.align 5
reloc_start: add r9, r5, r0
sub r9, r9, #128 @ do not copy the stack
debug_reloc_start
mov r1, r4
1:
.rept 4
ldmia r5!, {r0, r2, r3, r10 - r12, r14} @ relocate kernel
stmia r1!, {r0, r2, r3, r10 - r12, r14}
.endr
cmp r5, r9
blo 1b
mov sp, r1
add sp, sp, #128 @ relocate the stack
debug_reloc_end
call_kernel: bl cache_clean_flush
bl cache_off
mov r0, #0 @ must be zero
mov r1, r7 @ restore architecture number
mov r2, r8 @ restore atags pointer
mov pc, r4 @ call kernel
/* /*
* Here follow the relocatable cache support functions for the * Here follow the relocatable cache support functions for the
* various processors. This is a generic hook for locating an * various processors. This is a generic hook for locating an
...@@ -791,7 +773,7 @@ proc_types: ...@@ -791,7 +773,7 @@ proc_types:
* On exit, * On exit,
* r0, r1, r2, r3, r9, r12 corrupted * r0, r1, r2, r3, r9, r12 corrupted
* This routine must preserve: * This routine must preserve:
* r4, r6, r7 * r4, r7, r8
*/ */
.align 5 .align 5
cache_off: mov r3, #12 @ cache_off function cache_off: mov r3, #12 @ cache_off function
...@@ -866,7 +848,7 @@ __armv3_mmu_cache_off: ...@@ -866,7 +848,7 @@ __armv3_mmu_cache_off:
* On exit, * On exit,
* r1, r2, r3, r9, r10, r11, r12 corrupted * r1, r2, r3, r9, r10, r11, r12 corrupted
* This routine must preserve: * This routine must preserve:
* r0, r4, r5, r6, r7 * r4, r6, r7, r8
*/ */
.align 5 .align 5
cache_clean_flush: cache_clean_flush:
...@@ -1088,7 +1070,6 @@ memdump: mov r12, r0 ...@@ -1088,7 +1070,6 @@ memdump: mov r12, r0
#endif #endif
.ltorg .ltorg
reloc_end:
.align .align
.section ".stack", "aw", %nobits .section ".stack", "aw", %nobits
......
...@@ -36,7 +36,7 @@ extern void error(char *x);
#ifdef CONFIG_DEBUG_ICEDCC
#ifdef CONFIG_CPU_V6
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
static void icedcc_putc(int ch)
{
......
/*
* sh7372 MMCIF loader
*
* Copyright (C) 2010 Magnus Damm
* Copyright (C) 2010 Simon Horman
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/mmc/sh_mmcif.h>
#include <mach/mmcif.h>
#define MMCIF_BASE (void __iomem *)0xe6bd0000
#define PORT84CR (void __iomem *)0xe6050054
#define PORT85CR (void __iomem *)0xe6050055
#define PORT86CR (void __iomem *)0xe6050056
#define PORT87CR (void __iomem *)0xe6050057
#define PORT88CR (void __iomem *)0xe6050058
#define PORT89CR (void __iomem *)0xe6050059
#define PORT90CR (void __iomem *)0xe605005a
#define PORT91CR (void __iomem *)0xe605005b
#define PORT92CR (void __iomem *)0xe605005c
#define PORT99CR (void __iomem *)0xe6050063
#define SMSTPCR3 (void __iomem *)0xe615013c
/* SH7372 specific MMCIF loader
*
* loads the zImage from an MMC card starting from block 1.
*
* The image must start with a vrl4 header, and
* the zImage must start at offset 512 of the image. That is,
* at block 2 (=byte 1024) on the media
*
* Use the following line to write the vrl4-formatted zImage
* to an MMC card
* # dd if=vrl4.out of=/dev/sdx bs=512 seek=1
*/
asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len)
{
mmcif_init_progress();
mmcif_update_progress(MMCIF_PROGRESS_ENTER);
/* Initialise MMC
* registers: PORT84CR-PORT92CR
* (MMCD0_0-MMCD0_7,MMCCMD0 Control)
* value: 0x04 - select function 4
*/
__raw_writeb(0x04, PORT84CR);
__raw_writeb(0x04, PORT85CR);
__raw_writeb(0x04, PORT86CR);
__raw_writeb(0x04, PORT87CR);
__raw_writeb(0x04, PORT88CR);
__raw_writeb(0x04, PORT89CR);
__raw_writeb(0x04, PORT90CR);
__raw_writeb(0x04, PORT91CR);
__raw_writeb(0x04, PORT92CR);
/* Initialise MMC
* registers: PORT99CR (MMCCLK0 Control)
* value: 0x10 | 0x04 - enable output | select function 4
*/
__raw_writeb(0x14, PORT99CR);
/* Enable clock to MMC hardware block */
__raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 12), SMSTPCR3);
mmcif_update_progress(MMCIF_PROGRESS_INIT);
/* setup MMCIF hardware */
sh_mmcif_boot_init(MMCIF_BASE);
mmcif_update_progress(MMCIF_PROGRESS_LOAD);
/* load kernel via MMCIF interface */
sh_mmcif_boot_do_read(MMCIF_BASE, 2, /* Kernel is at block 2 */
(len + SH_MMCIF_BBS - 1) / SH_MMCIF_BBS, buf);
/* Disable clock to MMC hardware block */
__raw_writel(__raw_readl(SMSTPCR3) & (1 << 12), SMSTPCR3);
mmcif_update_progress(MMCIF_PROGRESS_DONE);
}
...@@ -43,9 +43,6 @@ SECTIONS
_etext = .;
/* Assume size of decompressed image is 4x the compressed image */
_image_size = (_etext - _text) * 4;
_got_start = .;
.got : { *(.got) }
_got_end = .;
......
...@@ -44,6 +44,19 @@ struct gic_chip_data {
void __iomem *cpu_base;
};
/*
 * Arch-specific GIC IRQ extension hooks; all of them default to NULL.
 */
struct irq_chip gic_arch_extn = {
.irq_ack = NULL,
.irq_mask = NULL,
.irq_unmask = NULL,
.irq_retrigger = NULL,
.irq_set_type = NULL,
.irq_set_wake = NULL,
};
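A platform would typically fill in these hooks from its interrupt init
code, along the lines of the hypothetical sketch below (the names and the
wake-up handling are made up for illustration; only gic_arch_extn and
gic_init() come from the code above):

	extern void __iomem *my_gic_dist_base, *my_gic_cpu_base;	/* mapped elsewhere */

	static int my_soc_gic_set_wake(struct irq_data *d, unsigned int on)
	{
		/* program the SoC-specific wake-up unit for this interrupt */
		return 0;
	}

	void __init my_soc_init_irq(void)
	{
		gic_arch_extn.irq_set_wake = my_soc_gic_set_wake;
		gic_init(0, 29, my_gic_dist_base, my_gic_cpu_base);
	}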
#ifndef MAX_GIC_NR
#define MAX_GIC_NR 1
#endif
...@@ -74,6 +87,8 @@ static inline unsigned int gic_irq(struct irq_data *d) ...@@ -74,6 +87,8 @@ static inline unsigned int gic_irq(struct irq_data *d)
static void gic_ack_irq(struct irq_data *d) static void gic_ack_irq(struct irq_data *d)
{ {
spin_lock(&irq_controller_lock); spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_ack)
gic_arch_extn.irq_ack(d);
writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
spin_unlock(&irq_controller_lock); spin_unlock(&irq_controller_lock);
} }
...@@ -84,6 +99,8 @@ static void gic_mask_irq(struct irq_data *d) ...@@ -84,6 +99,8 @@ static void gic_mask_irq(struct irq_data *d)
spin_lock(&irq_controller_lock); spin_lock(&irq_controller_lock);
writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
if (gic_arch_extn.irq_mask)
gic_arch_extn.irq_mask(d);
spin_unlock(&irq_controller_lock); spin_unlock(&irq_controller_lock);
} }
...@@ -92,6 +109,8 @@ static void gic_unmask_irq(struct irq_data *d) ...@@ -92,6 +109,8 @@ static void gic_unmask_irq(struct irq_data *d)
u32 mask = 1 << (d->irq % 32); u32 mask = 1 << (d->irq % 32);
spin_lock(&irq_controller_lock); spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_unmask)
gic_arch_extn.irq_unmask(d);
writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
spin_unlock(&irq_controller_lock); spin_unlock(&irq_controller_lock);
} }
...@@ -116,6 +135,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type) ...@@ -116,6 +135,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
spin_lock(&irq_controller_lock); spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_set_type)
gic_arch_extn.irq_set_type(d, type);
val = readl(base + GIC_DIST_CONFIG + confoff); val = readl(base + GIC_DIST_CONFIG + confoff);
if (type == IRQ_TYPE_LEVEL_HIGH) if (type == IRQ_TYPE_LEVEL_HIGH)
val &= ~confmask; val &= ~confmask;
...@@ -141,32 +163,54 @@ static int gic_set_type(struct irq_data *d, unsigned int type) ...@@ -141,32 +163,54 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
return 0; return 0;
} }
static int gic_retrigger(struct irq_data *d)
{
if (gic_arch_extn.irq_retrigger)
return gic_arch_extn.irq_retrigger(d);
return -ENXIO;
}
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static int static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
gic_set_cpu(struct irq_data *d, const struct cpumask *mask_val, bool force) bool force)
{ {
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
unsigned int shift = (d->irq % 4) * 8; unsigned int shift = (d->irq % 4) * 8;
unsigned int cpu = cpumask_first(mask_val); unsigned int cpu = cpumask_first(mask_val);
u32 val; u32 val, mask, bit;
struct irq_desc *desc;
spin_lock(&irq_controller_lock); if (cpu >= 8)
desc = irq_to_desc(d->irq);
if (desc == NULL) {
spin_unlock(&irq_controller_lock);
return -EINVAL; return -EINVAL;
}
mask = 0xff << shift;
bit = 1 << (cpu + shift);
spin_lock(&irq_controller_lock);
d->node = cpu; d->node = cpu;
val = readl(reg) & ~(0xff << shift); val = readl(reg) & ~mask;
val |= 1 << (cpu + shift); writel(val | bit, reg);
writel(val, reg);
spin_unlock(&irq_controller_lock); spin_unlock(&irq_controller_lock);
return 0; return 0;
} }
#endif #endif
#ifdef CONFIG_PM
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
int ret = -ENXIO;
if (gic_arch_extn.irq_set_wake)
ret = gic_arch_extn.irq_set_wake(d, on);
return ret;
}
#else
#define gic_set_wake NULL
#endif
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{ {
struct gic_chip_data *chip_data = get_irq_data(irq); struct gic_chip_data *chip_data = get_irq_data(irq);
...@@ -202,9 +246,11 @@ static struct irq_chip gic_chip = { ...@@ -202,9 +246,11 @@ static struct irq_chip gic_chip = {
.irq_mask = gic_mask_irq, .irq_mask = gic_mask_irq,
.irq_unmask = gic_unmask_irq, .irq_unmask = gic_unmask_irq,
.irq_set_type = gic_set_type, .irq_set_type = gic_set_type,
.irq_retrigger = gic_retrigger,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
.irq_set_affinity = gic_set_cpu, .irq_set_affinity = gic_set_affinity,
#endif #endif
.irq_set_wake = gic_set_wake,
}; };
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
......
...@@ -32,11 +32,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
dump->u_ssize = 0;
dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
dump->u_debugreg[4] = tsk->thread.debug.nsaved;
if (dump->start_stack < 0x04000000)
dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
......
...@@ -148,15 +148,19 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) ...@@ -148,15 +148,19 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
* Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0. * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
*/ */
/*
* Native endian assembly bitops. nr = 0 -> word 0 bit 0.
*/
extern void _set_bit(int nr, volatile unsigned long * p);
extern void _clear_bit(int nr, volatile unsigned long * p);
extern void _change_bit(int nr, volatile unsigned long * p);
extern int _test_and_set_bit(int nr, volatile unsigned long * p);
extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
extern int _test_and_change_bit(int nr, volatile unsigned long * p);
/* /*
* Little endian assembly bitops. nr = 0 -> byte 0 bit 0. * Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
*/ */
extern void _set_bit_le(int nr, volatile unsigned long * p);
extern void _clear_bit_le(int nr, volatile unsigned long * p);
extern void _change_bit_le(int nr, volatile unsigned long * p);
extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
extern int _find_first_zero_bit_le(const void * p, unsigned size); extern int _find_first_zero_bit_le(const void * p, unsigned size);
extern int _find_next_zero_bit_le(const void * p, int size, int offset); extern int _find_next_zero_bit_le(const void * p, int size, int offset);
extern int _find_first_bit_le(const unsigned long *p, unsigned size); extern int _find_first_bit_le(const unsigned long *p, unsigned size);
...@@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); ...@@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
/* /*
* Big endian assembly bitops. nr = 0 -> byte 3 bit 0. * Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
*/ */
extern void _set_bit_be(int nr, volatile unsigned long * p);
extern void _clear_bit_be(int nr, volatile unsigned long * p);
extern void _change_bit_be(int nr, volatile unsigned long * p);
extern int _test_and_set_bit_be(int nr, volatile unsigned long * p);
extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p);
extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);
extern int _find_first_zero_bit_be(const void * p, unsigned size); extern int _find_first_zero_bit_be(const void * p, unsigned size);
extern int _find_next_zero_bit_be(const void * p, int size, int offset); extern int _find_next_zero_bit_be(const void * p, int size, int offset);
extern int _find_first_bit_be(const unsigned long *p, unsigned size); extern int _find_first_bit_be(const unsigned long *p, unsigned size);
...@@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); ...@@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
/* /*
* The __* form of bitops are non-atomic and may be reordered. * The __* form of bitops are non-atomic and may be reordered.
*/ */
#define ATOMIC_BITOP_LE(name,nr,p) \
(__builtin_constant_p(nr) ? \
____atomic_##name(nr, p) : \
_##name##_le(nr,p))
#define ATOMIC_BITOP_BE(name,nr,p) \
(__builtin_constant_p(nr) ? \
____atomic_##name(nr, p) : \
_##name##_be(nr,p))
#define ATOMIC_BITOP(name,nr,p) \
(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
#else
#define ATOMIC_BITOP_LE(name,nr,p) _##name##_le(nr,p)
#define ATOMIC_BITOP_BE(name,nr,p) _##name##_be(nr,p)
#define ATOMIC_BITOP(name,nr,p) _##name(nr,p)
#endif
#define NONATOMIC_BITOP(name,nr,p) \
(____nonatomic_##name(nr, p))
/*
 * Native endian atomic definitions.
 */
#define set_bit(nr,p) ATOMIC_BITOP(set_bit,nr,p)
#define clear_bit(nr,p) ATOMIC_BITOP(clear_bit,nr,p)
#define change_bit(nr,p) ATOMIC_BITOP(change_bit,nr,p)
#define test_and_set_bit(nr,p) ATOMIC_BITOP(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p) ATOMIC_BITOP(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p) ATOMIC_BITOP(test_and_change_bit,nr,p)
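In other words, after this change a call such as the one below goes through
a single native-endian path on both little- and big-endian kernels, either
the inline ____atomic_* form or the out-of-line _set_bit assembly depending
on configuration; only the find_*_bit helpers keep per-endian
implementations (illustrative snippet, not part of the patch):

	static unsigned long state_bits[2];

	void mark_ready(int nr)
	{
		set_bit(nr, state_bits);	/* -> ATOMIC_BITOP(set_bit, nr, state_bits) */
	}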
#ifndef __ARMEB__ #ifndef __ARMEB__
/* /*
* These are the little endian, atomic definitions. * These are the little endian, atomic definitions.
*/ */
#define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p)
#define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p)
#define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p)
#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_le(p,sz) #define find_first_bit(p,sz) _find_first_bit_le(p,sz)
...@@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); ...@@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#define WORD_BITOFF_TO_LE(x) ((x)) #define WORD_BITOFF_TO_LE(x) ((x))
#else #else
/* /*
* These are the big endian, atomic definitions. * These are the big endian, atomic definitions.
*/ */
#define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p)
#define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p)
#define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p)
#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz)
#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off)
#define find_first_bit(p,sz) _find_first_bit_be(p,sz) #define find_first_bit(p,sz) _find_first_bit_be(p,sz)
......
...@@ -12,130 +12,13 @@ ...@@ -12,130 +12,13 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <asm/glue.h> #include <asm/glue-cache.h>
#include <asm/shmparam.h> #include <asm/shmparam.h>
#include <asm/cachetype.h> #include <asm/cachetype.h>
#include <asm/outercache.h> #include <asm/outercache.h>
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE fa
# endif
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm940
# endif
#endif
#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm946
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xsc3
# endif
#endif
#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE mohawk
# endif
#endif
#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif
#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v7
//# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintainence model
#endif
/*
* This flag is used to indicate that the page pointed to by a pte is clean
* and does not require cleaning before returning it to the user.
...@@ -249,19 +132,11 @@ extern struct cpu_cache_fns cpu_cache;
* visible to the CPU.
*/
#define dmac_map_area cpu_cache.dma_map_area
#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
...@@ -276,10 +151,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
#define dmac_map_area __glue(_CACHE,_dma_map_area)
#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
...@@ -316,7 +187,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
* Optimized __flush_icache_all for the common cases. Note that UP ARMv7
* will fall through to use __flush_icache_all_generic.
*/
#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || \
#if (defined(CONFIG_CPU_V7) && \
(defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred __cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
...
/*
* arch/arm/include/asm/cpu-multi32.h
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/page.h>
struct mm_struct;
/*
* Don't change this structure - ASM code
* relies on it.
*/
extern struct processor {
/* MISC
* get data abort address/flags
*/
void (*_data_abort)(unsigned long pc);
/*
* Retrieve prefetch fault address
*/
unsigned long (*_prefetch_abort)(unsigned long lr);
/*
* Set up any processor specifics
*/
void (*_proc_init)(void);
/*
* Disable any processor specifics
*/
void (*_proc_fin)(void);
/*
* Special stuff for a reset
*/
void (*reset)(unsigned long addr) __attribute__((noreturn));
/*
* Idle the processor
*/
int (*_do_idle)(void);
/*
* Processor architecture specific
*/
/*
* clean a virtual address range from the
* D-cache without flushing the cache.
*/
void (*dcache_clean_area)(void *addr, int size);
/*
* Set the page table
*/
void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
/*
* Set a possibly extended PTE. Non-extended PTEs should
* ignore 'ext'.
*/
void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
} processor;
#define cpu_proc_init() processor._proc_init()
#define cpu_proc_fin() processor._proc_fin()
#define cpu_reset(addr) processor.reset(addr)
#define cpu_do_idle() processor._do_idle()
#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext)
#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm)
/*
* arch/arm/include/asm/cpu-single.h
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* Single CPU
*/
#ifdef __STDC__
#define __catify_fn(name,x) name##x
#else
#define __catify_fn(name,x) name/**/x
#endif
#define __cpu_fn(name,x) __catify_fn(name,x)
/*
* If we are supporting multiple CPUs, then we must use a table of
* function pointers for this lot. Otherwise, we can optimise the
* table away.
*/
#define cpu_proc_init __cpu_fn(CPU_NAME,_proc_init)
#define cpu_proc_fin __cpu_fn(CPU_NAME,_proc_fin)
#define cpu_reset __cpu_fn(CPU_NAME,_reset)
#define cpu_do_idle __cpu_fn(CPU_NAME,_do_idle)
#define cpu_dcache_clean_area __cpu_fn(CPU_NAME,_dcache_clean_area)
#define cpu_do_switch_mm __cpu_fn(CPU_NAME,_switch_mm)
#define cpu_set_pte_ext __cpu_fn(CPU_NAME,_set_pte_ext)
#include <asm/page.h>
struct mm_struct;
/* declare all the functions as extern */
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
...@@ -23,6 +23,8 @@
#define CPUID_EXT_ISAR4 "c2, 4"
#define CPUID_EXT_ISAR5 "c2, 5"
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
({ \
...@@ -43,7 +45,6 @@
__val; \
})
#else
extern unsigned int processor_id;
#define read_cpuid(reg) (processor_id)
#define read_cpuid_ext(reg) 0
#endif
...
/*
* arch/arm/include/asm/fncpy.h - helper macros for function body copying
*
* Copyright (C) 2011 Linaro Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* These macros are intended for use when there is a need to copy a low-level
* function body into special memory.
*
* For example, when reconfiguring the SDRAM controller, the code doing the
* reconfiguration may need to run from SRAM.
*
* NOTE: that the copied function body must be entirely self-contained and
* position-independent in order for this to work properly.
*
* NOTE: in order for embedded literals and data to get referenced correctly,
* the alignment of functions must be preserved when copying. To ensure this,
* the source and destination addresses for fncpy() must be aligned to a
* multiple of 8 bytes: you will get a BUG() if this condition is not met.
* You will typically need a ".align 3" directive in the assembler where the
* function to be copied is defined, and ensure that your allocator for the
* destination buffer returns 8-byte-aligned pointers.
*
* Typical usage example:
*
* extern int f(args);
* extern uint32_t size_of_f;
* int (*copied_f)(args);
* void *sram_buffer;
*
* copied_f = fncpy(sram_buffer, &f, size_of_f);
*
* ... later, call the function: ...
*
* copied_f(args);
*
* The size of the function to be copied can't be determined from C:
* this must be determined by other means, such as adding assembler directives
* in the file where f is defined.
*/
#ifndef __ASM_FNCPY_H
#define __ASM_FNCPY_H
#include <linux/types.h>
#include <linux/string.h>
#include <asm/bug.h>
#include <asm/cacheflush.h>
/*
* Minimum alignment requirement for the source and destination addresses
* for function copying.
*/
#define FNCPY_ALIGN 8
#define fncpy(dest_buf, funcp, size) ({ \
uintptr_t __funcp_address; \
typeof(funcp) __result; \
\
asm("" : "=r" (__funcp_address) : "0" (funcp)); \
\
/* \
* Ensure alignment of source and destination addresses, \
* disregarding the function's Thumb bit: \
*/ \
BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
(__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
\
memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
flush_icache_range((unsigned long)(dest_buf), \
(unsigned long)(dest_buf) + (size)); \
\
asm("" : "=r" (__result) \
: "0" ((uintptr_t)(dest_buf) | (__funcp_address & 1))); \
\
__result; \
})
#endif /* !__ASM_FNCPY_H */
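A minimal usage sketch of fncpy() follows; the routine name, size symbol and SRAM buffer below are hypothetical and only illustrate the calling pattern described in the header comment above.

/* Hypothetical example: copy a self-contained routine into SRAM and run it. */
extern int sram_reconfig(int arg);	/* assumed to be assembled with ".align 3" */
extern unsigned int sram_reconfig_sz;	/* size symbol emitted alongside the code   */

static int run_from_sram(void *sram_buf, int arg)
{
	int (*fn)(int);

	/* sram_buf must be 8-byte aligned, as enforced via FNCPY_ALIGN */
	fn = fncpy(sram_buf, &sram_reconfig, sram_reconfig_sz);
	return fn(arg);
}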
/*
* arch/arm/include/asm/glue-cache.h
*
* Copyright (C) 1999-2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_CACHE_H
#define ASM_GLUE_CACHE_H
#include <asm/glue.h>
/*
* Cache Model
* ===========
*/
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v3
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4
# endif
#endif
#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE fa
# endif
#endif
#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm926
# endif
#endif
#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm940
# endif
#endif
#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE arm946
# endif
#endif
#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE v4wb
# endif
#endif
#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xscale
# endif
#endif
#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE xsc3
# endif
#endif
#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
# define MULTI_CACHE 1
# else
# define _CACHE mohawk
# endif
#endif
#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v6
//# endif
#endif
#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//# define _CACHE v7
//# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
#ifndef MULTI_CACHE
#define __cpuc_flush_icache_all __glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range __glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
#define dmac_map_area __glue(_CACHE,_dma_map_area)
#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
#endif
#endif
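For illustration only: in a hypothetical single-cache configuration where _CACHE ends up as arm926 and MULTI_CACHE is not defined, the glue above resolves the generic names directly to the per-CPU implementations, roughly as sketched here.

/* Hypothetical expansion with _CACHE == arm926 (MULTI_CACHE not defined): */
/*   __cpuc_flush_kern_all  ->  arm926_flush_kern_cache_all                */
/*   __cpuc_flush_user_all  ->  arm926_flush_user_cache_all                */
/*   dmac_flush_range       ->  arm926_dma_flush_range                     */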
/*
* arch/arm/include/asm/glue-df.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_DF_H
#define ASM_GLUE_DF_H
#include <asm/glue.h>
/*
* Data Abort Model
* ================
*
* We have the following to choose from:
* arm6 - ARM6 style
* arm7 - ARM7 style
* v4_early - ARMv4 without Thumb early abort handler
* v4t_late - ARMv4 with Thumb late abort handler
* v4t_early - ARMv4 with Thumb early abort handler
* v5tej_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
* v7_early - ARMv7 generic early abort handler
*/
#undef CPU_DABORT_HANDLER
#undef MULTI_DABORT
#if defined(CONFIG_CPU_ARM610)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm6_data_abort
# endif
#endif
#if defined(CONFIG_CPU_ARM710)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm7_data_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_LV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_late_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5TJ
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5tj_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV6
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v6_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV7
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v7_early_abort
# endif
#endif
#ifndef CPU_DABORT_HANDLER
#error Unknown data abort handler type
#endif
#endif
/*
* arch/arm/include/asm/glue-pf.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000-2002 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_PF_H
#define ASM_GLUE_PF_H
#include <asm/glue.h>
/*
* Prefetch Abort Model
* ================
*
* We have the following to choose from:
* legacy - no IFSR, no IFAR
* v6 - ARMv6: IFSR, no IFAR
* v7 - ARMv7: IFSR and IFAR
*/
#undef CPU_PABORT_HANDLER
#undef MULTI_PABORT
#ifdef CONFIG_CPU_PABRT_LEGACY
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER legacy_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V6
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v6_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V7
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v7_pabort
# endif
#endif
#ifndef CPU_PABORT_HANDLER
#error Unknown prefetch abort handler type
#endif
#endif
/*
* arch/arm/include/asm/glue-proc.h
*
* Copyright (C) 1997-1999 Russell King
* Copyright (C) 2000 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_GLUE_PROC_H
#define ASM_GLUE_PROC_H
#include <asm/glue.h>
/*
* Work out if we need multiple CPU support
*/
#undef MULTI_CPU
#undef CPU_NAME
/*
* CPU_NAME - the prefix for CPU related functions
*/
#ifdef CONFIG_CPU_ARM610
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm6
# endif
#endif
#ifdef CONFIG_CPU_ARM7TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM710
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7
# endif
#endif
#ifdef CONFIG_CPU_ARM720T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm720
# endif
#endif
#ifdef CONFIG_CPU_ARM740T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm740
# endif
#endif
#ifdef CONFIG_CPU_ARM9TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm9tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM920T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm920
# endif
#endif
#ifdef CONFIG_CPU_ARM922T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm922
# endif
#endif
#ifdef CONFIG_CPU_FA526
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_fa526
# endif
#endif
#ifdef CONFIG_CPU_ARM925T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm925
# endif
#endif
#ifdef CONFIG_CPU_ARM926T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm926
# endif
#endif
#ifdef CONFIG_CPU_ARM940T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm940
# endif
#endif
#ifdef CONFIG_CPU_ARM946E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm946
# endif
#endif
#ifdef CONFIG_CPU_SA110
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa110
# endif
#endif
#ifdef CONFIG_CPU_SA1100
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa1100
# endif
#endif
#ifdef CONFIG_CPU_ARM1020
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020
# endif
#endif
#ifdef CONFIG_CPU_ARM1020E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020e
# endif
#endif
#ifdef CONFIG_CPU_ARM1022
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1022
# endif
#endif
#ifdef CONFIG_CPU_ARM1026
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1026
# endif
#endif
#ifdef CONFIG_CPU_XSCALE
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xscale
# endif
#endif
#ifdef CONFIG_CPU_XSC3
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xsc3
# endif
#endif
#ifdef CONFIG_CPU_MOHAWK
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_mohawk
# endif
#endif
#ifdef CONFIG_CPU_FEROCEON
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_feroceon
# endif
#endif
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v6
# endif
#endif
#ifdef CONFIG_CPU_V7
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v7
# endif
#endif
#ifndef MULTI_CPU
#define cpu_proc_init __glue(CPU_NAME,_proc_init)
#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
#define cpu_reset __glue(CPU_NAME,_reset)
#define cpu_do_idle __glue(CPU_NAME,_do_idle)
#define cpu_dcache_clean_area __glue(CPU_NAME,_dcache_clean_area)
#define cpu_do_switch_mm __glue(CPU_NAME,_switch_mm)
#define cpu_set_pte_ext __glue(CPU_NAME,_set_pte_ext)
#define cpu_suspend_size __glue(CPU_NAME,_suspend_size)
#define cpu_do_suspend __glue(CPU_NAME,_do_suspend)
#define cpu_do_resume __glue(CPU_NAME,_do_resume)
#endif
#endif
...@@ -15,7 +15,6 @@
*/
#ifdef __KERNEL__
#ifdef __STDC__
#define ____glue(name,fn) name##fn
#else
...@@ -23,141 +22,4 @@
#endif
#define __glue(name,fn) ____glue(name,fn)
/*
* Data Abort Model
* ================
*
* We have the following to choose from:
* arm6 - ARM6 style
* arm7 - ARM7 style
* v4_early - ARMv4 without Thumb early abort handler
* v4t_late - ARMv4 with Thumb late abort handler
* v4t_early - ARMv4 with Thumb early abort handler
* v5tej_early - ARMv5 with Thumb and Java early abort handler
* xscale - ARMv5 with Thumb with Xscale extensions
* v6_early - ARMv6 generic early abort handler
* v7_early - ARMv7 generic early abort handler
*/
#undef CPU_DABORT_HANDLER
#undef MULTI_DABORT
#if defined(CONFIG_CPU_ARM610)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm6_data_abort
# endif
#endif
#if defined(CONFIG_CPU_ARM710)
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER cpu_arm7_data_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_LV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_late_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV4T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v4t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5TJ
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5tj_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV5T
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v5t_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV6
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v6_early_abort
# endif
#endif
#ifdef CONFIG_CPU_ABRT_EV7
# ifdef CPU_DABORT_HANDLER
# define MULTI_DABORT 1
# else
# define CPU_DABORT_HANDLER v7_early_abort
# endif
#endif
#ifndef CPU_DABORT_HANDLER
#error Unknown data abort handler type
#endif
/*
* Prefetch Abort Model
* ================
*
* We have the following to choose from:
* legacy - no IFSR, no IFAR
* v6 - ARMv6: IFSR, no IFAR
* v7 - ARMv7: IFSR and IFAR
*/
#undef CPU_PABORT_HANDLER
#undef MULTI_PABORT
#ifdef CONFIG_CPU_PABRT_LEGACY
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER legacy_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V6
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v6_pabort
# endif
#endif
#ifdef CONFIG_CPU_PABRT_V7
# ifdef CPU_PABORT_HANDLER
# define MULTI_PABORT 1
# else
# define CPU_PABORT_HANDLER v7_pabort
# endif
#endif
#ifndef CPU_PABORT_HANDLER
#error Unknown prefetch abort handler type
#endif
#endif
...@@ -34,6 +34,7 @@
#ifndef __ASSEMBLY__
extern void __iomem *gic_cpu_base_addr;
extern struct irq_chip gic_arch_extn;
void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
void gic_secondary_init(unsigned int);
...
...@@ -19,11 +19,36 @@
extern pte_t *pkmap_page_table;
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
/*
* The reason for kmap_high_get() is to ensure that the currently kmap'd
* page usage count does not decrease to zero while we're using its
* existing virtual mapping in an atomic context. With a VIVT cache this
* is essential to do, but with a VIPT cache this is only an optimization
* so as not to pay the price of establishing a second mapping if an existing
* one can be used. However, on platforms without hardware TLB maintenance
* broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
* the locking involved must also disable IRQs which is incompatible with
* the IPI mechanism used by global TLB operations.
*/
#define ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
#undef ARCH_NEEDS_KMAP_HIGH_GET
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
#error "The sum of features in your kernel config cannot be supported together"
#endif
#endif
extern void *kmap_high(struct page *page);
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
extern void kunmap_high(struct page *page);
#else
static inline void *kmap_high_get(struct page *page)
{
return NULL;
}
#endif
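A minimal sketch of the intended calling pattern, modelled on how cache maintenance code can reuse an existing kmap; the function below is hypothetical.

/* Hypothetical caller: reuse an existing kmap'd mapping in atomic context. */
static inline void touch_highpage_atomic(struct page *page)
{
	void *addr = kmap_high_get(page);	/* NULL if no usable mapping */

	if (addr) {
		/* ... operate on the already-mapped page at addr ... */
		kunmap_high(page);		/* drop the reference taken above */
	}
}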
/*
* The following functions are already defined by <linux/highmem.h>
...
...@@ -34,4 +34,35 @@ do { \
raw_spin_unlock(&desc->lock); \
} while(0)
#ifndef __ASSEMBLY__
/*
* Entry/exit functions for chained handlers where the primary IRQ chip
* may implement either fasteoi or level-trigger flow control.
*/
static inline void chained_irq_enter(struct irq_chip *chip,
struct irq_desc *desc)
{
/* FastEOI controllers require no action on entry. */
if (chip->irq_eoi)
return;
if (chip->irq_mask_ack) {
chip->irq_mask_ack(&desc->irq_data);
} else {
chip->irq_mask(&desc->irq_data);
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
}
}
static inline void chained_irq_exit(struct irq_chip *chip,
struct irq_desc *desc)
{
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
else
chip->irq_unmask(&desc->irq_data);
}
#endif
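A sketch of how a chained flow handler would wrap its demux loop with these helpers; the controller-specific names (my_gpio_read_pending, MY_GPIO_IRQ_BASE) are hypothetical.

/* Hypothetical demux handler for a secondary interrupt controller. */
static void my_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;

	chained_irq_enter(chip, desc);		/* mask+ack, or nothing for fasteoi */

	pending = my_gpio_read_pending();	/* assumed hardware accessor */
	while (pending) {
		unsigned int bit = __ffs(pending);

		generic_handle_irq(MY_GPIO_IRQ_BASE + bit);
		pending &= ~(1UL << bit);
	}

	chained_irq_exit(chip, desc);		/* eoi or unmask the parent IRQ */
}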
#endif
...@@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <mach/memory.h>
#include <asm/sizes.h>
...@@ -132,21 +133,11 @@
#define DTCM_OFFSET UL(0xfffe8000)
#endif
/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
#ifndef __virt_to_phys
#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
#endif
/*
* Convert a physical address to a Page Frame Number and back
*/
#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)
/*
* Convert a page to/from a physical address
...@@ -156,6 +147,62 @@
#ifndef __ASSEMBLY__
/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
#ifndef __virt_to_phys
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
/*
* Constants used to force the right instruction encodings and shifts
* so that all we need to do is modify the 8-bit constant field.
*/
#define __PV_BITS_31_24 0x81000000
#define __PV_BITS_23_16 0x00810000
extern unsigned long __pv_phys_offset;
#define PHYS_OFFSET __pv_phys_offset
#define __pv_stub(from,to,instr,type) \
__asm__("@ __pv_stub\n" \
"1: " instr " %0, %1, %2\n" \
" .pushsection .pv_table,\"a\"\n" \
" .long 1b\n" \
" .popsection\n" \
: "=r" (to) \
: "r" (from), "I" (type))
static inline unsigned long __virt_to_phys(unsigned long x)
{
unsigned long t;
__pv_stub(x, t, "add", __PV_BITS_31_24);
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
__pv_stub(t, t, "add", __PV_BITS_23_16);
#endif
return t;
}
static inline unsigned long __phys_to_virt(unsigned long x)
{
unsigned long t;
__pv_stub(x, t, "sub", __PV_BITS_31_24);
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
__pv_stub(t, t, "sub", __PV_BITS_23_16);
#endif
return t;
}
#else
#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
#endif
#endif
#ifndef PHYS_OFFSET
#define PHYS_OFFSET PLAT_PHYS_OFFSET
#endif
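As a rough illustration (the addresses below are hypothetical), once the stubs have been patched the conversion behaves like the classic linear translation, e.g. with PAGE_OFFSET at 0xc0000000 and a platform PHYS_OFFSET of 0x80000000:

/* Hypothetical sketch: the patched stubs give the usual linear mapping. */
static inline void pv_patch_example(void)
{
	void *va = (void *)0xc0123000;		/* a lowmem virtual address        */
	phys_addr_t pa = virt_to_phys(va);	/* patched "add" -> 0x80123000     */
	void *back = phys_to_virt(pa);		/* patched "sub" -> back to va     */

	(void)back;
}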
/*
* The DMA mask corresponding to the maximum bus address allocatable
* using GFP_DMA. The default here places no restriction on DMA
...@@ -188,12 +235,12 @@
* translation for translating DMA addresses. Use the driver
* DMA support - see dma-mapping.h.
*/
static inline unsigned long virt_to_phys(const volatile void *x)
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
return __virt_to_phys((unsigned long)(x));
}
static inline void *phys_to_virt(unsigned long x)
static inline void *phys_to_virt(phys_addr_t x)
{
return (void *)(__phys_to_virt((unsigned long)(x)));
}
...
...@@ -25,8 +25,31 @@ struct mod_arch_specific {
};
/*
* Include the ARM architecture version.
* Add the ARM architecture version to the version magic string
*/
#define MODULE_ARCH_VERMAGIC "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
#define MODULE_ARCH_VERMAGIC_ARMVSN "ARMv" __stringify(__LINUX_ARM_ARCH__) " "
/* Add __virt_to_phys patching state as well */
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
#define MODULE_ARCH_VERMAGIC_P2V "p2v16 "
#else
#define MODULE_ARCH_VERMAGIC_P2V "p2v8 "
#endif
#else
#define MODULE_ARCH_VERMAGIC_P2V ""
#endif
/* Add instruction set architecture tag to distinguish ARM/Thumb kernels */
#ifdef CONFIG_THUMB2_KERNEL
#define MODULE_ARCH_VERMAGIC_ARMTHUMB "thumb2 "
#else
#define MODULE_ARCH_VERMAGIC_ARMTHUMB ""
#endif
#define MODULE_ARCH_VERMAGIC \
MODULE_ARCH_VERMAGIC_ARMVSN \
MODULE_ARCH_VERMAGIC_ARMTHUMB \
MODULE_ARCH_VERMAGIC_P2V
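For example, on a hypothetical ARMv7 build with CONFIG_THUMB2_KERNEL and 8-bit phys/virt patching enabled, the pieces above concatenate to roughly the following vermagic fragment.

/* MODULE_ARCH_VERMAGIC expands to "ARMv7 thumb2 p2v8 " in that configuration */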
#endif /* _ASM_ARM_MODULE_H */
...@@ -31,6 +31,7 @@ struct outer_cache_fns {
#ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void);
#endif
void (*set_debug)(unsigned long);
};
#ifdef CONFIG_OUTER_CACHE
...
...@@ -13,250 +13,86 @@
#ifdef __KERNEL__
#include <asm/glue-proc.h>
#include <asm/page.h>
#ifndef __ASSEMBLY__
struct mm_struct;
/*
* Don't change this structure - ASM code relies on it.
*/
extern struct processor {
/* MISC
* get data abort address/flags
*/
void (*_data_abort)(unsigned long pc);
/*
* Retrieve prefetch fault address
*/
unsigned long (*_prefetch_abort)(unsigned long lr);
/*
* Set up any processor specifics
*/
void (*_proc_init)(void);
/*
* Disable any processor specifics
*/
void (*_proc_fin)(void);
/*
* Special stuff for a reset
*/
void (*reset)(unsigned long addr) __attribute__((noreturn));
/*
* Idle the processor
*/
int (*_do_idle)(void);
/*
* Processor architecture specific
*/
/*
* clean a virtual address range from the
* D-cache without flushing the cache.
*/
void (*dcache_clean_area)(void *addr, int size);
/*
* Set the page table
*/
void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm);
/*
* Set a possibly extended PTE. Non-extended PTEs should
* ignore 'ext'.
*/
void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
/* Suspend/resume */
unsigned int suspend_size;
void (*do_suspend)(void *);
void (*do_resume)(void *);
} processor;
/*
* Work out if we need multiple CPU support
*/
#undef MULTI_CPU
#undef CPU_NAME
/*
* CPU_NAME - the prefix for CPU related functions
*/
#ifdef CONFIG_CPU_ARM610
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm6
# endif
#endif
#ifdef CONFIG_CPU_ARM7TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM710
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm7
# endif
#endif
#ifdef CONFIG_CPU_ARM720T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm720
# endif
#endif
#ifdef CONFIG_CPU_ARM740T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm740
# endif
#endif
#ifdef CONFIG_CPU_ARM9TDMI
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm9tdmi
# endif
#endif
#ifdef CONFIG_CPU_ARM920T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm920
# endif
#endif
#ifdef CONFIG_CPU_ARM922T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm922
# endif
#endif
#ifdef CONFIG_CPU_FA526
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_fa526
# endif
#endif
#ifdef CONFIG_CPU_ARM925T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm925
# endif
#endif
#ifdef CONFIG_CPU_ARM926T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm926
# endif
#endif
#ifdef CONFIG_CPU_ARM940T
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm940
# endif
#endif
#ifdef CONFIG_CPU_ARM946E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm946
# endif
#endif
#ifdef CONFIG_CPU_SA110
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa110
# endif
#endif
#ifdef CONFIG_CPU_SA1100
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_sa1100
# endif
#endif
#ifdef CONFIG_CPU_ARM1020
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020
# endif
#endif
#ifdef CONFIG_CPU_ARM1020E
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1020e
# endif
#endif
#ifdef CONFIG_CPU_ARM1022
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1022
# endif
#endif
#ifdef CONFIG_CPU_ARM1026
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_arm1026
# endif
#endif
#ifdef CONFIG_CPU_XSCALE
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xscale
# endif
#endif
#ifdef CONFIG_CPU_XSC3
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_xsc3
# endif
#endif
#ifdef CONFIG_CPU_MOHAWK
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_mohawk
# endif
#endif
#ifdef CONFIG_CPU_FEROCEON
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_feroceon
# endif
#endif
#ifdef CONFIG_CPU_V6
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v6
# endif
#endif
#ifdef CONFIG_CPU_V7
# ifdef CPU_NAME
# undef MULTI_CPU
# define MULTI_CPU
# else
# define CPU_NAME cpu_v7
# endif
#endif
#ifndef __ASSEMBLY__
#ifndef MULTI_CPU
#include <asm/cpu-single.h>
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
#else
#include <asm/cpu-multi32.h>
#define cpu_proc_init() processor._proc_init()
#define cpu_proc_fin() processor._proc_fin()
#define cpu_reset(addr) processor.reset(addr)
#define cpu_do_idle() processor._do_idle()
#define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz)
#define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext)
#define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm)
#endif
extern void cpu_resume(void);
#include <asm/memory.h>
#ifdef CONFIG_MMU
...
...@@ -29,19 +29,7 @@
#define STACK_TOP_MAX TASK_SIZE
#endif
union debug_insn {
u32 arm;
u16 thumb;
};
struct debug_entry {
u32 address;
union debug_insn insn;
};
struct debug_info {
int nsaved;
struct debug_entry bp[2];
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct perf_event *hbp[ARM_MAX_HBP_SLOTS];
#endif
...@@ -95,7 +83,7 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
#if __LINUX_ARM_ARCH__ == 6
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax() smp_mb()
#else
#define cpu_relax() barrier()
...
...@@ -130,8 +130,6 @@ struct pt_regs {
#ifdef __KERNEL__
#define arch_has_single_step() (1)
#define user_mode(regs) \
(((regs)->ARM_cpsr & 0xf) == 0)
...
#ifndef __ASMARM_ARCH_SCU_H
#define __ASMARM_ARCH_SCU_H
#define SCU_PM_NORMAL 0
#define SCU_PM_DORMANT 2
#define SCU_PM_POWEROFF 3
#ifndef __ASSEMBLER__
unsigned int scu_get_core_count(void __iomem *);
void scu_enable(void __iomem *);
int scu_power_mode(void __iomem *, unsigned int);
#endif
#endif
...@@ -5,17 +5,52 @@
#error SMP not supported on pre-ARMv6 CPUs
#endif
/*
* sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
* extensions, so when running on UP, we have to patch these instructions away.
*/
#define ALT_SMP(smp, up) \
"9998: " smp "\n" \
" .pushsection \".alt.smp.init\", \"a\"\n" \
" .long 9998b\n" \
" " up "\n" \
" .popsection\n"
#ifdef CONFIG_THUMB2_KERNEL
#define SEV ALT_SMP("sev.w", "nop.w")
/*
* For Thumb-2, special care is needed to ensure that the conditional WFE
* instruction really does assemble to exactly 4 bytes (as required by
* the SMP_ON_UP fixup code). By itself "wfene" might cause the
* assembler to insert an extra (16-bit) IT instruction, depending on the
* presence or absence of neighbouring conditional instructions.
*
* To avoid this unpredictability, an appropriate IT is inserted explicitly:
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
#define WFE(cond) ALT_SMP( \
"it " cond "\n\t" \
"wfe" cond ".n", \
\
"nop.w" \
)
#else
#define SEV ALT_SMP("sev", "nop")
#define WFE(cond) ALT_SMP("wfe" cond, "nop")
#endif
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
__asm__ __volatile__ (
"dsb\n"
"sev"
SEV
);
#elif defined(CONFIG_CPU_32v6K)
#else
__asm__ __volatile__ (
"mcr p15, 0, %0, c7, c10, 4\n"
"sev"
SEV
: : "r" (0)
);
#endif
...@@ -46,9 +81,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
__asm__ __volatile__(
"1: ldrex %0, [%1]\n"
" teq %0, #0\n"
#ifdef CONFIG_CPU_32v6K
WFE("ne")
" wfene\n"
#endif
" strexeq %0, %2, [%1]\n" " strexeq %0, %2, [%1]\n"
" teqeq %0, #0\n" " teqeq %0, #0\n"
" bne 1b" " bne 1b"
...@@ -107,9 +140,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw) ...@@ -107,9 +140,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
__asm__ __volatile__( __asm__ __volatile__(
"1: ldrex %0, [%1]\n" "1: ldrex %0, [%1]\n"
" teq %0, #0\n" " teq %0, #0\n"
#ifdef CONFIG_CPU_32v6K WFE("ne")
" wfene\n"
#endif
" strexeq %0, %2, [%1]\n" " strexeq %0, %2, [%1]\n"
" teq %0, #0\n" " teq %0, #0\n"
" bne 1b" " bne 1b"
...@@ -176,9 +207,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw) ...@@ -176,9 +207,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
"1: ldrex %0, [%2]\n" "1: ldrex %0, [%2]\n"
" adds %0, %0, #1\n" " adds %0, %0, #1\n"
" strexpl %1, %0, [%2]\n" " strexpl %1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K WFE("mi")
" wfemi\n"
#endif
" rsbpls %0, %1, #0\n" " rsbpls %0, %1, #0\n"
" bmi 1b" " bmi 1b"
: "=&r" (tmp), "=&r" (tmp2) : "=&r" (tmp), "=&r" (tmp2)
......
...@@ -347,6 +347,7 @@ void cpu_idle_wait(void);
#include <asm-generic/cmpxchg-local.h>
#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */
#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
...@@ -365,7 +366,7 @@ void cpu_idle_wait(void);
#include <asm-generic/cmpxchg.h>
#endif
#else /* __LINUX_ARM_ARCH__ >= 6 */
#else /* min ARCH >= ARMv6 */
extern void __bad_cmpxchg(volatile void *ptr, int size);
...@@ -379,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long oldval, res;
switch (size) {
#ifdef CONFIG_CPU_32v6K
#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
case 1:
do {
asm volatile("@ __cmpxchg1\n"
...@@ -404,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
: "memory", "cc");
} while (res);
break;
#endif /* CONFIG_CPU_32v6K */
#endif
case 4:
do {
asm volatile("@ __cmpxchg4\n"
...@@ -450,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long ret;
switch (size) {
#ifndef CONFIG_CPU_32v6K
#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1:
case 2:
ret = __cmpxchg_local_generic(ptr, old, new, size);
break;
#endif /* !CONFIG_CPU_32v6K */
#endif
default:
ret = __cmpxchg(ptr, old, new, size);
}
...@@ -469,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
(unsigned long)(n), \
sizeof(*(ptr))))
#ifdef CONFIG_CPU_32v6K
#ifndef CONFIG_CPU_V6 /* min ARCH >= ARMv6K */
/*
* Note : ARMv7-M (currently unsupported by Linux) does not support
...@@ -524,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
(unsigned long long)(o), \
(unsigned long long)(n)))
#else /* !CONFIG_CPU_32v6K */
#else /* min ARCH = ARMv6 */
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif /* CONFIG_CPU_32v6K */
#endif
#endif /* __LINUX_ARM_ARCH__ >= 6 */
...
...@@ -28,15 +28,14 @@
#define tls_emu 1
#define has_tls_reg 1
#define set_tls set_tls_none
#elif __LINUX_ARM_ARCH__ >= 7 || \
#elif defined(CONFIG_CPU_V6)
(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
#define tls_emu 0
#define has_tls_reg 1
#define set_tls set_tls_v6k
#elif __LINUX_ARM_ARCH__ == 6
#define tls_emu 0
#define has_tls_reg (elf_hwcap & HWCAP_TLS)
#define set_tls set_tls_v6
#elif defined(CONFIG_CPU_32v6K)
#define tls_emu 0
#define has_tls_reg 1
#define set_tls set_tls_v6k
#else
#define tls_emu 0
#define has_tls_reg 0
...
...@@ -45,6 +45,7 @@ static inline int in_exception_text(unsigned long ptr)
extern void __init early_trap_init(void);
extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
extern void *vectors_page;
...
...@@ -71,7 +71,7 @@ struct user{
/* the registers. */
unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */
int u_debugreg[8];
int u_debugreg[8]; /* No longer used */
struct user_fp u_fp; /* FP state */
struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
/* the FP registers. */
...
...@@ -29,6 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_PM) += sleep.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
...
...@@ -140,24 +140,18 @@ EXPORT_SYMBOL(__aeabi_ulcmp);
#endif
/* bitops */
EXPORT_SYMBOL(_set_bit_le);
EXPORT_SYMBOL(_set_bit);
EXPORT_SYMBOL(_test_and_set_bit_le);
EXPORT_SYMBOL(_test_and_set_bit);
EXPORT_SYMBOL(_clear_bit_le);
EXPORT_SYMBOL(_clear_bit);
EXPORT_SYMBOL(_test_and_clear_bit_le);
EXPORT_SYMBOL(_test_and_clear_bit);
EXPORT_SYMBOL(_change_bit_le);
EXPORT_SYMBOL(_change_bit);
EXPORT_SYMBOL(_test_and_change_bit_le);
EXPORT_SYMBOL(_test_and_change_bit);
EXPORT_SYMBOL(_find_first_zero_bit_le);
EXPORT_SYMBOL(_find_next_zero_bit_le);
EXPORT_SYMBOL(_find_first_bit_le);
EXPORT_SYMBOL(_find_next_bit_le);
#ifdef __ARMEB__
EXPORT_SYMBOL(_set_bit_be);
EXPORT_SYMBOL(_test_and_set_bit_be);
EXPORT_SYMBOL(_clear_bit_be);
EXPORT_SYMBOL(_test_and_clear_bit_be);
EXPORT_SYMBOL(_change_bit_be);
EXPORT_SYMBOL(_test_and_change_bit_be);
EXPORT_SYMBOL(_find_first_zero_bit_be);
EXPORT_SYMBOL(_find_next_zero_bit_be);
EXPORT_SYMBOL(_find_first_bit_be);
...@@ -170,3 +164,7 @@ EXPORT_SYMBOL(mcount);
#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
EXPORT_SYMBOL(__pv_phys_offset);
#endif
...@@ -13,6 +13,9 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
...@@ -113,6 +116,14 @@ int main(void)
#endif
#ifdef MULTI_PABORT
DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort));
#endif
#ifdef MULTI_CPU
DEFINE(CPU_SLEEP_SIZE, offsetof(struct processor, suspend_size));
DEFINE(CPU_DO_SUSPEND, offsetof(struct processor, do_suspend));
DEFINE(CPU_DO_RESUME, offsetof(struct processor, do_resume));
#endif
#ifdef MULTI_CACHE
DEFINE(CACHE_FLUSH_KERN_ALL, offsetof(struct cpu_cache_fns, flush_kern_all));
#endif #endif
BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
...
...@@ -583,6 +583,11 @@ void __init pci_common_init(struct hw_pci *hw)
* Assign resources.
*/
pci_bus_assign_resources(bus);
/*
* Enable bridges
*/
pci_enable_bridges(bus);
}
/*
...
...@@ -25,7 +25,7 @@
.macro addruart, rp, rv
.endm
#if defined(CONFIG_CPU_V6)
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
.macro senduart, rd, rx
mcr p14, 0, \rd, c0, c5, 0
...
...@@ -16,7 +16,8 @@
*/
#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
...
...@@ -76,13 +76,13 @@
#ifndef CONFIG_THUMB2_KERNEL
.macro svc_exit, rpsr
msr spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_32v6K)
#if defined(CONFIG_CPU_V6)
clrex @ clear the exclusive monitor
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#elif defined (CONFIG_CPU_V6)
ldr r0, [sp]
strex r1, r2, [sp] @ clear the exclusive monitor
ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr
#elif defined(CONFIG_CPU_32v6K)
clrex @ clear the exclusive monitor
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#else
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
#endif
...@@ -92,10 +92,10 @@
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC]! @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
#if defined(CONFIG_CPU_32v6K)
#if defined(CONFIG_CPU_V6)
clrex @ clear the exclusive monitor
#elif defined (CONFIG_CPU_V6)
strex r1, r2, [sp] @ clear the exclusive monitor
strex r1, r2, [sp] @ clear the exclusive monitor
#elif defined(CONFIG_CPU_32v6K)
clrex @ clear the exclusive monitor
#endif
.if \fast
ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
...
...@@ -25,83 +25,6 @@
* machine ID for example).
*/
__HEAD
__error_a:
#ifdef CONFIG_DEBUG_LL
mov r4, r1 @ preserve machine ID
adr r0, str_a1
bl printascii
mov r0, r4
bl printhex8
adr r0, str_a2
bl printascii
adr r3, __lookup_machine_type_data
ldmia r3, {r4, r5, r6} @ get machine desc list
sub r4, r3, r4 @ get offset between virt&phys
add r5, r5, r4 @ convert virt addresses to
add r6, r6, r4 @ physical address space
1: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
bl printhex8
mov r0, #'\t'
bl printch
ldr r0, [r5, #MACHINFO_NAME] @ get machine name
add r0, r0, r4
bl printascii
mov r0, #'\n'
bl printch
add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
cmp r5, r6
blo 1b
adr r0, str_a3
bl printascii
b __error
ENDPROC(__error_a)
str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
.align
#else
b __error
#endif
/*
* Lookup machine architecture in the linker-build list of architectures.
* Note that we can't use the absolute addresses for the __arch_info
* lists since we aren't running with the MMU on (and therefore, we are
* not in the correct address space). We have to calculate the offset.
*
* r1 = machine architecture number
* Returns:
* r3, r4, r6 corrupted
* r5 = mach_info pointer in physical address space
*/
__lookup_machine_type:
adr r3, __lookup_machine_type_data
ldmia r3, {r4, r5, r6}
sub r3, r3, r4 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
add r6, r6, r3 @ physical address space
1: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type
teq r3, r1 @ matches loader number?
beq 2f @ found
add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
cmp r5, r6
blo 1b
mov r5, #0 @ unknown machine
2: mov pc, lr
ENDPROC(__lookup_machine_type)
/*
* Look in arch/arm/kernel/arch.[ch] for information about the
* __arch_info structures.
*/
.align 2
.type __lookup_machine_type_data, %object
__lookup_machine_type_data:
.long .
.long __arch_info_begin
.long __arch_info_end
.size __lookup_machine_type_data, . - __lookup_machine_type_data
/* Determine validity of the r2 atags pointer. The heuristic requires
* that the pointer be aligned, in the first 16k of physical RAM and
...@@ -109,8 +32,6 @@ __lookup_machine_type_data:
* of this function may be more lenient with the physical address and
* may also be able to move the ATAGS block if necessary.
*
* r8 = machinfo
*
* Returns:
* r2 either valid atags pointer, or zero
* r5, r6 corrupted
...@@ -184,17 +105,6 @@ __mmap_switched_data:
.long init_thread_union + THREAD_START_SP @ sp
.size __mmap_switched_data, . - __mmap_switched_data
/*
* This provides a C-API version of __lookup_machine_type
*/
ENTRY(lookup_machine_type)
stmfd sp!, {r4 - r6, lr}
mov r1, r0
bl __lookup_machine_type
mov r0, r5
ldmfd sp!, {r4 - r6, pc}
ENDPROC(lookup_machine_type)
/*
* This provides a C-API version of __lookup_processor_type
*/
...
...@@ -44,9 +44,6 @@ ENTRY(stext)
bl __lookup_processor_type @ r5=procinfo r9=cpuid
movs r10, r5 @ invalid processor (r5=0)?
beq __error_p @ yes, error 'p'
bl __lookup_machine_type @ r5=machinfo
movs r8, r5 @ invalid machine (r5=0)?
beq __error_a @ yes, error 'a'
adr lr, BSYM(__after_proc_init) @ return (PIC) address
ARM( add pc, r10, #PROCINFO_INITFUNC )
...
...@@ -26,14 +26,6 @@
#include <mach/debug-macro.S>
#endif
#if (PHYS_OFFSET & 0x001fffff)
#error "PHYS_OFFSET must be at an even 2MiB boundary!"
#endif
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
#define KERNEL_RAM_PADDR (PHYS_OFFSET + TEXT_OFFSET)
/*
* swapper_pg_dir is the virtual address of the initial page table.
* We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must
...@@ -41,6 +33,7 @@
* the least significant 16 bits to be 0x8000, but we could probably
* relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
*/
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
...@@ -48,8 +41,8 @@
.globl swapper_pg_dir
.equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000
.macro pgtbl, rd
.macro pgtbl, rd, phys
ldr \rd, =(KERNEL_RAM_PADDR - 0x4000)
add \rd, \phys, #TEXT_OFFSET - 0x4000
.endm
#ifdef CONFIG_XIP_KERNEL
...@@ -87,25 +80,33 @@ ENTRY(stext)
movs r10, r5 @ invalid processor (r5=0)?
THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_p @ yes, error 'p'
bl __lookup_machine_type @ r5=machinfo
movs r8, r5 @ invalid machine (r5=0)?
#ifndef CONFIG_XIP_KERNEL
THUMB( it eq ) @ force fixup-able long branch encoding
adr r3, 2f
beq __error_a @ yes, error 'a'
ldmia r3, {r4, r8}
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
add r8, r8, r4 @ PHYS_OFFSET
#else
ldr r8, =PLAT_PHYS_OFFSET
#endif
/* /*
* r1 = machine no, r2 = atags, * r1 = machine no, r2 = atags,
* r8 = machinfo, r9 = cpuid, r10 = procinfo * r8 = phys_offset, r9 = cpuid, r10 = procinfo
*/ */
bl __vet_atags bl __vet_atags
#ifdef CONFIG_SMP_ON_UP #ifdef CONFIG_SMP_ON_UP
bl __fixup_smp bl __fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
bl __fixup_pv_table
#endif #endif
bl __create_page_tables bl __create_page_tables
/* /*
* The following calls CPU specific code in a position independent * The following calls CPU specific code in a position independent
* manner. See arch/arm/mm/proc-*.S for details. r10 = base of * manner. See arch/arm/mm/proc-*.S for details. r10 = base of
* xxx_proc_info structure selected by __lookup_machine_type * xxx_proc_info structure selected by __lookup_processor_type
* above. On return, the CPU will be ready for the MMU to be * above. On return, the CPU will be ready for the MMU to be
* turned on, and r0 will hold the CPU control register value. * turned on, and r0 will hold the CPU control register value.
*/ */
...@@ -118,22 +119,24 @@ ENTRY(stext) ...@@ -118,22 +119,24 @@ ENTRY(stext)
1: b __enable_mmu 1: b __enable_mmu
ENDPROC(stext) ENDPROC(stext)
.ltorg .ltorg
#ifndef CONFIG_XIP_KERNEL
2: .long .
.long PAGE_OFFSET
#endif
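The non-XIP path above derives PHYS_OFFSET at run time instead of using a compile-time constant: adr r3, 2f yields the physical address the literal pool is currently running from, the first literal (.long .) holds its link-time virtual address, and their difference is the phys-to-virt delta that is added to PAGE_OFFSET. A standalone C restatement of that arithmetic; the concrete addresses are made up purely for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values only; the real ones depend on where the image runs. */
	uint32_t label_phys  = 0x10000100;	/* adr r3, 2f  (current, physical)  */
	uint32_t label_virt  = 0xc0000100;	/* .long .     (link-time, virtual) */
	uint32_t page_offset = 0xc0000000;	/* .long PAGE_OFFSET                */

	uint32_t delta = label_phys - label_virt;	/* PHYS_OFFSET - PAGE_OFFSET */
	uint32_t phys  = page_offset + delta;		/* computed PHYS_OFFSET      */

	printf("delta=%#x phys_offset=%#x\n", delta, phys);	/* 0x50000000, 0x10000000 */
	return 0;
}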
/* /*
* Setup the initial page tables. We only setup the barest * Setup the initial page tables. We only setup the barest
* amount which are required to get the kernel running, which * amount which are required to get the kernel running, which
* generally means mapping in the kernel code. * generally means mapping in the kernel code.
* *
* r8 = machinfo * r8 = phys_offset, r9 = cpuid, r10 = procinfo
* r9 = cpuid
* r10 = procinfo
* *
* Returns: * Returns:
* r0, r3, r5-r7 corrupted * r0, r3, r5-r7 corrupted
* r4 = physical page table address * r4 = physical page table address
*/ */
__create_page_tables: __create_page_tables:
pgtbl r4 @ page table address pgtbl r4, r8 @ page table address
/* /*
* Clear the 16K level 1 swapper page table * Clear the 16K level 1 swapper page table
...@@ -189,10 +192,8 @@ __create_page_tables: ...@@ -189,10 +192,8 @@ __create_page_tables:
/* /*
* Map some ram to cover our .data and .bss areas. * Map some ram to cover our .data and .bss areas.
*/ */
orr r3, r7, #(KERNEL_RAM_PADDR & 0xff000000) add r3, r8, #TEXT_OFFSET
.if (KERNEL_RAM_PADDR & 0x00f00000) orr r3, r3, r7
orr r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000)
.endif
add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18 add r0, r4, #(KERNEL_RAM_VADDR & 0xff000000) >> 18
str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]! str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
ldr r6, =(_end - 1) ldr r6, =(_end - 1)
...@@ -205,14 +206,17 @@ __create_page_tables: ...@@ -205,14 +206,17 @@ __create_page_tables:
#endif #endif
/* /*
* Then map first 1MB of ram in case it contains our boot params. * Then map boot params address in r2 or
* the first 1MB of ram if boot params address is not specified.
*/ */
add r0, r4, #PAGE_OFFSET >> 18 mov r0, r2, lsr #20
orr r6, r7, #(PHYS_OFFSET & 0xff000000) movs r0, r0, lsl #20
.if (PHYS_OFFSET & 0x00f00000) moveq r0, r8
orr r6, r6, #(PHYS_OFFSET & 0x00f00000) sub r3, r0, r8
.endif add r3, r3, #PAGE_OFFSET
str r6, [r0] add r3, r4, r3, lsr #18
orr r6, r7, r0
str r6, [r3]
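The replacement hunk maps the 1MB section containing the ATAGS rather than always mapping the first megabyte of RAM: r2 is rounded down to a section boundary (falling back to PHYS_OFFSET when that rounds to zero), translated to the virtual address it will occupy so it can index the page directory, and OR'ed with the section flags in r7. A rough C restatement of that index math, with illustrative names that are not the kernel's:

/* Sketch of the pgdir arithmetic above. Assumes a 32-bit pgdir with
 * one 4-byte entry per 1MB section, so the byte offset is
 * (virt >> 20) * 4 == virt >> 18, exactly as the assembly computes. */
static void map_atags_section(uint32_t *pgdir, uint32_t atags,
			      uint32_t phys_offset, uint32_t page_offset,
			      uint32_t section_flags)
{
	uint32_t sect = atags & ~0xfffffu;	/* lsr #20 / lsl #20 */
	uint32_t virt;

	if (!sect)				/* moveq r0, r8: fall back to RAM start */
		sect = phys_offset;

	virt = sect - phys_offset + page_offset;	/* sub r3, r0, r8 / add #PAGE_OFFSET */
	pgdir[virt >> 20] = sect | section_flags;	/* orr r6, r7, r0 / str r6, [r3]     */
}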
#ifdef CONFIG_DEBUG_LL #ifdef CONFIG_DEBUG_LL
#ifndef CONFIG_DEBUG_ICEDCC #ifndef CONFIG_DEBUG_ICEDCC
...@@ -457,4 +461,129 @@ ENTRY(fixup_smp) ...@@ -457,4 +461,129 @@ ENTRY(fixup_smp)
ldmfd sp!, {r4 - r6, pc} ldmfd sp!, {r4 - r6, pc}
ENDPROC(fixup_smp) ENDPROC(fixup_smp)
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
/* __fixup_pv_table - patch the stub instructions with the delta between
* PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
* can be expressed by an immediate shifter operand. The stub instruction
* has a form of '(add|sub) rd, rn, #imm'.
*/
__HEAD
__fixup_pv_table:
adr r0, 1f
ldmia r0, {r3-r5, r7}
sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET
add r4, r4, r3 @ adjust table start address
add r5, r5, r3 @ adjust table end address
add r7, r7, r3 @ adjust __pv_phys_offset address
str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset
#ifndef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
mov r6, r3, lsr #24 @ constant for add/sub instructions
teq r3, r6, lsl #24 @ must be 16MiB aligned
#else
mov r6, r3, lsr #16 @ constant for add/sub instructions
teq r3, r6, lsl #16 @ must be 64kiB aligned
#endif
THUMB( it ne @ cross section branch )
bne __error
str r6, [r7, #4] @ save to __pv_offset
b __fixup_a_pv_table
ENDPROC(__fixup_pv_table)
.align
1: .long .
.long __pv_table_begin
.long __pv_table_end
2: .long __pv_phys_offset
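The stubs this loop patches are emitted wherever the kernel translates between virtual and physical addresses: each translation compiles to a single add or sub with a placeholder immediate, and the instruction's address is recorded in .pv_table for __fixup_pv_table (boot) and fixup_pv_table (module load) to find. A simplified sketch of that idea follows; it is not the kernel's exact macro from asm/memory.h, only an illustration of why the delta must fit an ARM rotated 8-bit immediate, which is what the 16MiB (or 64KiB) alignment check above enforces:

/* Illustrative only. Emits "add %0, %1, #0" and records the instruction
 * address in .pv_table so the boot-time walker can rewrite the immediate
 * with the PHYS_OFFSET - PAGE_OFFSET delta. */
#define PV_STUB_ADD(to, from)					\
	__asm__("1:	add	%0, %1, #0\n"			\
		"	.pushsection .pv_table, \"a\"\n"	\
		"	.long	1b\n"				\
		"	.popsection\n"				\
		: "=r" (to) : "r" (from))

A virt-to-phys helper can then expand to such a stub, and because an ARM data-processing immediate is an 8-bit value rotated by an even amount, a delta of the form imm8 << 24 (low 24 bits zero, i.e. 16MiB aligned) is always encodable.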
.text
__fixup_a_pv_table:
#ifdef CONFIG_THUMB2_KERNEL
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
lsls r0, r6, #24
lsr r6, #8
beq 1f
clz r7, r0
lsr r0, #24
lsl r0, r7
bic r0, 0x0080
lsrs r7, #1
orrcs r0, #0x0080
orr r0, r0, r7, lsl #12
#endif
1: lsls r6, #24
beq 4f
clz r7, r6
lsr r6, #24
lsl r6, r7
bic r6, #0x0080
lsrs r7, #1
orrcs r6, #0x0080
orr r6, r6, r7, lsl #12
orr r6, #0x4000
b 4f
2: @ at this point the C flag is always clear
add r7, r3
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
ldrh ip, [r7]
tst ip, 0x0400 @ the i bit tells us LS or MS byte
beq 3f
cmp r0, #0 @ set C flag, and ...
biceq ip, 0x0400 @ immediate zero value has a special encoding
streqh ip, [r7] @ that requires the i bit cleared
#endif
3: ldrh ip, [r7, #2]
and ip, 0x8f00
orrcc ip, r6 @ mask in offset bits 31-24
orrcs ip, r0 @ mask in offset bits 23-16
strh ip, [r7, #2]
4: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
bcc 2b
bx lr
#else
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT_16BIT
and r0, r6, #255 @ offset bits 23-16
mov r6, r6, lsr #8 @ offset bits 31-24
#else
mov r0, #0 @ just in case...
#endif
b 3f
2: ldr ip, [r7, r3]
bic ip, ip, #0x000000ff
tst ip, #0x400 @ rotate shift tells us LS or MS byte
orrne ip, ip, r6 @ mask in offset bits 31-24
orreq ip, ip, r0 @ mask in offset bits 23-16
str ip, [r7, r3]
3: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
bcc 2b
mov pc, lr
#endif
ENDPROC(__fixup_a_pv_table)
ENTRY(fixup_pv_table)
stmfd sp!, {r4 - r7, lr}
ldr r2, 2f @ get address of __pv_phys_offset
mov r3, #0 @ no offset
mov r4, r0 @ r0 = table start
add r5, r0, r1 @ r1 = table size
ldr r6, [r2, #4] @ get __pv_offset
bl __fixup_a_pv_table
ldmfd sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)
.align
2: .long __pv_phys_offset
.data
.globl __pv_phys_offset
.type __pv_phys_offset, %object
__pv_phys_offset:
.long 0
.size __pv_phys_offset, . - __pv_phys_offset
__pv_offset:
.long 0
#endif
#include "head-common.S" #include "head-common.S"
...@@ -179,14 +179,21 @@ int __init arch_probe_nr_irqs(void) ...@@ -179,14 +179,21 @@ int __init arch_probe_nr_irqs(void)
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) static bool migrate_one_irq(struct irq_data *d)
{ {
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->irq_data.node, cpu); unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
bool ret = false;
raw_spin_lock_irq(&desc->lock); if (cpu >= nr_cpu_ids) {
desc->irq_data.chip->irq_set_affinity(&desc->irq_data, cpu = cpumask_any(cpu_online_mask);
cpumask_of(cpu), false); ret = true;
raw_spin_unlock_irq(&desc->lock); }
pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
return ret;
} }
/* /*
...@@ -198,25 +205,30 @@ void migrate_irqs(void) ...@@ -198,25 +205,30 @@ void migrate_irqs(void)
{ {
unsigned int i, cpu = smp_processor_id(); unsigned int i, cpu = smp_processor_id();
struct irq_desc *desc; struct irq_desc *desc;
unsigned long flags;
local_irq_save(flags);
for_each_irq_desc(i, desc) { for_each_irq_desc(i, desc) {
struct irq_data *d = &desc->irq_data; struct irq_data *d = &desc->irq_data;
bool affinity_broken = false;
if (d->node == cpu) { raw_spin_lock(&desc->lock);
unsigned int newcpu = cpumask_any_and(d->affinity, do {
cpu_online_mask); if (desc->action == NULL)
if (newcpu >= nr_cpu_ids) { break;
if (printk_ratelimit())
printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
i, cpu);
cpumask_setall(d->affinity); if (d->node != cpu)
newcpu = cpumask_any_and(d->affinity, break;
cpu_online_mask);
}
route_irq(desc, i, newcpu); affinity_broken = migrate_one_irq(d);
} } while (0);
raw_spin_unlock(&desc->lock);
if (affinity_broken && printk_ratelimit())
pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
} }
local_irq_restore(flags);
} }
#endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_HOTPLUG_CPU */
...@@ -76,6 +76,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, ...@@ -76,6 +76,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) { for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rel); i++, rel++) {
unsigned long loc; unsigned long loc;
Elf32_Sym *sym; Elf32_Sym *sym;
const char *symname;
s32 offset; s32 offset;
#ifdef CONFIG_THUMB2_KERNEL #ifdef CONFIG_THUMB2_KERNEL
u32 upper, lower, sign, j1, j2; u32 upper, lower, sign, j1, j2;
...@@ -83,18 +84,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, ...@@ -83,18 +84,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
offset = ELF32_R_SYM(rel->r_info); offset = ELF32_R_SYM(rel->r_info);
if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) { if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
printk(KERN_ERR "%s: bad relocation, section %d reloc %d\n", pr_err("%s: section %u reloc %u: bad relocation sym offset\n",
module->name, relindex, i); module->name, relindex, i);
return -ENOEXEC; return -ENOEXEC;
} }
sym = ((Elf32_Sym *)symsec->sh_addr) + offset; sym = ((Elf32_Sym *)symsec->sh_addr) + offset;
symname = strtab + sym->st_name;
if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) { if (rel->r_offset < 0 || rel->r_offset > dstsec->sh_size - sizeof(u32)) {
printk(KERN_ERR "%s: out of bounds relocation, " pr_err("%s: section %u reloc %u sym '%s': out of bounds relocation, offset %d size %u\n",
"section %d reloc %d offset %d size %d\n", module->name, relindex, i, symname,
module->name, relindex, i, rel->r_offset, rel->r_offset, dstsec->sh_size);
dstsec->sh_size);
return -ENOEXEC; return -ENOEXEC;
} }
...@@ -120,10 +121,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, ...@@ -120,10 +121,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (offset & 3 || if (offset & 3 ||
offset <= (s32)0xfe000000 || offset <= (s32)0xfe000000 ||
offset >= (s32)0x02000000) { offset >= (s32)0x02000000) {
printk(KERN_ERR pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
"%s: relocation out of range, section " module->name, relindex, i, symname,
"%d reloc %d sym '%s'\n", module->name, ELF32_R_TYPE(rel->r_info), loc,
relindex, i, strtab + sym->st_name); sym->st_value);
return -ENOEXEC; return -ENOEXEC;
} }
...@@ -196,10 +197,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, ...@@ -196,10 +197,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (!(offset & 1) || if (!(offset & 1) ||
offset <= (s32)0xff000000 || offset <= (s32)0xff000000 ||
offset >= (s32)0x01000000) { offset >= (s32)0x01000000) {
printk(KERN_ERR pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
"%s: relocation out of range, section " module->name, relindex, i, symname,
"%d reloc %d sym '%s'\n", module->name, ELF32_R_TYPE(rel->r_info), loc,
relindex, i, strtab + sym->st_name); sym->st_value);
return -ENOEXEC; return -ENOEXEC;
} }
...@@ -282,12 +283,13 @@ static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr, ...@@ -282,12 +283,13 @@ static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
return NULL; return NULL;
} }
extern void fixup_pv_table(const void *, unsigned long);
extern void fixup_smp(const void *, unsigned long); extern void fixup_smp(const void *, unsigned long);
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod) struct module *mod)
{ {
const Elf_Shdr * __maybe_unused s = NULL; const Elf_Shdr *s = NULL;
#ifdef CONFIG_ARM_UNWIND #ifdef CONFIG_ARM_UNWIND
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum; const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
...@@ -331,6 +333,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, ...@@ -331,6 +333,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
maps[i].unw_sec->sh_size, maps[i].unw_sec->sh_size,
maps[i].txt_sec->sh_addr, maps[i].txt_sec->sh_addr,
maps[i].txt_sec->sh_size); maps[i].txt_sec->sh_size);
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
s = find_mod_section(hdr, sechdrs, ".pv_table");
if (s)
fixup_pv_table((void *)s->sh_addr, s->sh_size);
#endif #endif
s = find_mod_section(hdr, sechdrs, ".alt.smp.init"); s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
if (s && !is_smp()) if (s && !is_smp())
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
* enable the interrupt. * enable the interrupt.
*/ */
#ifdef CONFIG_CPU_V6 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
enum armv6_perf_types { enum armv6_perf_types {
ARMV6_PERFCTR_ICACHE_MISS = 0x0, ARMV6_PERFCTR_ICACHE_MISS = 0x0,
ARMV6_PERFCTR_IBUF_STALL = 0x1, ARMV6_PERFCTR_IBUF_STALL = 0x1,
...@@ -669,4 +669,4 @@ static const struct arm_pmu *__init armv6mpcore_pmu_init(void) ...@@ -669,4 +669,4 @@ static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{ {
return NULL; return NULL;
} }
#endif /* CONFIG_CPU_V6 */ #endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
...@@ -26,8 +26,6 @@ ...@@ -26,8 +26,6 @@
#include <asm/system.h> #include <asm/system.h>
#include <asm/traps.h> #include <asm/traps.h>
#include "ptrace.h"
#define REG_PC 15 #define REG_PC 15
#define REG_PSR 16 #define REG_PSR 16
/* /*
...@@ -184,389 +182,12 @@ put_user_reg(struct task_struct *task, int offset, long data) ...@@ -184,389 +182,12 @@ put_user_reg(struct task_struct *task, int offset, long data)
return ret; return ret;
} }
static inline int
read_u32(struct task_struct *task, unsigned long addr, u32 *res)
{
int ret;
ret = access_process_vm(task, addr, res, sizeof(*res), 0);
return ret == sizeof(*res) ? 0 : -EIO;
}
static inline int
read_instr(struct task_struct *task, unsigned long addr, u32 *res)
{
int ret;
if (addr & 1) {
u16 val;
ret = access_process_vm(task, addr & ~1, &val, sizeof(val), 0);
ret = ret == sizeof(val) ? 0 : -EIO;
*res = val;
} else {
u32 val;
ret = access_process_vm(task, addr & ~3, &val, sizeof(val), 0);
ret = ret == sizeof(val) ? 0 : -EIO;
*res = val;
}
return ret;
}
/*
* Get value of register `rn' (in the instruction)
*/
static unsigned long
ptrace_getrn(struct task_struct *child, unsigned long insn)
{
unsigned int reg = (insn >> 16) & 15;
unsigned long val;
val = get_user_reg(child, reg);
if (reg == 15)
val += 8;
return val;
}
/*
* Get value of operand 2 (in an ALU instruction)
*/
static unsigned long
ptrace_getaluop2(struct task_struct *child, unsigned long insn)
{
unsigned long val;
int shift;
int type;
if (insn & 1 << 25) {
val = insn & 255;
shift = (insn >> 8) & 15;
type = 3;
} else {
val = get_user_reg (child, insn & 15);
if (insn & (1 << 4))
shift = (int)get_user_reg (child, (insn >> 8) & 15);
else
shift = (insn >> 7) & 31;
type = (insn >> 5) & 3;
}
switch (type) {
case 0: val <<= shift; break;
case 1: val >>= shift; break;
case 2:
val = (((signed long)val) >> shift);
break;
case 3:
val = (val >> shift) | (val << (32 - shift));
break;
}
return val;
}
/*
* Get value of operand 2 (in a LDR instruction)
*/
static unsigned long
ptrace_getldrop2(struct task_struct *child, unsigned long insn)
{
unsigned long val;
int shift;
int type;
val = get_user_reg(child, insn & 15);
shift = (insn >> 7) & 31;
type = (insn >> 5) & 3;
switch (type) {
case 0: val <<= shift; break;
case 1: val >>= shift; break;
case 2:
val = (((signed long)val) >> shift);
break;
case 3:
val = (val >> shift) | (val << (32 - shift));
break;
}
return val;
}
#define OP_MASK 0x01e00000
#define OP_AND 0x00000000
#define OP_EOR 0x00200000
#define OP_SUB 0x00400000
#define OP_RSB 0x00600000
#define OP_ADD 0x00800000
#define OP_ADC 0x00a00000
#define OP_SBC 0x00c00000
#define OP_RSC 0x00e00000
#define OP_ORR 0x01800000
#define OP_MOV 0x01a00000
#define OP_BIC 0x01c00000
#define OP_MVN 0x01e00000
static unsigned long
get_branch_address(struct task_struct *child, unsigned long pc, unsigned long insn)
{
u32 alt = 0;
switch (insn & 0x0e000000) {
case 0x00000000:
case 0x02000000: {
/*
* data processing
*/
long aluop1, aluop2, ccbit;
if ((insn & 0x0fffffd0) == 0x012fff10) {
/*
* bx or blx
*/
alt = get_user_reg(child, insn & 15);
break;
}
if ((insn & 0xf000) != 0xf000)
break;
aluop1 = ptrace_getrn(child, insn);
aluop2 = ptrace_getaluop2(child, insn);
ccbit = get_user_reg(child, REG_PSR) & PSR_C_BIT ? 1 : 0;
switch (insn & OP_MASK) {
case OP_AND: alt = aluop1 & aluop2; break;
case OP_EOR: alt = aluop1 ^ aluop2; break;
case OP_SUB: alt = aluop1 - aluop2; break;
case OP_RSB: alt = aluop2 - aluop1; break;
case OP_ADD: alt = aluop1 + aluop2; break;
case OP_ADC: alt = aluop1 + aluop2 + ccbit; break;
case OP_SBC: alt = aluop1 - aluop2 + ccbit; break;
case OP_RSC: alt = aluop2 - aluop1 + ccbit; break;
case OP_ORR: alt = aluop1 | aluop2; break;
case OP_MOV: alt = aluop2; break;
case OP_BIC: alt = aluop1 & ~aluop2; break;
case OP_MVN: alt = ~aluop2; break;
}
break;
}
case 0x04000000:
case 0x06000000:
/*
* ldr
*/
if ((insn & 0x0010f000) == 0x0010f000) {
unsigned long base;
base = ptrace_getrn(child, insn);
if (insn & 1 << 24) {
long aluop2;
if (insn & 0x02000000)
aluop2 = ptrace_getldrop2(child, insn);
else
aluop2 = insn & 0xfff;
if (insn & 1 << 23)
base += aluop2;
else
base -= aluop2;
}
read_u32(child, base, &alt);
}
break;
case 0x08000000:
/*
* ldm
*/
if ((insn & 0x00108000) == 0x00108000) {
unsigned long base;
unsigned int nr_regs;
if (insn & (1 << 23)) {
nr_regs = hweight16(insn & 65535) << 2;
if (!(insn & (1 << 24)))
nr_regs -= 4;
} else {
if (insn & (1 << 24))
nr_regs = -4;
else
nr_regs = 0;
}
base = ptrace_getrn(child, insn);
read_u32(child, base + nr_regs, &alt);
break;
}
break;
case 0x0a000000: {
/*
* bl or b
*/
signed long displ;
/* It's a branch/branch link: instead of trying to
* figure out whether the branch will be taken or not,
* we'll put a breakpoint at both locations. This is
* simpler, more reliable, and probably not a whole lot
* slower than the alternative approach of emulating the
* branch.
*/
displ = (insn & 0x00ffffff) << 8;
displ = (displ >> 6) + 8;
if (displ != 0 && displ != 4)
alt = pc + displ;
}
break;
}
return alt;
}
static int
swap_insn(struct task_struct *task, unsigned long addr,
void *old_insn, void *new_insn, int size)
{
int ret;
ret = access_process_vm(task, addr, old_insn, size, 0);
if (ret == size)
ret = access_process_vm(task, addr, new_insn, size, 1);
return ret;
}
static void
add_breakpoint(struct task_struct *task, struct debug_info *dbg, unsigned long addr)
{
int nr = dbg->nsaved;
if (nr < 2) {
u32 new_insn = BREAKINST_ARM;
int res;
res = swap_insn(task, addr, &dbg->bp[nr].insn, &new_insn, 4);
if (res == 4) {
dbg->bp[nr].address = addr;
dbg->nsaved += 1;
}
} else
printk(KERN_ERR "ptrace: too many breakpoints\n");
}
/*
* Clear one breakpoint in the user program. We copy what the hardware
* does and use bit 0 of the address to indicate whether this is a Thumb
* breakpoint or an ARM breakpoint.
*/
static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
{
unsigned long addr = bp->address;
union debug_insn old_insn;
int ret;
if (addr & 1) {
ret = swap_insn(task, addr & ~1, &old_insn.thumb,
&bp->insn.thumb, 2);
if (ret != 2 || old_insn.thumb != BREAKINST_THUMB)
printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at "
"0x%08lx (0x%04x)\n", task->comm,
task_pid_nr(task), addr, old_insn.thumb);
} else {
ret = swap_insn(task, addr & ~3, &old_insn.arm,
&bp->insn.arm, 4);
if (ret != 4 || old_insn.arm != BREAKINST_ARM)
printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
"0x%08lx (0x%08x)\n", task->comm,
task_pid_nr(task), addr, old_insn.arm);
}
}
void ptrace_set_bpt(struct task_struct *child)
{
struct pt_regs *regs;
unsigned long pc;
u32 insn;
int res;
regs = task_pt_regs(child);
pc = instruction_pointer(regs);
if (thumb_mode(regs)) {
printk(KERN_WARNING "ptrace: can't handle thumb mode\n");
return;
}
res = read_instr(child, pc, &insn);
if (!res) {
struct debug_info *dbg = &child->thread.debug;
unsigned long alt;
dbg->nsaved = 0;
alt = get_branch_address(child, pc, insn);
if (alt)
add_breakpoint(child, dbg, alt);
/*
* Note that we ignore the result of setting the above
* breakpoint since it may fail. When it does, this is
* not so much an error, but a forewarning that we may
* be receiving a prefetch abort shortly.
*
* If we don't set this breakpoint here, then we can
* lose control of the thread during single stepping.
*/
if (!alt || predicate(insn) != PREDICATE_ALWAYS)
add_breakpoint(child, dbg, pc + 4);
}
}
/*
* Ensure no single-step breakpoint is pending. Returns non-zero
* value if child was being single-stepped.
*/
void ptrace_cancel_bpt(struct task_struct *child)
{
int i, nsaved = child->thread.debug.nsaved;
child->thread.debug.nsaved = 0;
if (nsaved > 2) {
printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
nsaved = 2;
}
for (i = 0; i < nsaved; i++)
clear_breakpoint(child, &child->thread.debug.bp[i]);
}
void user_disable_single_step(struct task_struct *task)
{
task->ptrace &= ~PT_SINGLESTEP;
ptrace_cancel_bpt(task);
}
void user_enable_single_step(struct task_struct *task)
{
task->ptrace |= PT_SINGLESTEP;
}
/* /*
* Called by kernel/ptrace.c when detaching.. * Called by kernel/ptrace.c when detaching..
*/ */
void ptrace_disable(struct task_struct *child) void ptrace_disable(struct task_struct *child)
{ {
user_disable_single_step(child); /* Nothing to do. */
} }
/* /*
...@@ -576,8 +197,6 @@ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs) ...@@ -576,8 +197,6 @@ void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{ {
siginfo_t info; siginfo_t info;
ptrace_cancel_bpt(tsk);
info.si_signo = SIGTRAP; info.si_signo = SIGTRAP;
info.si_errno = 0; info.si_errno = 0;
info.si_code = TRAP_BRKPT; info.si_code = TRAP_BRKPT;
......
/*
* linux/arch/arm/kernel/ptrace.h
*
* Copyright (C) 2000-2003 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/ptrace.h>
extern void ptrace_cancel_bpt(struct task_struct *);
extern void ptrace_set_bpt(struct task_struct *);
extern void ptrace_break(struct task_struct *, struct pt_regs *);
/*
* Send SIGTRAP if we're single-stepping
*/
static inline void single_step_trap(struct task_struct *task)
{
if (task->ptrace & PT_SINGLESTEP) {
ptrace_cancel_bpt(task);
send_sig(SIGTRAP, task, 1);
}
}
static inline void single_step_clear(struct task_struct *task)
{
if (task->ptrace & PT_SINGLESTEP)
ptrace_cancel_bpt(task);
}
static inline void single_step_set(struct task_struct *task)
{
if (task->ptrace & PT_SINGLESTEP)
ptrace_set_bpt(task);
}
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
* the Free Software Foundation. * the Free Software Foundation.
*/ */
#include <linux/module.h> #include <linux/module.h>
#include <linux/ftrace.h>
#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) #if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
#include <linux/sched.h> #include <linux/sched.h>
......
...@@ -308,7 +308,22 @@ static void __init cacheid_init(void) ...@@ -308,7 +308,22 @@ static void __init cacheid_init(void)
* already provide the required functionality. * already provide the required functionality.
*/ */
extern struct proc_info_list *lookup_processor_type(unsigned int); extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);
static void __init early_print(const char *str, ...)
{
extern void printascii(const char *);
char buf[256];
va_list ap;
va_start(ap, str);
vsnprintf(buf, sizeof(buf), str, ap);
va_end(ap);
#ifdef CONFIG_DEBUG_LL
printascii(buf);
#endif
printk("%s", buf);
}
static void __init feat_v6_fixup(void) static void __init feat_v6_fixup(void)
{ {
...@@ -426,21 +441,29 @@ void cpu_init(void) ...@@ -426,21 +441,29 @@ void cpu_init(void)
static struct machine_desc * __init setup_machine(unsigned int nr) static struct machine_desc * __init setup_machine(unsigned int nr)
{ {
struct machine_desc *list; extern struct machine_desc __arch_info_begin[], __arch_info_end[];
struct machine_desc *p;
/* /*
* locate machine in the list of supported machines. * locate machine in the list of supported machines.
*/ */
list = lookup_machine_type(nr); for (p = __arch_info_begin; p < __arch_info_end; p++)
if (!list) { if (nr == p->nr) {
printk("Machine configuration botched (nr %d), unable " printk("Machine: %s\n", p->name);
"to continue.\n", nr); return p;
while (1); }
}
printk("Machine: %s\n", list->name); early_print("\n"
"Error: unrecognized/unsupported machine ID (r1 = 0x%08x).\n\n"
"Available machine support:\n\nID (hex)\tNAME\n", nr);
return list; for (p = __arch_info_begin; p < __arch_info_end; p++)
early_print("%08x\t%s\n", p->nr, p->name);
early_print("\nPlease check your kernel config and/or bootloader.\n");
while (true)
/* can't use cpu_relax() here as it may require MMU setup */;
} }
static int __init arm_add_memory(unsigned long start, unsigned long size) static int __init arm_add_memory(unsigned long start, unsigned long size)
...@@ -703,7 +726,7 @@ static struct init_tags { ...@@ -703,7 +726,7 @@ static struct init_tags {
{ tag_size(tag_core), ATAG_CORE }, { tag_size(tag_core), ATAG_CORE },
{ 1, PAGE_SIZE, 0xff }, { 1, PAGE_SIZE, 0xff },
{ tag_size(tag_mem32), ATAG_MEM }, { tag_size(tag_mem32), ATAG_MEM },
{ MEM_SIZE, PHYS_OFFSET }, { MEM_SIZE },
{ 0, ATAG_NONE } { 0, ATAG_NONE }
}; };
...@@ -802,6 +825,8 @@ void __init setup_arch(char **cmdline_p) ...@@ -802,6 +825,8 @@ void __init setup_arch(char **cmdline_p)
struct machine_desc *mdesc; struct machine_desc *mdesc;
char *from = default_command_line; char *from = default_command_line;
init_tags.mem.start = PHYS_OFFSET;
unwind_init(); unwind_init();
setup_processor(); setup_processor();
...@@ -814,8 +839,25 @@ void __init setup_arch(char **cmdline_p) ...@@ -814,8 +839,25 @@ void __init setup_arch(char **cmdline_p)
if (__atags_pointer) if (__atags_pointer)
tags = phys_to_virt(__atags_pointer); tags = phys_to_virt(__atags_pointer);
else if (mdesc->boot_params) else if (mdesc->boot_params) {
tags = phys_to_virt(mdesc->boot_params); #ifdef CONFIG_MMU
/*
* We still are executing with a minimal MMU mapping created
* with the presumption that the machine default for this
* is located in the first MB of RAM. Anything else will
* fault and silently hang the kernel at this point.
*/
if (mdesc->boot_params < PHYS_OFFSET ||
mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
printk(KERN_WARNING
"Default boot params at physical 0x%08lx out of reach\n",
mdesc->boot_params);
} else
#endif
{
tags = phys_to_virt(mdesc->boot_params);
}
}
#if defined(CONFIG_DEPRECATED_PARAM_STRUCT) #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
/* /*
......
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include <asm/unistd.h> #include <asm/unistd.h>
#include <asm/vfp.h> #include <asm/vfp.h>
#include "ptrace.h"
#include "signal.h" #include "signal.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
...@@ -348,8 +347,6 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs) ...@@ -348,8 +347,6 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs)
if (restore_sigframe(regs, frame)) if (restore_sigframe(regs, frame))
goto badframe; goto badframe;
single_step_trap(current);
return regs->ARM_r0; return regs->ARM_r0;
badframe: badframe:
...@@ -383,8 +380,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) ...@@ -383,8 +380,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT) if (do_sigaltstack(&frame->sig.uc.uc_stack, NULL, regs->ARM_sp) == -EFAULT)
goto badframe; goto badframe;
single_step_trap(current);
return regs->ARM_r0; return regs->ARM_r0;
badframe: badframe:
...@@ -706,8 +701,6 @@ static void do_signal(struct pt_regs *regs, int syscall) ...@@ -706,8 +701,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
if (try_to_freeze()) if (try_to_freeze())
goto no_signal; goto no_signal;
single_step_clear(current);
signr = get_signal_to_deliver(&info, &ka, regs, NULL); signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) { if (signr > 0) {
sigset_t *oldset; sigset_t *oldset;
...@@ -726,7 +719,6 @@ static void do_signal(struct pt_regs *regs, int syscall) ...@@ -726,7 +719,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
if (test_thread_flag(TIF_RESTORE_SIGMASK)) if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK); clear_thread_flag(TIF_RESTORE_SIGMASK);
} }
single_step_set(current);
return; return;
} }
...@@ -772,7 +764,6 @@ static void do_signal(struct pt_regs *regs, int syscall) ...@@ -772,7 +764,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
} }
} }
single_step_set(current);
} }
asmlinkage void asmlinkage void
......
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
#include <asm/system.h>
.text
/*
* Save CPU state for a suspend
* r1 = v:p offset
* r3 = virtual return function
* Note: sp is decremented to allocate space for CPU state on stack
* r0-r3,r9,r10,lr corrupted
*/
ENTRY(cpu_suspend)
mov r9, lr
#ifdef MULTI_CPU
ldr r10, =processor
mov r2, sp @ current virtual SP
ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function
sub sp, sp, r0 @ allocate CPU state on stack
mov r0, sp @ save pointer
add ip, ip, r1 @ convert resume fn to phys
stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn
ldr r3, =sleep_save_sp
add r2, sp, r1 @ convert SP to phys
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
ALT_UP(mov lr, #0)
and lr, lr, #15
str r2, [r3, lr, lsl #2] @ save phys SP
#else
str r2, [r3] @ save phys SP
#endif
mov lr, pc
ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
#else
mov r2, sp @ current virtual SP
ldr r0, =cpu_suspend_size
sub sp, sp, r0 @ allocate CPU state on stack
mov r0, sp @ save pointer
stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn
ldr r3, =sleep_save_sp
add r2, sp, r1 @ convert SP to phys
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
ALT_UP(mov lr, #0)
and lr, lr, #15
str r2, [r3, lr, lsl #2] @ save phys SP
#else
str r2, [r3] @ save phys SP
#endif
bl cpu_do_suspend
#endif
@ flush data cache
#ifdef MULTI_CACHE
ldr r10, =cpu_cache
mov lr, r9
ldr pc, [r10, #CACHE_FLUSH_KERN_ALL]
#else
mov lr, r9
b __cpuc_flush_kern_all
#endif
ENDPROC(cpu_suspend)
.ltorg
/*
* r0 = control register value
* r1 = v:p offset (preserved by cpu_do_resume)
* r2 = phys page table base
* r3 = L1 section flags
*/
ENTRY(cpu_resume_mmu)
adr r4, cpu_resume_turn_mmu_on
mov r4, r4, lsr #20
orr r3, r3, r4, lsl #20
ldr r5, [r2, r4, lsl #2] @ save old mapping
str r3, [r2, r4, lsl #2] @ setup 1:1 mapping for mmu code
sub r2, r2, r1
ldr r3, =cpu_resume_after_mmu
bic r1, r0, #CR_C @ ensure D-cache is disabled
b cpu_resume_turn_mmu_on
ENDPROC(cpu_resume_mmu)
.ltorg
.align 5
cpu_resume_turn_mmu_on:
mcr p15, 0, r1, c1, c0, 0 @ turn on MMU, I-cache, etc
mrc p15, 0, r1, c0, c0, 0 @ read id reg
mov r1, r1
mov r1, r1
mov pc, r3 @ jump to virtual address
ENDPROC(cpu_resume_turn_mmu_on)
cpu_resume_after_mmu:
str r5, [r2, r4, lsl #2] @ restore old mapping
mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache
mov pc, lr
ENDPROC(cpu_resume_after_mmu)
/*
* Note: Yes, part of the following code is located into the .data section.
* This is to allow sleep_save_sp to be accessed with a relative load
* while we can't rely on any MMU translation. We could have put
* sleep_save_sp in the .text section as well, but some setups might
* insist on it to be truly read-only.
*/
.data
.align
ENTRY(cpu_resume)
#ifdef CONFIG_SMP
adr r0, sleep_save_sp
ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
ALT_UP(mov r1, #0)
and r1, r1, #15
ldr r0, [r0, r1, lsl #2] @ stack phys addr
#else
ldr r0, sleep_save_sp @ stack phys addr
#endif
msr cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
#ifdef MULTI_CPU
ldmia r0!, {r1, sp, lr, pc} @ load v:p, stack, return fn, resume fn
#else
ldmia r0!, {r1, sp, lr} @ load v:p, stack, return fn
b cpu_do_resume
#endif
ENDPROC(cpu_resume)
sleep_save_sp:
.rept CONFIG_NR_CPUS
.long 0 @ preserve stack phys ptr here
.endr
...@@ -50,3 +50,26 @@ void __init scu_enable(void __iomem *scu_base) ...@@ -50,3 +50,26 @@ void __init scu_enable(void __iomem *scu_base)
*/ */
flush_cache_all(); flush_cache_all();
} }
/*
* Set the executing CPUs power mode as defined. This will be in
* preparation for it executing a WFI instruction.
*
* This function must be called with preemption disabled, and as it
* has the side effect of disabling coherency, caches must have been
* flushed. Interrupts must also have been disabled.
*/
int scu_power_mode(void __iomem *scu_base, unsigned int mode)
{
unsigned int val;
int cpu = smp_processor_id();
if (mode > 3 || mode == 1 || cpu > 3)
return -EINVAL;
val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
val |= mode;
__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
return 0;
}
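A typical consumer of scu_power_mode() would be an SMP platform's CPU-hotplug path, dropping the dying CPU into a low-power SCU state just before WFI. A hedged sketch only: the SCU_PM_* constants are assumed to come from asm/smp_scu.h, and the function and scu_base_addr names are hypothetical, not part of this diff:

#include <asm/smp_scu.h>
#include <asm/cacheflush.h>

/* Hypothetical per-platform CPU shutdown helper. */
static void platform_cpu_enter_lowpower(void __iomem *scu_base_addr)
{
	flush_cache_all();			/* caches must be clean beforehand */
	scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
	for (;;)
		asm volatile("wfi");		/* wait to be powered down */
}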
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#include <linux/string.h> /* memcpy */ #include <linux/string.h> /* memcpy */
#include <asm/cputype.h> #include <asm/cputype.h>
#include <asm/mach/map.h> #include <asm/mach/map.h>
#include <mach/memory.h> #include <asm/memory.h>
#include "tcm.h" #include "tcm.h"
static struct gen_pool *tcm_pool; static struct gen_pool *tcm_pool;
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include <linux/kexec.h> #include <linux/kexec.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/sched.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
...@@ -32,7 +33,6 @@ ...@@ -32,7 +33,6 @@
#include <asm/unwind.h> #include <asm/unwind.h>
#include <asm/tls.h> #include <asm/tls.h>
#include "ptrace.h"
#include "signal.h" #include "signal.h"
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
...@@ -256,7 +256,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt ...@@ -256,7 +256,7 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
return ret; return ret;
} }
DEFINE_SPINLOCK(die_lock); static DEFINE_SPINLOCK(die_lock);
/* /*
* This function is protected against re-entrancy. * This function is protected against re-entrancy.
......
...@@ -64,6 +64,10 @@ SECTIONS ...@@ -64,6 +64,10 @@ SECTIONS
__smpalt_end = .; __smpalt_end = .;
#endif #endif
__pv_table_begin = .;
*(.pv_table)
__pv_table_end = .;
INIT_SETUP(16) INIT_SETUP(16)
INIT_CALLS INIT_CALLS
......
#if __LINUX_ARM_ARCH__ >= 6
#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K)
.macro bitop, instr .macro bitop, instr
ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
mov r2, #1 mov r2, #1
and r3, r0, #7 @ Get bit offset and r3, r0, #31 @ Get bit offset
add r1, r1, r0, lsr #3 @ Get byte offset mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
mov r3, r2, lsl r3 mov r3, r2, lsl r3
1: ldrexb r2, [r1] 1: ldrex r2, [r1]
\instr r2, r2, r3 \instr r2, r2, r3
strexb r0, r2, [r1] strex r0, r2, [r1]
cmp r0, #0 cmp r0, #0
bne 1b bne 1b
mov pc, lr bx lr
.endm .endm
.macro testop, instr, store .macro testop, instr, store
and r3, r0, #7 @ Get bit offset ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
mov r2, #1 mov r2, #1
add r1, r1, r0, lsr #3 @ Get byte offset and r3, r0, #31 @ Get bit offset
mov r0, r0, lsr #5
add r1, r1, r0, lsl #2 @ Get word offset
mov r3, r2, lsl r3 @ create mask mov r3, r2, lsl r3 @ create mask
smp_dmb smp_dmb
1: ldrexb r2, [r1] 1: ldrex r2, [r1]
ands r0, r2, r3 @ save old value of bit ands r0, r2, r3 @ save old value of bit
\instr r2, r2, r3 @ toggle bit \instr r2, r2, r3 @ toggle bit
strexb ip, r2, [r1] strex ip, r2, [r1]
cmp ip, #0 cmp ip, #0
bne 1b bne 1b
smp_dmb smp_dmb
cmp r0, #0 cmp r0, #0
movne r0, #1 movne r0, #1
2: mov pc, lr 2: bx lr
.endm .endm
#else #else
.macro bitop, instr .macro bitop, instr
and r2, r0, #7 ands ip, r1, #3
strneb r1, [ip] @ assert word-aligned
and r2, r0, #31
mov r0, r0, lsr #5
mov r3, #1 mov r3, #1
mov r3, r3, lsl r2 mov r3, r3, lsl r2
save_and_disable_irqs ip save_and_disable_irqs ip
ldrb r2, [r1, r0, lsr #3] ldr r2, [r1, r0, lsl #2]
\instr r2, r2, r3 \instr r2, r2, r3
strb r2, [r1, r0, lsr #3] str r2, [r1, r0, lsl #2]
restore_irqs ip restore_irqs ip
mov pc, lr mov pc, lr
.endm .endm
...@@ -52,11 +60,13 @@ ...@@ -52,11 +60,13 @@
* to avoid dirtying the data cache. * to avoid dirtying the data cache.
*/ */
.macro testop, instr, store .macro testop, instr, store
add r1, r1, r0, lsr #3 ands ip, r1, #3
and r3, r0, #7 strneb r1, [ip] @ assert word-aligned
mov r0, #1 and r3, r0, #31
mov r0, r0, lsr #5
save_and_disable_irqs ip save_and_disable_irqs ip
ldrb r2, [r1] ldr r2, [r1, r0, lsl #2]!
mov r0, #1
tst r2, r0, lsl r3 tst r2, r0, lsl r3
\instr r2, r2, r0, lsl r3 \instr r2, r2, r0, lsl r3
\store r2, [r1] \store r2, [r1]
......
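The rewritten macros switch the bitops from byte-oriented to word-oriented addressing, and add a cheap alignment assertion: a pointer with its low two bits set triggers a store to a near-NULL address, which faults immediately instead of corrupting memory. The address and mask computation they perform is equivalent to the following C sketch, assuming the 32-bit unsigned long of ARM:

/* bit -> word pointer and mask, as computed by "and r3, r0, #31",
 * "mov r0, r0, lsr #5" and "add r1, r1, r0, lsl #2" above. */
static inline void set_bit_sketch(unsigned int bit, unsigned long *addr)
{
	unsigned long *word = addr + (bit >> 5);	/* 32 bits per word     */
	unsigned long mask  = 1UL << (bit & 31);	/* bit within that word */

	*word |= mask;	/* the real code does this with ldrex/strex, or with IRQs off */
}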
...@@ -12,12 +12,6 @@ ...@@ -12,12 +12,6 @@
#include "bitops.h" #include "bitops.h"
.text .text
/* Purpose : Function to change a bit ENTRY(_change_bit)
* Prototype: int change_bit(int bit, void *addr)
*/
ENTRY(_change_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_change_bit_le)
bitop eor bitop eor
ENDPROC(_change_bit_be) ENDPROC(_change_bit)
ENDPROC(_change_bit_le)
...@@ -12,13 +12,6 @@ ...@@ -12,13 +12,6 @@
#include "bitops.h" #include "bitops.h"
.text .text
/* ENTRY(_clear_bit)
* Purpose : Function to clear a bit
* Prototype: int clear_bit(int bit, void *addr)
*/
ENTRY(_clear_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_clear_bit_le)
bitop bic bitop bic
ENDPROC(_clear_bit_be) ENDPROC(_clear_bit)
ENDPROC(_clear_bit_le)
...@@ -12,13 +12,6 @@ ...@@ -12,13 +12,6 @@
#include "bitops.h" #include "bitops.h"
.text .text
/* ENTRY(_set_bit)
* Purpose : Function to set a bit
* Prototype: int set_bit(int bit, void *addr)
*/
ENTRY(_set_bit_be)
eor r0, r0, #0x18 @ big endian byte ordering
ENTRY(_set_bit_le)
bitop orr bitop orr
ENDPROC(_set_bit_be) ENDPROC(_set_bit)
ENDPROC(_set_bit_le)
...@@ -12,9 +12,6 @@ ...@@ -12,9 +12,6 @@
#include "bitops.h" #include "bitops.h"
.text .text
ENTRY(_test_and_change_bit_be) ENTRY(_test_and_change_bit)
eor r0, r0, #0x18 @ big endian byte ordering testop eor, str
ENTRY(_test_and_change_bit_le) ENDPROC(_test_and_change_bit)
testop eor, strb
ENDPROC(_test_and_change_bit_be)
ENDPROC(_test_and_change_bit_le)
...@@ -12,9 +12,6 @@ ...@@ -12,9 +12,6 @@
#include "bitops.h" #include "bitops.h"
.text .text
ENTRY(_test_and_clear_bit_be) ENTRY(_test_and_clear_bit)
eor r0, r0, #0x18 @ big endian byte ordering testop bicne, strne
ENTRY(_test_and_clear_bit_le) ENDPROC(_test_and_clear_bit)
testop bicne, strneb
ENDPROC(_test_and_clear_bit_be)
ENDPROC(_test_and_clear_bit_le)
...@@ -12,9 +12,6 @@ ...@@ -12,9 +12,6 @@
#include "bitops.h" #include "bitops.h"
.text .text
ENTRY(_test_and_set_bit_be) ENTRY(_test_and_set_bit)
eor r0, r0, #0x18 @ big endian byte ordering testop orreq, streq
ENTRY(_test_and_set_bit_le) ENDPROC(_test_and_set_bit)
testop orreq, streqb
ENDPROC(_test_and_set_bit_be)
ENDPROC(_test_and_set_bit_le)
...@@ -12,6 +12,6 @@ ...@@ -12,6 +12,6 @@
#define __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H
#define PHYS_OFFSET UL(0xf0000000) #define PLAT_PHYS_OFFSET UL(0xf0000000)
#endif /* __ASM_ARCH_MEMORY_H */ #endif /* __ASM_ARCH_MEMORY_H */
...@@ -153,6 +153,7 @@ static struct i2c_board_info __initdata snapper9260_i2c_devices[] = { ...@@ -153,6 +153,7 @@ static struct i2c_board_info __initdata snapper9260_i2c_devices[] = {
{ {
/* RTC */ /* RTC */
I2C_BOARD_INFO("isl1208", 0x6f), I2C_BOARD_INFO("isl1208", 0x6f),
.irq = gpio_to_irq(AT91_PIN_PA31),
}, },
}; };
......
...@@ -220,15 +220,8 @@ extern void at91_gpio_resume(void); ...@@ -220,15 +220,8 @@ extern void at91_gpio_resume(void);
#define gpio_set_value __gpio_set_value #define gpio_set_value __gpio_set_value
#define gpio_cansleep __gpio_cansleep #define gpio_cansleep __gpio_cansleep
static inline int gpio_to_irq(unsigned gpio) #define gpio_to_irq(gpio) (gpio)
{ #define irq_to_gpio(irq) (irq)
return gpio;
}
static inline int irq_to_gpio(unsigned irq)
{
return irq;
}
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -23,6 +23,6 @@ ...@@ -23,6 +23,6 @@
#include <mach/hardware.h> #include <mach/hardware.h>
#define PHYS_OFFSET (AT91_SDRAM_BASE) #define PLAT_PHYS_OFFSET (AT91_SDRAM_BASE)
#endif #endif
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
* *_SIZE is the size of the region * *_SIZE is the size of the region
* *_BASE is the virtual address * *_BASE is the virtual address
*/ */
#define RAM_START PHYS_OFFSET #define RAM_START PLAT_PHYS_OFFSET
#define RAM_SIZE (CFG_GLOBAL_RAM_SIZE-CFG_GLOBAL_RAM_SIZE_RESERVED) #define RAM_SIZE (CFG_GLOBAL_RAM_SIZE-CFG_GLOBAL_RAM_SIZE_RESERVED)
#define RAM_BASE PAGE_OFFSET #define RAM_BASE PAGE_OFFSET
......
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead. * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/ */
#define PHYS_OFFSET CFG_GLOBAL_RAM_BASE #define PLAT_PHYS_OFFSET CFG_GLOBAL_RAM_BASE
/* /*
* Maximum DMA memory allowed is 14M * Maximum DMA memory allowed is 14M
......
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
/* /*
* Physical DRAM offset. * Physical DRAM offset.
*/ */
#define PHYS_OFFSET UL(0xc0000000) #define PLAT_PHYS_OFFSET UL(0xc0000000)
#if !defined(CONFIG_ARCH_CDB89712) && !defined (CONFIG_ARCH_AUTCPU12) #if !defined(CONFIG_ARCH_CDB89712) && !defined (CONFIG_ARCH_AUTCPU12)
......
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
/* /*
* Physical DRAM offset. * Physical DRAM offset.
*/ */
#define PHYS_OFFSET UL(0x00000000) #define PLAT_PHYS_OFFSET UL(0x00000000)
#define __phys_to_bus(x) ((x) + PHYS_OFFSET) #define __phys_to_bus(x) ((x) + PHYS_OFFSET)
#define __bus_to_phys(x) ((x) - PHYS_OFFSET) #define __bus_to_phys(x) ((x) - PHYS_OFFSET)
......
...@@ -26,9 +26,9 @@ ...@@ -26,9 +26,9 @@
#if defined(CONFIG_ARCH_DAVINCI_DA8XX) && defined(CONFIG_ARCH_DAVINCI_DMx) #if defined(CONFIG_ARCH_DAVINCI_DA8XX) && defined(CONFIG_ARCH_DAVINCI_DMx)
#error Cannot enable DaVinci and DA8XX platforms concurrently #error Cannot enable DaVinci and DA8XX platforms concurrently
#elif defined(CONFIG_ARCH_DAVINCI_DA8XX) #elif defined(CONFIG_ARCH_DAVINCI_DA8XX)
#define PHYS_OFFSET DA8XX_DDR_BASE #define PLAT_PHYS_OFFSET DA8XX_DDR_BASE
#else #else
#define PHYS_OFFSET DAVINCI_DDR_BASE #define PLAT_PHYS_OFFSET DAVINCI_DDR_BASE
#endif #endif
#define DDR2_SDRCR_OFFSET 0xc #define DDR2_SDRCR_OFFSET 0xc
......
...@@ -9,7 +9,7 @@ config MACH_DOVE_DB ...@@ -9,7 +9,7 @@ config MACH_DOVE_DB
Say 'Y' here if you want your kernel to support the Say 'Y' here if you want your kernel to support the
Marvell DB-MV88AP510 Development Board. Marvell DB-MV88AP510 Development Board.
config MACH_CM_A510 config MACH_CM_A510
bool "CompuLab CM-A510 Board" bool "CompuLab CM-A510 Board"
help help
Say 'Y' here if you want your kernel to support the Say 'Y' here if you want your kernel to support the
......
...@@ -5,6 +5,6 @@ ...@@ -5,6 +5,6 @@
#ifndef __ASM_ARCH_MEMORY_H #ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H
#define PHYS_OFFSET UL(0x00000000) #define PLAT_PHYS_OFFSET UL(0x00000000)
#endif #endif
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
/* /*
* Physical DRAM offset. * Physical DRAM offset.
*/ */
#define PHYS_OFFSET UL(0x00000000) #define PLAT_PHYS_OFFSET UL(0x00000000)
/* /*
* Cache flushing area - SRAM * Cache flushing area - SRAM
......
...@@ -99,8 +99,6 @@ ...@@ -99,8 +99,6 @@
/* maximum value for irq capable line identifiers */ /* maximum value for irq capable line identifiers */
#define EP93XX_GPIO_LINE_MAX_IRQ EP93XX_GPIO_LINE_F(7) #define EP93XX_GPIO_LINE_MAX_IRQ EP93XX_GPIO_LINE_F(7)
extern void ep93xx_gpio_int_debounce(unsigned int irq, int enable);
/* new generic GPIO API - see Documentation/gpio.txt */ /* new generic GPIO API - see Documentation/gpio.txt */
#include <asm-generic/gpio.h> #include <asm-generic/gpio.h>
......
...@@ -6,15 +6,15 @@ ...@@ -6,15 +6,15 @@
#define __ASM_ARCH_MEMORY_H #define __ASM_ARCH_MEMORY_H
#if defined(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET) #if defined(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET)
#define PHYS_OFFSET UL(0x00000000) #define PLAT_PHYS_OFFSET UL(0x00000000)
#elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET) #elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)
#define PHYS_OFFSET UL(0xc0000000) #define PLAT_PHYS_OFFSET UL(0xc0000000)
#elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET) #elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)
#define PHYS_OFFSET UL(0xd0000000) #define PLAT_PHYS_OFFSET UL(0xd0000000)
#elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET) #elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)
#define PHYS_OFFSET UL(0xe0000000) #define PLAT_PHYS_OFFSET UL(0xe0000000)
#elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET) #elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)
#define PHYS_OFFSET UL(0xf0000000) #define PLAT_PHYS_OFFSET UL(0xf0000000)
#else #else
#error "Kconfig bug: No EP93xx PHYS_OFFSET set" #error "Kconfig bug: No EP93xx PHYS_OFFSET set"
#endif #endif
......